Merge tag 'for-linus-20190516' of git://git.kernel.dk/linux-block
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_lvds.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sdvo.h"
#include "intel_sprite.h"
#include "intel_tv.h"
72
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Framebuffer modifiers; DRM_FORMAT_MOD_INVALID is the list terminator. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes take only linear buffers (no tiling modifiers listed). */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
106
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
138
/*
 * Valid [min, max] ranges for every divider/clock of the DPLL equation,
 * as consumed by intel_PLL_is_valid() and the *_find_best_dpll() helpers.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		/* dot clock threshold that selects p2_slow vs p2_fast */
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
149
150 /* returns HPLL frequency in kHz */
151 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
152 {
153         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
154
155         /* Obtain SKU information */
156         mutex_lock(&dev_priv->sb_lock);
157         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
158                 CCK_FUSE_HPLL_FREQ_MASK;
159         mutex_unlock(&dev_priv->sb_lock);
160
161         return vco_freq[hpll_freq] * 1000;
162 }
163
164 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
165                       const char *name, u32 reg, int ref_freq)
166 {
167         u32 val;
168         int divider;
169
170         mutex_lock(&dev_priv->sb_lock);
171         val = vlv_cck_read(dev_priv, reg);
172         mutex_unlock(&dev_priv->sb_lock);
173
174         divider = val & CCK_FREQUENCY_VALUES;
175
176         WARN((val & CCK_FREQUENCY_STATUS) !=
177              (divider << CCK_FREQUENCY_STATUS_SHIFT),
178              "%s change in progress\n", name);
179
180         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
181 }
182
183 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
184                            const char *name, u32 reg)
185 {
186         if (dev_priv->hpll_freq == 0)
187                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
188
189         return vlv_get_cck_clock(dev_priv, name, reg,
190                                  dev_priv->hpll_freq);
191 }
192
193 static void intel_update_czclk(struct drm_i915_private *dev_priv)
194 {
195         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
196                 return;
197
198         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
199                                                       CCK_CZ_CLOCK_CONTROL);
200
201         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
202 }
203
204 static inline u32 /* units of 100MHz */
205 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
206                     const struct intel_crtc_state *pipe_config)
207 {
208         if (HAS_DDI(dev_priv))
209                 return pipe_config->port_clock; /* SPLL */
210         else
211                 return dev_priv->fdi_pll_freq;
212 }
213
/* gen2 (i8xx) DPLL limits, DAC output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Identical to the DAC table except p2_fast (4 here vs 2 above). */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 LVDS: narrower p1 range, larger p2 dividers than DAC/DVO */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* gen3/gen4 (i9xx) limits, SDVO/DAC outputs */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/gen4 (i9xx) limits, LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
278
279
/* G4x limits, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x limits, HDMI/DAC output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x limits, single-channel LVDS (dot_limit 0: p2_slow == p2_fast) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x limits, dual-channel LVDS */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview limits, SDVO output */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview limits, LVDS output (m1 unused here too, see above) */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
363
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB single-channel LVDS */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS with a 100MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
434
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 values carry 22 fractional bits (cf. chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
478
479 static void
480 skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
481 {
482         if (enable)
483                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
484                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
485                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
486         else
487                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
488                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
489                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
490 }
491
/* Thin local wrapper: does this CRTC state require a full modeset? */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
497
498 /*
499  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
500  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
501  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
502  * The helpers' return value is the rate of the clock that is fed to the
503  * display engine's pipe which can be the above fast dot clock rate or a
504  * divided-down version of it.
505  */
506 /* m1 is reserved as 0 in Pineview, n is a ring counter */
507 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
508 {
509         clock->m = clock->m2 + 2;
510         clock->p = clock->p1 * clock->p2;
511         if (WARN_ON(clock->n == 0 || clock->p == 0))
512                 return 0;
513         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
514         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
515
516         return clock->dot;
517 }
518
519 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
520 {
521         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
522 }
523
524 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
525 {
526         clock->m = i9xx_dpll_compute_m(clock);
527         clock->p = clock->p1 * clock->p2;
528         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
529                 return 0;
530         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
531         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
532
533         return clock->dot;
534 }
535
536 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
537 {
538         clock->m = clock->m1 * clock->m2;
539         clock->p = clock->p1 * clock->p2;
540         if (WARN_ON(clock->n == 0 || clock->p == 0))
541                 return 0;
542         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
543         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
544
545         return clock->dot / 5;
546 }
547
548 int chv_calc_dpll_params(int refclk, struct dpll *clock)
549 {
550         clock->m = clock->m1 * clock->m2;
551         clock->p = clock->p1 * clock->p2;
552         if (WARN_ON(clock->n == 0 || clock->p == 0))
553                 return 0;
554         clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
555                                            clock->n << 22);
556         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
557
558         return clock->dot / 5;
559 }
560
/*
 * Bail out of the *calling* validity check: expands to 'return false' in
 * the caller (intel_PLL_is_valid). The debug message is compiled out.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
562
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 *
 * NOTE: INTELPllInvalid() expands to 'return false', so every "invalid"
 * branch below exits this function immediately.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/*
	 * Skip the m1 > m2 requirement on platforms whose limits tables
	 * give m1 no meaningful range (PNV pins m1 to 0; VLV/CHV/BXT
	 * tables define no m/p ranges at all).
	 */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
603
604 static int
605 i9xx_select_p2_div(const struct intel_limit *limit,
606                    const struct intel_crtc_state *crtc_state,
607                    int target)
608 {
609         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
610
611         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
612                 /*
613                  * For LVDS just rely on its current settings for dual-channel.
614                  * We haven't figured out how to reliably set up different
615                  * single/dual channel state, if we even can.
616                  */
617                 if (intel_is_dual_link_lvds(dev_priv))
618                         return limit->p2.p2_fast;
619                 else
620                         return limit->p2.p2_slow;
621         } else {
622                 if (target < limit->p2.dot_limit)
623                         return limit->p2.p2_slow;
624                 else
625                         return limit->p2.p2_fast;
626         }
627 }
628
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustive search over the divider ranges. The strict '<' below
	 * means the first candidate with a given error wins ties, so the
	 * iteration order is part of the result selection.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must exceed m2 here (cf. intel_PLL_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err was seeded with target; any accepted candidate lowers it */
	return (err != target);
}
686
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), minus the
	 * m2 >= m1 early break (PNV pins m1 to 0 in its limits tables)
	 * and using the PNV clock equation.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
742
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Cap n at the current winner's
						 * value: later iterations may
						 * only improve the error with
						 * an equal or smaller n.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
801
802 /*
803  * Check if the calculated PLL configuration is more optimal compared to the
804  * best configuration and error found so far. Return the calculated error.
805  */
806 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
807                                const struct dpll *calculated_clock,
808                                const struct dpll *best_clock,
809                                unsigned int best_error_ppm,
810                                unsigned int *error_ppm)
811 {
812         /*
813          * For CHV ignore the error and consider only the P value.
814          * Prefer a bigger P value based on HW requirements.
815          */
816         if (IS_CHERRYVIEW(to_i915(dev))) {
817                 *error_ppm = 0;
818
819                 return calculated_clock->p > best_clock->p;
820         }
821
822         if (WARN_ON_ONCE(!target_freq))
823                 return false;
824
825         *error_ppm = div_u64(1000000ULL *
826                                 abs(target_freq - calculated_clock->dot),
827                              target_freq);
828         /*
829          * Prefer a better P value over a better (smaller) error if the error
830          * is small. Ensure this preference for future configurations too by
831          * setting the error to 0.
832          */
833         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
834                 *error_ppm = 0;
835
836                 return true;
837         }
838
839         return *error_ppm + 10 < best_error_ppm;
840 }
841
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	/* NOTE(review): match_clock is not consulted in this variant. */
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000; /* worst possible error: 100% */
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/*
					 * m2 is solved directly from the
					 * equation instead of being iterated,
					 * then the full set of dividers is
					 * validated below.
					 */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* Keep the candidate only if it beats the best so far */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
901
902 /*
903  * Returns a set of divisors for the desired target clock with the given
904  * refclk, or FALSE.  The returned values represent the clock equation:
905  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
906  */
907 static bool
908 chv_find_best_dpll(const struct intel_limit *limit,
909                    struct intel_crtc_state *crtc_state,
910                    int target, int refclk, struct dpll *match_clock,
911                    struct dpll *best_clock)
912 {
913         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
914         struct drm_device *dev = crtc->base.dev;
915         unsigned int best_error_ppm;
916         struct dpll clock;
917         u64 m2;
918         int found = false;
919
920         memset(best_clock, 0, sizeof(*best_clock));
921         best_error_ppm = 1000000;
922
923         /*
924          * Based on hardware doc, the n always set to 1, and m1 always
925          * set to 2.  If requires to support 200Mhz refclk, we need to
926          * revisit this because n may not 1 anymore.
927          */
928         clock.n = 1, clock.m1 = 2;
929         target *= 5;    /* fast clock */
930
931         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
932                 for (clock.p2 = limit->p2.p2_fast;
933                                 clock.p2 >= limit->p2.p2_slow;
934                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
935                         unsigned int error_ppm;
936
937                         clock.p = clock.p1 * clock.p2;
938
939                         m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
940                                         clock.n) << 22, refclk * clock.m1);
941
942                         if (m2 > INT_MAX/clock.m1)
943                                 continue;
944
945                         clock.m2 = m2;
946
947                         chv_calc_dpll_params(refclk, &clock);
948
949                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
950                                 continue;
951
952                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
953                                                 best_error_ppm, &error_ppm))
954                                 continue;
955
956                         *best_clock = clock;
957                         best_error_ppm = error_ppm;
958                         found = true;
959                 }
960         }
961
962         return found;
963 }
964
965 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
966                         struct dpll *best_clock)
967 {
968         int refclk = 100000;
969         const struct intel_limit *limit = &intel_limits_bxt;
970
971         return chv_find_best_dpll(limit, crtc_state,
972                                   crtc_state->port_clock, refclk,
973                                   NULL, best_clock);
974 }
975
976 bool intel_crtc_active(struct intel_crtc *crtc)
977 {
978         /* Be paranoid as we can arrive here with only partial
979          * state retrieved from the hardware during setup.
980          *
981          * We can ditch the adjusted_mode.crtc_clock check as soon
982          * as Haswell has gained clock readout/fastboot support.
983          *
984          * We can ditch the crtc->primary->state->fb check as soon as we can
985          * properly reconstruct framebuffers.
986          *
987          * FIXME: The intel_crtc->active here should be switched to
988          * crtc->state->active once we have proper CRTC states wired up
989          * for atomic.
990          */
991         return crtc->active && crtc->base.primary->state->fb &&
992                 crtc->config->base.adjusted_mode.crtc_clock;
993 }
994
995 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
996                                              enum pipe pipe)
997 {
998         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
999
1000         return crtc->config->cpu_transcoder;
1001 }
1002
1003 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1004                                     enum pipe pipe)
1005 {
1006         i915_reg_t reg = PIPEDSL(pipe);
1007         u32 line1, line2;
1008         u32 line_mask;
1009
1010         if (IS_GEN(dev_priv, 2))
1011                 line_mask = DSL_LINEMASK_GEN2;
1012         else
1013                 line_mask = DSL_LINEMASK_GEN3;
1014
1015         line1 = I915_READ(reg) & line_mask;
1016         msleep(5);
1017         line2 = I915_READ(reg) & line_mask;
1018
1019         return line1 != line2;
1020 }
1021
/*
 * Wait (up to 100ms) until the pipe's scanline counter is moving (@state
 * true) or has stopped (@state false); warn on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1032
/* Wait until the pipe's scanline counter has stopped moving. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1037
/* Wait until the pipe's scanline counter has started moving. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1042
/*
 * Wait until the pipe described by @old_crtc_state has fully shut down.
 * Gen4+ exposes a pipe-state bit in PIPECONF to poll; older hardware has
 * no such bit, so fall back to watching the scanline counter stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1062
1063 /* Only for pre-ILK configs */
1064 void assert_pll(struct drm_i915_private *dev_priv,
1065                 enum pipe pipe, bool state)
1066 {
1067         u32 val;
1068         bool cur_state;
1069
1070         val = I915_READ(DPLL(pipe));
1071         cur_state = !!(val & DPLL_VCO_ENABLE);
1072         I915_STATE_WARN(cur_state != state,
1073              "PLL state assertion failure (expected %s, current %s)\n",
1074                         onoff(state), onoff(cur_state));
1075 }
1076
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* Sideband (CCK) access requires holding sb_lock */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1092
/*
 * Assert that the FDI TX for @pipe is enabled/disabled per @state.
 * On DDI platforms the transcoder function enable bit stands in for the
 * missing FDI_TX register.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1114
1115 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1116                           enum pipe pipe, bool state)
1117 {
1118         u32 val;
1119         bool cur_state;
1120
1121         val = I915_READ(FDI_RX_CTL(pipe));
1122         cur_state = !!(val & FDI_RX_ENABLE);
1123         I915_STATE_WARN(cur_state != state,
1124              "FDI RX state assertion failure (expected %s, current %s)\n",
1125                         onoff(state), onoff(cur_state));
1126 }
1127 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1128 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1129
/*
 * Assert that the FDI TX PLL for @pipe is enabled, skipping platforms
 * where the check does not apply.
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1146
1147 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1148                        enum pipe pipe, bool state)
1149 {
1150         u32 val;
1151         bool cur_state;
1152
1153         val = I915_READ(FDI_RX_CTL(pipe));
1154         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1155         I915_STATE_WARN(cur_state != state,
1156              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1157                         onoff(state), onoff(cur_state));
1158 }
1159
/*
 * Assert that the panel power-sequencer registers driving @pipe are
 * writable (either panel power is off or the registers are unlocked).
 * The PPS instance and the pipe the panel is on are found differently
 * per platform.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are not handled here */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Single PPS; the panel port select tells us which port */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH hardware: only LVDS is expected here */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Registers are writable when panel power is off or they're unlocked */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1216
/*
 * Assert that @pipe is enabled/disabled per @state.  If the transcoder's
 * power well is down the pipe is treated as disabled without touching
 * its registers.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* power well off => the pipe can't be enabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1245
/*
 * Assert that @plane's hardware enable state matches @state, using the
 * plane's own hw-state readout hook.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1260
1261 static void assert_planes_disabled(struct intel_crtc *crtc)
1262 {
1263         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1264         struct intel_plane *plane;
1265
1266         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1267                 assert_plane_disabled(plane);
1268 }
1269
/*
 * Assert that vblank interrupts are disabled on @crtc.
 * drm_crtc_vblank_get() returning 0 means the vblank reference was
 * successfully taken, i.e. vblanks are (or could be) enabled — which is
 * the failure case here, so warn and drop the reference we just took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1275
1276 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1277                                     enum pipe pipe)
1278 {
1279         u32 val;
1280         bool enabled;
1281
1282         val = I915_READ(PCH_TRANSCONF(pipe));
1283         enabled = !!(val & TRANS_ENABLE);
1284         I915_STATE_WARN(enabled,
1285              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1286              pipe_name(pipe));
1287 }
1288
/*
 * Assert that the PCH DP port @port is not driving transcoder @pipe,
 * and (on IBX) that a disabled port has not been left pointing at
 * transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* IBX quirk: a disabled port must not select transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1306
/*
 * Assert that the PCH HDMI port @port is not driving transcoder @pipe,
 * and (on IBX) that a disabled port has not been left pointing at
 * transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* IBX quirk: a disabled port must not select transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1324
/*
 * Assert that no PCH-attached output (DP, VGA, LVDS, HDMI/SDVO) is
 * driving transcoder @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1349
/*
 * Write the DPLL control value and wait for the PLL to report lock.
 * Callers have already verified the pipe is off and the PPS registers
 * are unlocked.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1367
/*
 * Enable the VLV DPLL for @crtc per @pipe_config.  The VCO is only
 * spun up when the state actually asks for it; DPLL_MD is always
 * programmed.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1385
1386
/*
 * CHV PLL enable sequence: turn on the 10-bit (dclkp) clock via the
 * sideband first, then enable the PLL and wait for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	/* DPIO access requires holding sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1418
/*
 * Enable the CHV DPLL for @crtc per @pipe_config.  Pipes B/C need the
 * WaPixelRepeatModeFixForC0 chicken-bit dance to program DPLL_MD, since
 * their own DPLL_MD register is non-functional.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* Remember the value so hw readout can report it later */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1455
1456 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1457 {
1458         if (IS_I830(dev_priv))
1459                 return false;
1460
1461         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1462 }
1463
/*
 * Enable the i9xx-style DPLL for @crtc per @crtc_state, following the
 * required register write/settle sequence.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* Gen4+: pixel multiplier lives in DPLL_MD */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1509
/*
 * Disable the i9xx-style DPLL for the CRTC in @crtc_state, leaving only
 * VGA mode disable set.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1526
/*
 * Disable the VLV DPLL for @pipe, keeping the reference clock (and the
 * CRI clock on pipes B/C) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes B/C keep the CRI clock alive */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1542
/*
 * Disable the pipe DPLL on Cherryview.
 *
 * Clears the PLL enable bit (keeping the SSC reference clock and, for
 * pipes other than A, the CRI clock bits set), then disables the 10 bit
 * DPIO clock to the display controller through the sideband interface.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* Sideband access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1568
/*
 * Wait (up to 1ms) for a VLV/CHV digital port to report ready in the
 * relevant status register, and WARN with observed vs expected bits on
 * timeout.
 *
 * Ports B and C share DPLL(0): port C's ready bits sit 4 bits above
 * port B's, hence the expected_mask shift. Port D status lives in
 * DPIO_PHY_STATUS instead.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Shift expectation up to port C's bit positions. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1601
/*
 * Enable the PCH transcoder feeding the given crtc's pipe (IBX/CPT).
 *
 * Requires the shared DPLL and both FDI TX/RX for the pipe to be running
 * already (asserted below). Copies BPC and interlace configuration from
 * the CPU PIPECONF into the PCH transcoder config, then waits up to
 * 100ms for the transcoder to report enabled.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Propagate the CPU pipe's interlace mode to the PCH transcoder. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1660
/*
 * Enable the (single) LPT PCH transcoder.
 *
 * FDI must already be feeding us bits (RX is always on PIPE_A on LPT).
 * Sets the timing-override workaround bit, copies the interlace mode
 * from the CPU PIPECONF, and waits up to 100ms for the transcoder to
 * report enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Mirror the CPU pipe's interlace mode. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1692
/*
 * Disable the PCH transcoder for @pipe (IBX/CPT).
 *
 * FDI and the PCH ports must already be off (asserted below). Waits up
 * to 50ms for the transcoder to report disabled, then on CPT clears the
 * timing-override chicken bit that was set at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1724
/*
 * Disable the (single) LPT PCH transcoder.
 *
 * Waits up to 50ms for the transcoder to report disabled, then clears
 * the timing-override workaround bit set by lpt_enable_pch_transcoder().
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1743
1744 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1745 {
1746         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1747
1748         if (HAS_PCH_LPT(dev_priv))
1749                 return PIPE_A;
1750         else
1751                 return crtc->pipe;
1752 }
1753
1754 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1755 {
1756         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1757
1758         /*
1759          * On i965gm the hardware frame counter reads
1760          * zero when the TV encoder is enabled :(
1761          */
1762         if (IS_I965GM(dev_priv) &&
1763             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1764                 return 0;
1765
1766         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1767                 return 0xffffffff; /* full 32 bit counter */
1768         else if (INTEL_GEN(dev_priv) >= 3)
1769                 return 0xffffff; /* only 24 bits of frame count */
1770         else
1771                 return 0; /* Gen2 doesn't have a hardware frame counter */
1772 }
1773
1774 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1775 {
1776         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1777
1778         drm_crtc_set_max_vblank_count(&crtc->base,
1779                                       intel_crtc_max_vblank_count(crtc_state));
1780         drm_crtc_vblank_on(&crtc->base);
1781 }
1782
/*
 * Enable the CPU pipe/transcoder for the given crtc state.
 *
 * Asserts the prerequisites (planes off; on GMCH platforms the DSI or
 * pipe PLL running, on PCH platforms FDI running when driving the PCH),
 * sets PIPECONF_ENABLE, and on platforms without a usable hardware frame
 * counter waits for the scanline counter to start moving (see comment
 * below).
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1840
/*
 * Disable the CPU pipe/transcoder for the given crtc state.
 *
 * Planes must already be disabled. On i830 the pipe is left enabled (both
 * pipes are kept running there); otherwise PIPECONF_ENABLE is cleared and
 * we wait for the pipe to actually turn off before returning.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Already disabled; nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for off if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1880
/* GTT tile size in bytes: 2KiB on gen2, 4KiB on everything else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1885
/*
 * Return the tile row width in bytes for the given fb plane/modifier.
 *
 * Linear buffers are treated as cpp-wide "tiles". CCS planes
 * (color_plane == 1) always use 128 byte wide tiles; otherwise the
 * width depends on the modifier, platform, and (for Yf) the cpp.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width scales with bytes per pixel. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1933
1934 static unsigned int
1935 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1936 {
1937         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1938                 return 1;
1939         else
1940                 return intel_tile_size(to_i915(fb->dev)) /
1941                         intel_tile_width_bytes(fb, color_plane);
1942 }
1943
1944 /* Return the tile dimensions in pixel units */
1945 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1946                             unsigned int *tile_width,
1947                             unsigned int *tile_height)
1948 {
1949         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1950         unsigned int cpp = fb->format->cpp[color_plane];
1951
1952         *tile_width = tile_width_bytes / cpp;
1953         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1954 }
1955
/* Pad @height up to a whole number of tile rows for this fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1964
1965 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1966 {
1967         unsigned int size = 0;
1968         int i;
1969
1970         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1971                 size += rot_info->plane[i].width * rot_info->plane[i].height;
1972
1973         return size;
1974 }
1975
1976 static void
1977 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1978                         const struct drm_framebuffer *fb,
1979                         unsigned int rotation)
1980 {
1981         view->type = I915_GGTT_VIEW_NORMAL;
1982         if (drm_rotation_90_or_270(rotation)) {
1983                 view->type = I915_GGTT_VIEW_ROTATED;
1984                 view->rotated = to_intel_framebuffer(fb)->rot_info;
1985         }
1986 }
1987
/* Required GGTT alignment for the cursor plane on this platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	unsigned int alignment = 4 * 1024;

	if (IS_I830(dev_priv))
		alignment = 16 * 1024;
	else if (IS_I85X(dev_priv))
		alignment = 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		alignment = 32;

	return alignment;
}
1999
/* Required GGTT alignment for a linear scanout surface. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	/* Pre-gen4 has no linear alignment requirement. */
	return 0;
}
2012
2013 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2014                                          int color_plane)
2015 {
2016         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2017
2018         /* AUX_DIST needs only 4K alignment */
2019         if (color_plane == 1)
2020                 return 4096;
2021
2022         switch (fb->modifier) {
2023         case DRM_FORMAT_MOD_LINEAR:
2024                 return intel_linear_alignment(dev_priv);
2025         case I915_FORMAT_MOD_X_TILED:
2026                 if (INTEL_GEN(dev_priv) >= 9)
2027                         return 256 * 1024;
2028                 return 0;
2029         case I915_FORMAT_MOD_Y_TILED_CCS:
2030         case I915_FORMAT_MOD_Yf_TILED_CCS:
2031         case I915_FORMAT_MOD_Y_TILED:
2032         case I915_FORMAT_MOD_Yf_TILED:
2033                 return 1 * 1024 * 1024;
2034         default:
2035                 MISSING_CASE(fb->modifier);
2036                 return 0;
2037         }
2038 }
2039
2040 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2041 {
2042         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2043         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2044
2045         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2046 }
2047
/**
 * intel_pin_and_fence_fb_obj - pin a framebuffer object for scanout
 * @fb: framebuffer whose backing object to pin
 * @view: GGTT view (normal or rotated) to map the object through
 * @uses_fence: whether the plane may make use of a GGTT fence
 * @out_flags: PLANE_HAS_FENCE is ORed in if a fence was installed
 *
 * Pins the object into the GGTT at scanout alignment (mappable on GMCH
 * platforms), optionally installing a fence register for tiled
 * scan-out. Returns the pinned vma with an extra reference held, or an
 * ERR_PTR on failure. Must be called with struct_mutex held.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(dev_priv);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Pre-gen4 cannot run without a fence: unwind. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference for the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv, wakeref);
	return vma;
}
2139
/*
 * Release a framebuffer vma pinned by intel_pin_and_fence_fb_obj().
 *
 * Must be called with struct_mutex held. Drops the fence (if one was
 * installed, per @flags), the display-plane pin, and finally the vma
 * reference taken at pin time.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2149
2150 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2151                           unsigned int rotation)
2152 {
2153         if (drm_rotation_90_or_270(rotation))
2154                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2155         else
2156                 return fb->pitches[color_plane];
2157 }
2158
2159 /*
2160  * Convert the x/y offsets into a linear offset.
2161  * Only valid with 0/180 degree rotation, which is fine since linear
2162  * offset is only used with linear buffers on pre-hsw and tiled buffers
2163  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2164  */
2165 u32 intel_fb_xy_to_linear(int x, int y,
2166                           const struct intel_plane_state *state,
2167                           int color_plane)
2168 {
2169         const struct drm_framebuffer *fb = state->base.fb;
2170         unsigned int cpp = fb->format->cpp[color_plane];
2171         unsigned int pitch = state->color_plane[color_plane].stride;
2172
2173         return y * pitch + x * cpp;
2174 }
2175
2176 /*
2177  * Add the x/y offsets derived from fb->offsets[] to the user
2178  * specified plane src x/y offsets. The resulting x/y offsets
2179  * specify the start of scanout from the beginning of the gtt mapping.
2180  */
2181 void intel_add_fb_offsets(int *x, int *y,
2182                           const struct intel_plane_state *state,
2183                           int color_plane)
2184
2185 {
2186         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2187         unsigned int rotation = state->base.rotation;
2188
2189         if (drm_rotation_90_or_270(rotation)) {
2190                 *x += intel_fb->rotated[color_plane].x;
2191                 *y += intel_fb->rotated[color_plane].y;
2192         } else {
2193                 *x += intel_fb->normal[color_plane].x;
2194                 *y += intel_fb->normal[color_plane].y;
2195         }
2196 }
2197
2198 static u32 intel_adjust_tile_offset(int *x, int *y,
2199                                     unsigned int tile_width,
2200                                     unsigned int tile_height,
2201                                     unsigned int tile_size,
2202                                     unsigned int pitch_tiles,
2203                                     u32 old_offset,
2204                                     u32 new_offset)
2205 {
2206         unsigned int pitch_pixels = pitch_tiles * tile_width;
2207         unsigned int tiles;
2208
2209         WARN_ON(old_offset & (tile_size - 1));
2210         WARN_ON(new_offset & (tile_size - 1));
2211         WARN_ON(new_offset > old_offset);
2212
2213         tiles = (old_offset - new_offset) / tile_size;
2214
2215         *y += tiles / pitch_tiles * tile_height;
2216         *x += tiles % pitch_tiles * tile_width;
2217
2218         /* minimize x in case it got needlessly big */
2219         *y += *x / pitch_pixels * tile_height;
2220         *x %= pitch_pixels;
2221
2222         return new_offset;
2223 }
2224
/*
 * Whether the given fb plane is stored linearly (untiled).
 * @color_plane is currently unused.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2229
/*
 * Rebase the plane from @old_offset to @new_offset, folding the
 * difference into the x/y coordinates. For tiled surfaces the delta is
 * distributed in whole tiles (honoring the rotated pitch when 90/270
 * rotated); for linear surfaces it is distributed in rows/pixels.
 * Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: convert the byte delta back into rows + pixels. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2268
2269 /*
2270  * Adjust the tile offset by moving the difference into
2271  * the x/y offsets.
2272  */
2273 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2274                                              const struct intel_plane_state *state,
2275                                              int color_plane,
2276                                              u32 old_offset, u32 new_offset)
2277 {
2278         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2279                                            state->base.rotation,
2280                                            state->color_plane[color_plane].stride,
2281                                            old_offset, new_offset);
2282 }
2283
2284 /*
2285  * Computes the aligned offset to the base tile and adjusts
2286  * x, y. bytes per pixel is assumed to be a power-of-two.
2287  *
2288  * In the 90/270 rotated case, x and y are assumed
2289  * to be already rotated to match the rotated GTT view, and
2290  * pitch is the tile_height aligned framebuffer height.
2291  *
2292  * This function is used when computing the derived information
2293  * under intel_framebuffer, so using any of that information
2294  * here is not allowed. Anything under drm_framebuffer can be
2295  * used. This is why the user has to pass in the pitch since it
2296  * is specified in the rotated orientation.
2297  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/* Turn the power-of-two alignment into a bitmask. */
	if (alignment)
		alignment--;

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Reduce x/y to intra-tile coordinates. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the sub-alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* Remainder below the alignment becomes the new x/y. */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2348
2349 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2350                                               const struct intel_plane_state *state,
2351                                               int color_plane)
2352 {
2353         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2354         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2355         const struct drm_framebuffer *fb = state->base.fb;
2356         unsigned int rotation = state->base.rotation;
2357         int pitch = state->color_plane[color_plane].stride;
2358         u32 alignment;
2359
2360         if (intel_plane->id == PLANE_CURSOR)
2361                 alignment = intel_cursor_alignment(dev_priv);
2362         else
2363                 alignment = intel_surf_alignment(fb, color_plane);
2364
2365         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2366                                             pitch, rotation, alignment);
2367 }
2368
2369 /* Convert the fb->offset[] into x/y offsets */
2370 static int intel_fb_offset_to_xy(int *x, int *y,
2371                                  const struct drm_framebuffer *fb,
2372                                  int color_plane)
2373 {
2374         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2375         unsigned int height;
2376
2377         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2378             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2379                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2380                               fb->offsets[color_plane], color_plane);
2381                 return -EINVAL;
2382         }
2383
2384         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2385         height = ALIGN(height, intel_tile_height(fb, color_plane));
2386
2387         /* Catch potential overflows early */
2388         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2389                             fb->offsets[color_plane])) {
2390                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2391                               fb->offsets[color_plane], fb->pitches[color_plane],
2392                               color_plane);
2393                 return -ERANGE;
2394         }
2395
2396         *x = 0;
2397         *y = 0;
2398
2399         intel_adjust_aligned_offset(x, y,
2400                                     fb, color_plane, DRM_MODE_ROTATE_0,
2401                                     fb->pitches[color_plane],
2402                                     fb->offsets[color_plane], 0);
2403
2404         return 0;
2405 }
2406
2407 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2408 {
2409         switch (fb_modifier) {
2410         case I915_FORMAT_MOD_X_TILED:
2411                 return I915_TILING_X;
2412         case I915_FORMAT_MOD_Y_TILED:
2413         case I915_FORMAT_MOD_Y_TILED_CCS:
2414                 return I915_TILING_Y;
2415         default:
2416                 return I915_TILING_NONE;
2417         }
2418 }
2419
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info ccs_formats[] = {
	/* cpp[1] == 1, hsub == 8, vsub == 16: one CCS byte per 8x16 main surface pixels (see above) */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2440
2441 static const struct drm_format_info *
2442 lookup_format_info(const struct drm_format_info formats[],
2443                    int num_formats, u32 format)
2444 {
2445         int i;
2446
2447         for (i = 0; i < num_formats; i++) {
2448                 if (formats[i].format == format)
2449                         return &formats[i];
2450         }
2451
2452         return NULL;
2453 }
2454
2455 static const struct drm_format_info *
2456 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2457 {
2458         switch (cmd->modifier[0]) {
2459         case I915_FORMAT_MOD_Y_TILED_CCS:
2460         case I915_FORMAT_MOD_Yf_TILED_CCS:
2461                 return lookup_format_info(ccs_formats,
2462                                           ARRAY_SIZE(ccs_formats),
2463                                           cmd->pixel_format);
2464         default:
2465                 return NULL;
2466         }
2467 }
2468
2469 bool is_ccs_modifier(u64 modifier)
2470 {
2471         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2472                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2473 }
2474
/*
 * Precompute the per-color-plane x/y offsets for the normal and the
 * 90/270 rotated GTT views, validate the CCS layout against the main
 * surface, and check that the whole fb fits in the backing object.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* Plane 1 of a CCS fb is the compression control surface. */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			/* Scale CCS tile dims to main surface pixel units. */
			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		/* From here on the offset is counted in whole tiles. */
		offset /= tile_size;

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear surfaces have no separate rotated view. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2639
2640 static int i9xx_format_to_fourcc(int format)
2641 {
2642         switch (format) {
2643         case DISPPLANE_8BPP:
2644                 return DRM_FORMAT_C8;
2645         case DISPPLANE_BGRX555:
2646                 return DRM_FORMAT_XRGB1555;
2647         case DISPPLANE_BGRX565:
2648                 return DRM_FORMAT_RGB565;
2649         default:
2650         case DISPPLANE_BGRX888:
2651                 return DRM_FORMAT_XRGB8888;
2652         case DISPPLANE_RGBX888:
2653                 return DRM_FORMAT_XBGR8888;
2654         case DISPPLANE_BGRX101010:
2655                 return DRM_FORMAT_XRGB2101010;
2656         case DISPPLANE_RGBX101010:
2657                 return DRM_FORMAT_XBGR2101010;
2658         }
2659 }
2660
2661 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2662 {
2663         switch (format) {
2664         case PLANE_CTL_FORMAT_RGB_565:
2665                 return DRM_FORMAT_RGB565;
2666         case PLANE_CTL_FORMAT_NV12:
2667                 return DRM_FORMAT_NV12;
2668         case PLANE_CTL_FORMAT_P010:
2669                 return DRM_FORMAT_P010;
2670         case PLANE_CTL_FORMAT_P012:
2671                 return DRM_FORMAT_P012;
2672         case PLANE_CTL_FORMAT_P016:
2673                 return DRM_FORMAT_P016;
2674         case PLANE_CTL_FORMAT_Y210:
2675                 return DRM_FORMAT_Y210;
2676         case PLANE_CTL_FORMAT_Y212:
2677                 return DRM_FORMAT_Y212;
2678         case PLANE_CTL_FORMAT_Y216:
2679                 return DRM_FORMAT_Y216;
2680         case PLANE_CTL_FORMAT_Y410:
2681                 return DRM_FORMAT_XVYU2101010;
2682         case PLANE_CTL_FORMAT_Y412:
2683                 return DRM_FORMAT_XVYU12_16161616;
2684         case PLANE_CTL_FORMAT_Y416:
2685                 return DRM_FORMAT_XVYU16161616;
2686         default:
2687         case PLANE_CTL_FORMAT_XRGB_8888:
2688                 if (rgb_order) {
2689                         if (alpha)
2690                                 return DRM_FORMAT_ABGR8888;
2691                         else
2692                                 return DRM_FORMAT_XBGR8888;
2693                 } else {
2694                         if (alpha)
2695                                 return DRM_FORMAT_ARGB8888;
2696                         else
2697                                 return DRM_FORMAT_XRGB8888;
2698                 }
2699         case PLANE_CTL_FORMAT_XRGB_2101010:
2700                 if (rgb_order)
2701                         return DRM_FORMAT_XBGR2101010;
2702                 else
2703                         return DRM_FORMAT_XRGB2101010;
2704         case PLANE_CTL_FORMAT_XRGB_16161616F:
2705                 if (rgb_order) {
2706                         if (alpha)
2707                                 return DRM_FORMAT_ABGR16161616F;
2708                         else
2709                                 return DRM_FORMAT_XBGR16161616F;
2710                 } else {
2711                         if (alpha)
2712                                 return DRM_FORMAT_ARGB16161616F;
2713                         else
2714                                 return DRM_FORMAT_XRGB16161616F;
2715                 }
2716         }
2717 }
2718
2719 static bool
2720 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2721                               struct intel_initial_plane_config *plane_config)
2722 {
2723         struct drm_device *dev = crtc->base.dev;
2724         struct drm_i915_private *dev_priv = to_i915(dev);
2725         struct drm_i915_gem_object *obj = NULL;
2726         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2727         struct drm_framebuffer *fb = &plane_config->fb->base;
2728         u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2729         u32 size_aligned = round_up(plane_config->base + plane_config->size,
2730                                     PAGE_SIZE);
2731
2732         size_aligned -= base_aligned;
2733
2734         if (plane_config->size == 0)
2735                 return false;
2736
2737         /* If the FB is too big, just don't use it since fbdev is not very
2738          * important and we should probably use that space with FBC or other
2739          * features. */
2740         if (size_aligned * 2 > dev_priv->stolen_usable_size)
2741                 return false;
2742
2743         switch (fb->modifier) {
2744         case DRM_FORMAT_MOD_LINEAR:
2745         case I915_FORMAT_MOD_X_TILED:
2746         case I915_FORMAT_MOD_Y_TILED:
2747                 break;
2748         default:
2749                 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2750                                  fb->modifier);
2751                 return false;
2752         }
2753
2754         mutex_lock(&dev->struct_mutex);
2755         obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2756                                                              base_aligned,
2757                                                              base_aligned,
2758                                                              size_aligned);
2759         mutex_unlock(&dev->struct_mutex);
2760         if (!obj)
2761                 return false;
2762
2763         switch (plane_config->tiling) {
2764         case I915_TILING_NONE:
2765                 break;
2766         case I915_TILING_X:
2767         case I915_TILING_Y:
2768                 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2769                 break;
2770         default:
2771                 MISSING_CASE(plane_config->tiling);
2772                 return false;
2773         }
2774
2775         mode_cmd.pixel_format = fb->format->format;
2776         mode_cmd.width = fb->width;
2777         mode_cmd.height = fb->height;
2778         mode_cmd.pitches[0] = fb->pitches[0];
2779         mode_cmd.modifier[0] = fb->modifier;
2780         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2781
2782         if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2783                 DRM_DEBUG_KMS("intel fb init failed\n");
2784                 goto out_unref_obj;
2785         }
2786
2787
2788         DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2789         return true;
2790
2791 out_unref_obj:
2792         i915_gem_object_put(obj);
2793         return false;
2794 }
2795
2796 static void
2797 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2798                         struct intel_plane_state *plane_state,
2799                         bool visible)
2800 {
2801         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2802
2803         plane_state->base.visible = visible;
2804
2805         if (visible)
2806                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2807         else
2808                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2809 }
2810
2811 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
2812 {
2813         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2814         struct drm_plane *plane;
2815
2816         /*
2817          * Active_planes aliases if multiple "primary" or cursor planes
2818          * have been used on the same (or wrong) pipe. plane_mask uses
2819          * unique ids, hence we can use that to reconstruct active_planes.
2820          */
2821         crtc_state->active_planes = 0;
2822
2823         drm_for_each_plane_mask(plane, &dev_priv->drm,
2824                                 crtc_state->base.plane_mask)
2825                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2826 }
2827
/*
 * Disable a plane outside of the atomic commit machinery, keeping the
 * software state (visibility, plane_mask, active_planes) consistent
 * before touching the hardware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	/* Fix up the software state first... */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);

	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	/* ...then actually turn the plane off. */
	intel_disable_plane(plane, crtc_state);
}
2848
/*
 * Take over the firmware (BIOS) framebuffer for this CRTC's primary
 * plane: first try to wrap the preallocated memory in a new fb, then
 * fall back to sharing an fb another active CRTC already reconstructed
 * at the same GTT address. If both fail, disable the primary plane so
 * the software state doesn't claim a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GTT base address -> the firmware shared one fb. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->base.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		/* Drop the fb reference taken above and bail out. */
		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* The fb covers the whole pipe; src_* are 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
2957
/*
 * Maximum source width for a SKL+ plane, depending on the fb modifier
 * and bytes per pixel of the given color plane. Falls back to a
 * conservative 2048 for unknown modifier/cpp combinations.
 */
static int skl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		switch (cpp) {
		case 8:
			return 4096;
		case 4:
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* FIXME AUX plane? */
		/* fall through - CCS uses the plain Y/Yf tiled limits */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 8:
			return 2048;
		case 4:
			return 4096;
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	/* Conservative fallback for the MISSING_CASE paths above. */
	return 2048;
}
3003
/*
 * CCS has no x/y offset registers of its own, so the AUX surface's
 * intra-tile x/y must equal the main surface's. Walk the AUX offset
 * backwards one alignment step at a time (never below main_offset)
 * until they match; returns false if no matching position was found.
 * On success the adjusted AUX offset/x/y are written back into
 * plane_state->color_plane[1].
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* Intra-tile x/y already match, nothing more to do. */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* Can't back the AUX offset up any further. */
		if (aux_offset == 0)
			break;

		/* Adjust in CCS units, then convert back to pixel units. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
3041
/*
 * Validate and finalize the main (Y/RGB) surface of a SKL+ plane.
 *
 * Computes the aligned surface offset and x/y start coordinates, then
 * massages them to satisfy the hardware restrictions: the AUX surface
 * offset must be a non-negative distance from the main offset, X-tiled
 * planes must keep x + width within the stride, and with CCS modifiers
 * the main surface x/y must match the AUX surface x/y.
 *
 * Returns 0 on success, -EINVAL if the source is too big or no
 * suitable offset can be found.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset down one alignment step at a time */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	return 0;
}
3118
/*
 * Set up the chroma (CbCr) AUX surface for planar YUV formats.
 *
 * The src rectangle is in 16.16 fixed point; shifting by 17 both drops
 * the fraction and halves the values for the 2x2 subsampled chroma
 * plane. Returns 0 on success, -EINVAL if the chroma source exceeds
 * the plane limits.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3147
/*
 * Set up the CCS AUX surface. The AUX x/y are the main surface
 * coordinates scaled down by the CCS hsub/vsub factors; the sub-unit
 * remainder is folded back in after offset alignment so the main/AUX
 * coordinate matching in skl_check_main_surface() can succeed.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
3168
/*
 * Top-level surface checker for SKL+ planes: fills in the GGTT view
 * and per-plane strides, validates them, rotates the src coords into
 * the rotated GTT view if needed, sets up the AUX surface (chroma or
 * CCS) first, then the main surface which depends on the AUX offset.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
	plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* Nothing more to compute for invisible planes */
	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (is_planar_yuv_format(fb->format->format)) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/*
		 * Sentinel near the top of the address space so the
		 * main surface offset is effectively never clamped
		 * against a (non-existent) AUX surface.
		 */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3216
3217 unsigned int
3218 i9xx_plane_max_stride(struct intel_plane *plane,
3219                       u32 pixel_format, u64 modifier,
3220                       unsigned int rotation)
3221 {
3222         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3223
3224         if (!HAS_GMCH(dev_priv)) {
3225                 return 32*1024;
3226         } else if (INTEL_GEN(dev_priv) >= 4) {
3227                 if (modifier == I915_FORMAT_MOD_X_TILED)
3228                         return 16*1024;
3229                 else
3230                         return 32*1024;
3231         } else if (INTEL_GEN(dev_priv) >= 3) {
3232                 if (modifier == I915_FORMAT_MOD_X_TILED)
3233                         return 8*1024;
3234                 else
3235                         return 16*1024;
3236         } else {
3237                 if (plane->i9xx_plane == PLANE_C)
3238                         return 4*1024;
3239                 else
3240                         return 8*1024;
3241         }
3242 }
3243
3244 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3245 {
3246         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3247         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3248         u32 dspcntr = 0;
3249
3250         if (crtc_state->gamma_enable)
3251                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3252
3253         if (crtc_state->csc_enable)
3254                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3255
3256         if (INTEL_GEN(dev_priv) < 5)
3257                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3258
3259         return dspcntr;
3260 }
3261
/*
 * Compute the plane-state derived part of DSPCNTR for pre-SKL
 * primary planes: enable bit, pixel format, tiling, rotation and
 * mirroring. The crtc-derived bits come from i9xx_plane_ctl_crtc().
 * Returns 0 (plane not enabled) for unsupported pixel formats.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the drm fourcc onto the hardware pixel format bits */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* Only X-tiling is supported, and only on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3316
/*
 * Compute the final surface offset and x/y start coordinates for a
 * pre-SKL primary plane and store them in color_plane[0].
 *
 * Returns 0 on success or a negative error code if the stride check
 * fails.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* gen4+ have an alignable surface base; older gens scan out from 0 */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/* Point at the opposite corner for 180°/mirrored scanout */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3362
/*
 * Atomic check for pre-SKL primary planes: validate rotation (via
 * chv_plane_check_rotation), clip the plane (no scaling supported),
 * validate source coordinates and surface layout, then precompute
 * the DSPCNTR value for the commit phase.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	/* Nothing more to validate for invisible planes */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3396
/*
 * Program a pre-SKL primary plane from precomputed plane state.
 * All register writes happen under the uncore lock with the _FW
 * accessors; DSPCNTR is written just before the surface register so
 * a plane enable latches together with the new surface address.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen4+ program an aligned offset; older gens the linear offset */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own pos/size/alpha regs */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	/* HSW/BDW take the x/y offset via DSPOFFSET; other gen4+ via
	 * DSPLINOFF/DSPTILEOFF.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3463
/*
 * Disable a pre-SKL primary plane while keeping the crtc-derived
 * gamma/csc bits of DSPCNTR programmed (see comment below).
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3494
/*
 * Read back whether the plane is currently enabled in hardware and
 * which pipe it is attached to (pre-ilk planes carry a pipe select
 * in DSPCNTR). Returns false without touching *pipe if the power
 * domain is off.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3529
3530 static u32
3531 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3532 {
3533         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3534                 return 64;
3535         else
3536                 return intel_tile_width_bytes(fb, color_plane);
3537 }
3538
/*
 * Disable one pipe scaler: clear its control, window position and
 * window size registers.
 */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
3548
3549 /*
3550  * This function detaches (aka. unbinds) unused scalers in hardware
3551  */
3552 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3553 {
3554         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3555         const struct intel_crtc_scaler_state *scaler_state =
3556                 &crtc_state->scaler_state;
3557         int i;
3558
3559         /* loop through and disable scalers that aren't in use */
3560         for (i = 0; i < intel_crtc->num_scalers; i++) {
3561                 if (!scaler_state->scalers[i].in_use)
3562                         skl_detach_scaler(intel_crtc, i);
3563         }
3564 }
3565
3566 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3567                                           int color_plane, unsigned int rotation)
3568 {
3569         /*
3570          * The stride is either expressed as a multiple of 64 bytes chunks for
3571          * linear buffers or in number of tiles for tiled buffers.
3572          */
3573         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3574                 return 64;
3575         else if (drm_rotation_90_or_270(rotation))
3576                 return intel_tile_height(fb, color_plane);
3577         else
3578                 return intel_tile_width_bytes(fb, color_plane);
3579 }
3580
3581 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3582                      int color_plane)
3583 {
3584         const struct drm_framebuffer *fb = plane_state->base.fb;
3585         unsigned int rotation = plane_state->base.rotation;
3586         u32 stride = plane_state->color_plane[color_plane].stride;
3587
3588         if (color_plane >= fb->format->num_planes)
3589                 return 0;
3590
3591         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3592 }
3593
3594 static u32 skl_plane_ctl_format(u32 pixel_format)
3595 {
3596         switch (pixel_format) {
3597         case DRM_FORMAT_C8:
3598                 return PLANE_CTL_FORMAT_INDEXED;
3599         case DRM_FORMAT_RGB565:
3600                 return PLANE_CTL_FORMAT_RGB_565;
3601         case DRM_FORMAT_XBGR8888:
3602         case DRM_FORMAT_ABGR8888:
3603                 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3604         case DRM_FORMAT_XRGB8888:
3605         case DRM_FORMAT_ARGB8888:
3606                 return PLANE_CTL_FORMAT_XRGB_8888;
3607         case DRM_FORMAT_XRGB2101010:
3608                 return PLANE_CTL_FORMAT_XRGB_2101010;
3609         case DRM_FORMAT_XBGR2101010:
3610                 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3611         case DRM_FORMAT_XBGR16161616F:
3612         case DRM_FORMAT_ABGR16161616F:
3613                 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3614         case DRM_FORMAT_XRGB16161616F:
3615         case DRM_FORMAT_ARGB16161616F:
3616                 return PLANE_CTL_FORMAT_XRGB_16161616F;
3617         case DRM_FORMAT_YUYV:
3618                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3619         case DRM_FORMAT_YVYU:
3620                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3621         case DRM_FORMAT_UYVY:
3622                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3623         case DRM_FORMAT_VYUY:
3624                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3625         case DRM_FORMAT_NV12:
3626                 return PLANE_CTL_FORMAT_NV12;
3627         case DRM_FORMAT_P010:
3628                 return PLANE_CTL_FORMAT_P010;
3629         case DRM_FORMAT_P012:
3630                 return PLANE_CTL_FORMAT_P012;
3631         case DRM_FORMAT_P016:
3632                 return PLANE_CTL_FORMAT_P016;
3633         case DRM_FORMAT_Y210:
3634                 return PLANE_CTL_FORMAT_Y210;
3635         case DRM_FORMAT_Y212:
3636                 return PLANE_CTL_FORMAT_Y212;
3637         case DRM_FORMAT_Y216:
3638                 return PLANE_CTL_FORMAT_Y216;
3639         case DRM_FORMAT_XVYU2101010:
3640                 return PLANE_CTL_FORMAT_Y410;
3641         case DRM_FORMAT_XVYU12_16161616:
3642                 return PLANE_CTL_FORMAT_Y412;
3643         case DRM_FORMAT_XVYU16161616:
3644                 return PLANE_CTL_FORMAT_Y416;
3645         default:
3646                 MISSING_CASE(pixel_format);
3647         }
3648
3649         return 0;
3650 }
3651
3652 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3653 {
3654         if (!plane_state->base.fb->format->has_alpha)
3655                 return PLANE_CTL_ALPHA_DISABLE;
3656
3657         switch (plane_state->base.pixel_blend_mode) {
3658         case DRM_MODE_BLEND_PIXEL_NONE:
3659                 return PLANE_CTL_ALPHA_DISABLE;
3660         case DRM_MODE_BLEND_PREMULTI:
3661                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3662         case DRM_MODE_BLEND_COVERAGE:
3663                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3664         default:
3665                 MISSING_CASE(plane_state->base.pixel_blend_mode);
3666                 return PLANE_CTL_ALPHA_DISABLE;
3667         }
3668 }
3669
3670 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3671 {
3672         if (!plane_state->base.fb->format->has_alpha)
3673                 return PLANE_COLOR_ALPHA_DISABLE;
3674
3675         switch (plane_state->base.pixel_blend_mode) {
3676         case DRM_MODE_BLEND_PIXEL_NONE:
3677                 return PLANE_COLOR_ALPHA_DISABLE;
3678         case DRM_MODE_BLEND_PREMULTI:
3679                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3680         case DRM_MODE_BLEND_COVERAGE:
3681                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3682         default:
3683                 MISSING_CASE(plane_state->base.pixel_blend_mode);
3684                 return PLANE_COLOR_ALPHA_DISABLE;
3685         }
3686 }
3687
3688 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
3689 {
3690         switch (fb_modifier) {
3691         case DRM_FORMAT_MOD_LINEAR:
3692                 break;
3693         case I915_FORMAT_MOD_X_TILED:
3694                 return PLANE_CTL_TILED_X;
3695         case I915_FORMAT_MOD_Y_TILED:
3696                 return PLANE_CTL_TILED_Y;
3697         case I915_FORMAT_MOD_Y_TILED_CCS:
3698                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3699         case I915_FORMAT_MOD_Yf_TILED:
3700                 return PLANE_CTL_TILED_YF;
3701         case I915_FORMAT_MOD_Yf_TILED_CCS:
3702                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3703         default:
3704                 MISSING_CASE(fb_modifier);
3705         }
3706
3707         return 0;
3708 }
3709
3710 static u32 skl_plane_ctl_rotate(unsigned int rotate)
3711 {
3712         switch (rotate) {
3713         case DRM_MODE_ROTATE_0:
3714                 break;
3715         /*
3716          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
3717          * while i915 HW rotation is clockwise, thats why this swapping.
3718          */
3719         case DRM_MODE_ROTATE_90:
3720                 return PLANE_CTL_ROTATE_270;
3721         case DRM_MODE_ROTATE_180:
3722                 return PLANE_CTL_ROTATE_180;
3723         case DRM_MODE_ROTATE_270:
3724                 return PLANE_CTL_ROTATE_90;
3725         default:
3726                 MISSING_CASE(rotate);
3727         }
3728
3729         return 0;
3730 }
3731
3732 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3733 {
3734         switch (reflect) {
3735         case 0:
3736                 break;
3737         case DRM_MODE_REFLECT_X:
3738                 return PLANE_CTL_FLIP_HORIZONTAL;
3739         case DRM_MODE_REFLECT_Y:
3740         default:
3741                 MISSING_CASE(reflect);
3742         }
3743
3744         return 0;
3745 }
3746
3747 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3748 {
3749         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3750         u32 plane_ctl = 0;
3751
3752         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3753                 return plane_ctl;
3754
3755         if (crtc_state->gamma_enable)
3756                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
3757
3758         if (crtc_state->csc_enable)
3759                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
3760
3761         return plane_ctl;
3762 }
3763
/*
 * Compute the plane-state derived part of PLANE_CTL for SKL+ planes:
 * enable, pixel format, tiling, rotation/flip and color keying. On
 * pre-GLK hardware the alpha/gamma/YUV range+csc bits also live here
 * (GLK+ moved them to PLANE_COLOR_CTL, see glk_plane_color_ctl()).
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* These bits moved to PLANE_COLOR_CTL on glk+ */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is only supported on gen10+ */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3802
3803 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
3804 {
3805         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3806         u32 plane_color_ctl = 0;
3807
3808         if (INTEL_GEN(dev_priv) >= 11)
3809                 return plane_color_ctl;
3810
3811         if (crtc_state->gamma_enable)
3812                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3813
3814         if (crtc_state->csc_enable)
3815                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3816
3817         return plane_color_ctl;
3818 }
3819
/*
 * Compute the plane-state derived PLANE_COLOR_CTL value for GLK+:
 * alpha blending mode and, for YUV framebuffers, either a fixed
 * YUV->RGB csc mode or (for ICL HDR-capable planes) the explicit
 * input csc.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR-capable planes use the programmable input csc instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
3846
/*
 * Restore display state after a reset.
 *
 * Re-reads and sanitizes the current hw state, then, if a duplicated
 * atomic @state was saved beforehand, commits it back with a forced
 * recalculation on every crtc.
 *
 * Returns 0 when there was no state to restore, otherwise the result
 * of the commit.  The caller is expected to already hold all modeset
 * locks via @ctx, so -EDEADLK should be impossible here.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	/* No saved state: the hw state readout above is all we can do. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* All locks are held already, so a deadlock here is a bug. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
3885
3886 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3887 {
3888         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3889                 intel_has_gpu_reset(dev_priv));
3890 }
3891
/*
 * Prepare the display for an impending GPU reset.
 *
 * When the reset will clobber display hw state (or the
 * force_reset_modeset_test modparam asks for it), grab all modeset
 * locks, stash a duplicate of the current atomic state in
 * dev_priv->modeset_restore_state and disable all crtcs, so that
 * intel_finish_reset() can restore things afterwards.  The
 * I915_RESET_MODESET flag tells intel_finish_reset() the locks are
 * held and must be dropped.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stashed for intel_finish_reset(); locks stay held until then. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3947
/*
 * Counterpart of intel_prepare_reset(): restore the saved display
 * state after a GPU reset and drop the modeset locks taken before it.
 *
 * When the reset did not actually clobber the display (test mode),
 * only the saved state is recommitted; otherwise display hw is fully
 * re-initialized first.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupt setup lost across the reset. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3998
3999 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4000 {
4001         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4002         enum pipe pipe = crtc->pipe;
4003         u32 tmp;
4004
4005         tmp = I915_READ(PIPE_CHICKEN(pipe));
4006
4007         /*
4008          * Display WA #1153: icl
4009          * enable hardware to bypass the alpha math
4010          * and rounding for per-pixel values 00 and 0xff
4011          */
4012         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4013         /*
4014          * Display WA # 1605353570: icl
4015          * Set the pixel rounding bit to 1 for allowing
4016          * passthrough of Frame buffer pixels unmodified
4017          * across pipe
4018          */
4019         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4020         I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4021 }
4022
/*
 * Update pipe source size and panel fitter state for a flip that was
 * allowed to skip a full modeset (see the comment block below).
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC holds (width - 1) in the high half, (height - 1) low. */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	/* Re-apply the icl chicken-bit workarounds on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
4061
/*
 * Switch both ends of the FDI link from a training pattern to the
 * normal pattern once link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		/* clear-then-set: net effect is the train-none bit set */
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4102
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs training pattern 1 (until bit lock) and then pattern 2 (until
 * symbol lock) on the CPU FDI TX / PCH FDI RX pair, polling
 * FDI_RX_IIR for the lock bits after each pattern.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; on success write the status bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock; on success write the status bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4196
/*
 * FDI TX vswing/pre-emphasis settings (FDI_LINK_TRAIN_VOL_EMP_MASK
 * field values) tried in order by the SNB and IVB link training
 * routines below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4203
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Unlike ILK, this walks the snb_b_fdi_train_param vswing table for
 * each training pattern, polling the lock bit up to five times per
 * voltage level before moving to the next entry.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* ack the lock by writing the bit back */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same voltage walk, this time polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4336
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Steps through each snb_b_fdi_train_param vswing/pre-emphasis
 * setting (trying each level twice), disabling and re-enabling the
 * link for every attempt: pattern 1 until bit lock, then pattern 2
 * until symbol lock.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each voltage level is attempted twice */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4456
/*
 * Enable the FDI PLLs: first the PCH FDI RX PLL (with port width and
 * BPC taken from the pipe), then switch the RX clock source from
 * Rawclk to PCDclk, and finally make sure the CPU FDI TX PLL is
 * running.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* BPC in FDI rx is kept consistent with that in PIPECONF */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4493
/*
 * Disable the FDI PLLs in reverse order of ironlake_fdi_pll_enable():
 * switch RX back to Rawclk, stop the CPU TX PLL, then the PCH RX PLL,
 * waiting for each step to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4523
/*
 * Disable an active FDI link: turn off TX and RX, apply the IBX clock
 * pointer workaround, and leave both ends parked in training
 * pattern 1 with the RX BPC kept in sync with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4576
4577 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4578 {
4579         struct drm_crtc *crtc;
4580         bool cleanup_done;
4581
4582         drm_for_each_crtc(crtc, &dev_priv->drm) {
4583                 struct drm_crtc_commit *commit;
4584                 spin_lock(&crtc->commit_lock);
4585                 commit = list_first_entry_or_null(&crtc->commit_list,
4586                                                   struct drm_crtc_commit, commit_entry);
4587                 cleanup_done = commit ?
4588                         try_wait_for_completion(&commit->cleanup_done) : true;
4589                 spin_unlock(&crtc->commit_lock);
4590
4591                 if (cleanup_done)
4592                         continue;
4593
4594                 drm_crtc_wait_one_vblank(crtc);
4595
4596                 return true;
4597         }
4598
4599         return false;
4600 }
4601
4602 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4603 {
4604         u32 temp;
4605
4606         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4607
4608         mutex_lock(&dev_priv->sb_lock);
4609
4610         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4611         temp |= SBI_SSCCTL_DISABLE;
4612         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4613
4614         mutex_unlock(&dev_priv->sb_lock);
4615 }
4616
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Start from a known state: modulator disabled, pixel clock gated. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Total divisor = integer part (divsel+2) plus
		 * fractional phase increment, per auxdiv power of two. */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	/* NOTE(review): this masks the direction bit against the INCVAL
	 * mask; looks like it was meant to validate phaseinc instead —
	 * confirm against the register spec. */
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Finally ungate the pixel clock. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4695
/*
 * Read back the currently programmed iCLKIP frequency (in KHz, matching
 * what lpt_program_iclkip() was given), or 0 if the clock is gated or
 * the modulator is disabled. Inverse of the programming sequence above.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated -> iCLKIP effectively off. */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reconstruct the total divisor, then invert the divider formula
	 * used in lpt_program_iclkip(). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4732
/*
 * Copy the CPU transcoder's H/V timing registers to the given PCH
 * transcoder so both sides of the FDI link agree on the mode timings.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                                enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4756
/*
 * Toggle FDI B/C lane bifurcation via the SOUTH_CHICKEN1 register.
 * No-op if the bit already matches; otherwise warns if FDI RX on pipe
 * B or C is still enabled, since the bit must not change while in use.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* Already in the requested state - nothing to do. */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	/* Posting read to make sure the write has landed. */
	POSTING_READ(SOUTH_CHICKEN1);
}
4776
4777 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4778 {
4779         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4780         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4781
4782         switch (crtc->pipe) {
4783         case PIPE_A:
4784                 break;
4785         case PIPE_B:
4786                 if (crtc_state->fdi_lanes > 2)
4787                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
4788                 else
4789                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
4790
4791                 break;
4792         case PIPE_C:
4793                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4794
4795                 break;
4796         default:
4797                 BUG();
4798         }
4799 }
4800
4801 /*
4802  * Finds the encoder associated with the given CRTC. This can only be
4803  * used when we know that the CRTC isn't feeding multiple encoders!
4804  */
4805 static struct intel_encoder *
4806 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4807                            const struct intel_crtc_state *crtc_state)
4808 {
4809         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4810         const struct drm_connector_state *connector_state;
4811         const struct drm_connector *connector;
4812         struct intel_encoder *encoder = NULL;
4813         int num_encoders = 0;
4814         int i;
4815
4816         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4817                 if (connector_state->crtc != &crtc->base)
4818                         continue;
4819
4820                 encoder = to_intel_encoder(connector_state->best_encoder);
4821                 num_encoders++;
4822         }
4823
4824         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4825              num_encoders, pipe_name(crtc->pipe));
4826
4827         return encoder;
4828 }
4829
4830 /*
4831  * Enable PCH resources required for PCH ports:
4832  *   - PCH PLLs
4833  *   - FDI training & RX/TX
4834  *   - update transcoder timings
4835  *   - DP transcoding bits
4836  *   - transcoder
4837  */
4838 static void ironlake_pch_enable(const struct intel_atomic_state *state,
4839                                 const struct intel_crtc_state *crtc_state)
4840 {
4841         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4842         struct drm_device *dev = crtc->base.dev;
4843         struct drm_i915_private *dev_priv = to_i915(dev);
4844         int pipe = crtc->pipe;
4845         u32 temp;
4846
4847         assert_pch_transcoder_disabled(dev_priv, pipe);
4848
4849         if (IS_IVYBRIDGE(dev_priv))
4850                 ivybridge_update_fdi_bc_bifurcation(crtc_state);
4851
4852         /* Write the TU size bits before fdi link training, so that error
4853          * detection works. */
4854         I915_WRITE(FDI_RX_TUSIZE1(pipe),
4855                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4856
4857         /* For PCH output, training FDI link */
4858         dev_priv->display.fdi_link_train(crtc, crtc_state);
4859
4860         /* We need to program the right clock selection before writing the pixel
4861          * mutliplier into the DPLL. */
4862         if (HAS_PCH_CPT(dev_priv)) {
4863                 u32 sel;
4864
4865                 temp = I915_READ(PCH_DPLL_SEL);
4866                 temp |= TRANS_DPLL_ENABLE(pipe);
4867                 sel = TRANS_DPLLB_SEL(pipe);
4868                 if (crtc_state->shared_dpll ==
4869                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4870                         temp |= sel;
4871                 else
4872                         temp &= ~sel;
4873                 I915_WRITE(PCH_DPLL_SEL, temp);
4874         }
4875
4876         /* XXX: pch pll's can be enabled any time before we enable the PCH
4877          * transcoder, and we actually should do this to not upset any PCH
4878          * transcoder that already use the clock when we share it.
4879          *
4880          * Note that enable_shared_dpll tries to do the right thing, but
4881          * get_shared_dpll unconditionally resets the pll - we need that to have
4882          * the right LVDS enable sequence. */
4883         intel_enable_shared_dpll(crtc_state);
4884
4885         /* set transcoder timing, panel must allow it */
4886         assert_panel_unlocked(dev_priv, pipe);
4887         ironlake_pch_transcoder_set_timings(crtc_state, pipe);
4888
4889         intel_fdi_normal_train(crtc);
4890
4891         /* For PCH DP, enable TRANS_DP_CTL */
4892         if (HAS_PCH_CPT(dev_priv) &&
4893             intel_crtc_has_dp_encoder(crtc_state)) {
4894                 const struct drm_display_mode *adjusted_mode =
4895                         &crtc_state->base.adjusted_mode;
4896                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4897                 i915_reg_t reg = TRANS_DP_CTL(pipe);
4898                 enum port port;
4899
4900                 temp = I915_READ(reg);
4901                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4902                           TRANS_DP_SYNC_MASK |
4903                           TRANS_DP_BPC_MASK);
4904                 temp |= TRANS_DP_OUTPUT_ENABLE;
4905                 temp |= bpc << 9; /* same format but at 11:9 */
4906
4907                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4908                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4909                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4910                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4911
4912                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
4913                 WARN_ON(port < PORT_B || port > PORT_D);
4914                 temp |= TRANS_DP_PORT_SEL(port);
4915
4916                 I915_WRITE(reg, temp);
4917         }
4918
4919         ironlake_enable_pch_transcoder(crtc_state);
4920 }
4921
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * timings to the (single) PCH transcoder behind pipe A, then enable it.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4938
/*
 * Verify the pipe is actually running after a mode set by checking
 * that the scanline counter (PIPEDSL) advances.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4952
4953 /*
4954  * The hardware phase 0.0 refers to the center of the pixel.
4955  * We want to start from the top/left edge which is phase
4956  * -0.5. That matches how the hardware calculates the scaling
4957  * factors (from top-left of the first pixel to bottom-right
4958  * of the last pixel, as opposed to the pixel centers).
4959  *
4960  * For 4:2:0 subsampled chroma planes we obviously have to
4961  * adjust that so that the chroma sample position lands in
4962  * the right spot.
4963  *
4964  * Note that for packed YCbCr 4:2:2 formats there is no way to
4965  * control chroma siting. The hardware simply replicates the
4966  * chroma samples for both of the luma samples, and thus we don't
4967  * actually get the expected MPEG2 chroma siting convention :(
4968  * The same behaviour is observed on pre-SKL platforms as well.
4969  *
4970  * Theory behind the formula (note that we ignore sub-pixel
4971  * source coordinates):
4972  * s = source sample position
4973  * d = destination sample position
4974  *
4975  * Downscaling 4:1:
4976  * -0.5
4977  * | 0.0
4978  * | |     1.5 (initial phase)
4979  * | |     |
4980  * v v     v
4981  * | s | s | s | s |
4982  * |       d       |
4983  *
4984  * Upscaling 1:4:
4985  * -0.5
4986  * | -0.375 (initial phase)
4987  * | |     0.0
4988  * | |     |
4989  * v v     v
4990  * |       s       |
4991  * | d | d | d | d |
4992  */
4993 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
4994 {
4995         int phase = -0x8000;
4996         u16 trip = 0;
4997
4998         if (chroma_cosited)
4999                 phase += (sub - 1) * 0x8000 / sub;
5000
5001         phase += scale / (2 * sub);
5002
5003         /*
5004          * Hardware initial phase limited to [-0.5:1.5].
5005          * Since the max hardware scale factor is 3.0, we
5006          * should never actually excdeed 1.0 here.
5007          */
5008         WARN_ON(phase < -0x8000 || phase > 0x18000);
5009
5010         if (phase < 0)
5011                 phase = 0x10000 + phase;
5012         else
5013                 trip = PS_PHASE_TRIP;
5014
5015         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5016 }
5017
/*
 * Stage a scaler allocation request (or release) for one scaler user
 * (the CRTC itself or a plane) in crtc_state->scaler_state.
 *
 * Only bookkeeping is updated here; the actual scaler registers are
 * programmed later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the requested scaling is outside
 * the hardware limits or not supported in the current mode.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV sources have their own minimum size limits. */
	if (format && is_planar_yuv_format(format->format) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
5106
5107 /**
5108  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5109  *
5110  * @state: crtc's scaler state
5111  *
5112  * Return
5113  *     0 - scaler_usage updated successfully
5114  *    error - requested scaling cannot be supported or other error condition
5115  */
5116 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5117 {
5118         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5119         bool need_scaler = false;
5120
5121         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5122                 need_scaler = true;
5123
5124         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5125                                  &state->scaler_state.scaler_id,
5126                                  state->pipe_src_w, state->pipe_src_h,
5127                                  adjusted_mode->crtc_hdisplay,
5128                                  adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5129 }
5130
5131 /**
5132  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5133  * @crtc_state: crtc's scaler state
5134  * @plane_state: atomic plane state to update
5135  *
5136  * Return
5137  *     0 - scaler_usage updated successfully
5138  *    error - requested scaling cannot be supported or other error condition
5139  */
5140 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5141                                    struct intel_plane_state *plane_state)
5142 {
5143         struct intel_plane *intel_plane =
5144                 to_intel_plane(plane_state->base.plane);
5145         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5146         struct drm_framebuffer *fb = plane_state->base.fb;
5147         int ret;
5148         bool force_detach = !fb || !plane_state->base.visible;
5149         bool need_scaler = false;
5150
5151         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5152         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5153             fb && is_planar_yuv_format(fb->format->format))
5154                 need_scaler = true;
5155
5156         ret = skl_update_scaler(crtc_state, force_detach,
5157                                 drm_plane_index(&intel_plane->base),
5158                                 &plane_state->scaler_id,
5159                                 drm_rect_width(&plane_state->base.src) >> 16,
5160                                 drm_rect_height(&plane_state->base.src) >> 16,
5161                                 drm_rect_width(&plane_state->base.dst),
5162                                 drm_rect_height(&plane_state->base.dst),
5163                                 fb ? fb->format : NULL, need_scaler);
5164
5165         if (ret || plane_state->scaler_id < 0)
5166                 return ret;
5167
5168         /* check colorkey */
5169         if (plane_state->ckey.flags) {
5170                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5171                               intel_plane->base.base.id,
5172                               intel_plane->base.name);
5173                 return -EINVAL;
5174         }
5175
5176         /* Check src format */
5177         switch (fb->format->format) {
5178         case DRM_FORMAT_RGB565:
5179         case DRM_FORMAT_XBGR8888:
5180         case DRM_FORMAT_XRGB8888:
5181         case DRM_FORMAT_ABGR8888:
5182         case DRM_FORMAT_ARGB8888:
5183         case DRM_FORMAT_XRGB2101010:
5184         case DRM_FORMAT_XBGR2101010:
5185         case DRM_FORMAT_XBGR16161616F:
5186         case DRM_FORMAT_ABGR16161616F:
5187         case DRM_FORMAT_XRGB16161616F:
5188         case DRM_FORMAT_ARGB16161616F:
5189         case DRM_FORMAT_YUYV:
5190         case DRM_FORMAT_YVYU:
5191         case DRM_FORMAT_UYVY:
5192         case DRM_FORMAT_VYUY:
5193         case DRM_FORMAT_NV12:
5194         case DRM_FORMAT_P010:
5195         case DRM_FORMAT_P012:
5196         case DRM_FORMAT_P016:
5197         case DRM_FORMAT_Y210:
5198         case DRM_FORMAT_Y212:
5199         case DRM_FORMAT_Y216:
5200         case DRM_FORMAT_XVYU2101010:
5201         case DRM_FORMAT_XVYU12_16161616:
5202         case DRM_FORMAT_XVYU16161616:
5203                 break;
5204         default:
5205                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5206                               intel_plane->base.base.id, intel_plane->base.name,
5207                               fb->base.id, fb->format->format);
5208                 return -EINVAL;
5209         }
5210
5211         return 0;
5212 }
5213
5214 static void skylake_scaler_disable(struct intel_crtc *crtc)
5215 {
5216         int i;
5217
5218         for (i = 0; i < crtc->num_scalers; i++)
5219                 skl_detach_scaler(crtc, i);
5220 }
5221
/*
 * Program the pipe scaler assigned to this crtc as a panel fitter,
 * using the window position/size staged in crtc_state->pch_pfit.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* pfit enabled without an allocated scaler is a state bug. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in the high 16 bits, height in the low. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* 16.16 fixed-point downscale factors src/dst. */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5258
5259 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5260 {
5261         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5262         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5263         int pipe = crtc->pipe;
5264
5265         if (crtc_state->pch_pfit.enabled) {
5266                 /* Force use of hard-coded filter coefficients
5267                  * as some pre-programmed values are broken,
5268                  * e.g. x201.
5269                  */
5270                 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5271                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5272                                                  PF_PIPE_SEL_IVB(pipe));
5273                 else
5274                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5275                 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5276                 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5277         }
5278 }
5279
/*
 * Enable IPS (Intermediate Pixel Storage): via the pcode mailbox on
 * Broadwell, via the IPS_CTL register otherwise. No-op unless the
 * state has ips_enabled set.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		mutex_unlock(&dev_priv->pcu_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(&dev_priv->uncore,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5319
/*
 * Disable IPS, mirroring hsw_enable_ips(): pcode mailbox on Broadwell,
 * IPS_CTL write otherwise, then wait a vblank before planes may be
 * disabled. No-op unless the state has ips_enabled set.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->pcu_lock);
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_wait_for_register(&dev_priv->uncore,
					    IPS_CTL, IPS_ENABLE, 0,
					    100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		/* Posting read to flush the disable before waiting. */
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5350
5351 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5352 {
5353         if (intel_crtc->overlay) {
5354                 struct drm_device *dev = intel_crtc->base.dev;
5355
5356                 mutex_lock(&dev->struct_mutex);
5357                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5358                 mutex_unlock(&dev->struct_mutex);
5359         }
5360
5361         /* Let userspace switch the overlay on again. In most cases userspace
5362          * has to recompute where to put it anyway.
5363          */
5364 }
5365
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 * @new_crtc_state: the enabling state
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled: re-enabling gen2 CPU FIFO underrun reporting and manually
 * checking for underruns that may have occurred while reporting was off.  Note
 * that this may be called due to an explicit primary plane update, or due to
 * an implicit re-enable that is caused when a sprite plane is updated to no
 * longer completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
			  const struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
5400
/*
 * Non-atomic counterpart of intel_pre_plane_update(): prepares for turning
 * off the primary plane outside a full atomic commit.
 *
 * FIXME get rid of this and use pre_plane_update
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* IPS must be turned off before the primary plane goes away. */
	hsw_disable_ips(to_intel_crtc_state(crtc->state));

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
5432
5433 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5434                                        const struct intel_crtc_state *new_crtc_state)
5435 {
5436         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5437         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5438
5439         if (!old_crtc_state->ips_enabled)
5440                 return false;
5441
5442         if (needs_modeset(&new_crtc_state->base))
5443                 return true;
5444
5445         /*
5446          * Workaround : Do not read or write the pipe palette/gamma data while
5447          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5448          *
5449          * Disable IPS before we program the LUT.
5450          */
5451         if (IS_HASWELL(dev_priv) &&
5452             (new_crtc_state->base.color_mgmt_changed ||
5453              new_crtc_state->update_pipe) &&
5454             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5455                 return true;
5456
5457         return !new_crtc_state->ips_enabled;
5458 }
5459
5460 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5461                                        const struct intel_crtc_state *new_crtc_state)
5462 {
5463         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5464         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5465
5466         if (!new_crtc_state->ips_enabled)
5467                 return false;
5468
5469         if (needs_modeset(&new_crtc_state->base))
5470                 return true;
5471
5472         /*
5473          * Workaround : Do not read or write the pipe palette/gamma data while
5474          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5475          *
5476          * Re-enable IPS after the LUT has been programmed.
5477          */
5478         if (IS_HASWELL(dev_priv) &&
5479             (new_crtc_state->base.color_mgmt_changed ||
5480              new_crtc_state->update_pipe) &&
5481             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5482                 return true;
5483
5484         /*
5485          * We can't read out IPS on broadwell, assume the worst and
5486          * forcibly enable IPS on the first fastset.
5487          */
5488         if (new_crtc_state->update_pipe &&
5489             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5490                 return true;
5491
5492         return !old_crtc_state->ips_enabled;
5493 }
5494
5495 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5496                           const struct intel_crtc_state *crtc_state)
5497 {
5498         if (!crtc_state->nv12_planes)
5499                 return false;
5500
5501         /* WA Display #0827: Gen9:all */
5502         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5503                 return true;
5504
5505         return false;
5506 }
5507
/*
 * Post-commit plane bookkeeping for one crtc: frontbuffer flush, optimal
 * watermarks, IPS re-enable, FBC update and the NV12 workaround teardown.
 * Runs after the plane update has been latched (i.e. past the vblank).
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	/* New (post-commit) crtc state from the same atomic state. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);

	/* Flush frontbuffer tracking for all planes touched by this commit. */
	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	/* Program the optimal (post-vblank) watermarks if still active. */
	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	/* IPS may only come back on once the new plane state is in place. */
	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		intel_fbc_post_update(crtc);

		/*
		 * Run the post-enable hook when the primary plane just became
		 * visible, either via a modeset or an implicit re-enable.
		 */
		if (new_primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Display WA 827: drop the workaround once no NV12 planes remain. */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_827(dev_priv, crtc->pipe, false);
	}
}
5547
/*
 * Pre-commit plane bookkeeping for one crtc: disable IPS, prepare FBC, arm
 * workarounds and program intermediate watermarks.  Statement order here is
 * deliberate — several steps must complete (with vblank waits) before the
 * planes are actually reprogrammed.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* IPS has to be off before the planes/LUT are reprogrammed. */
	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827: arm the workaround before NV12 planes appear. */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_827(dev_priv, crtc->pipe, true);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
	    old_crtc_state->base.active)
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks.  For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
5637
5638 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
5639                                       struct intel_crtc *crtc)
5640 {
5641         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5642         const struct intel_crtc_state *new_crtc_state =
5643                 intel_atomic_get_new_crtc_state(state, crtc);
5644         unsigned int update_mask = new_crtc_state->update_planes;
5645         const struct intel_plane_state *old_plane_state;
5646         struct intel_plane *plane;
5647         unsigned fb_bits = 0;
5648         int i;
5649
5650         intel_crtc_dpms_overlay_disable(crtc);
5651
5652         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
5653                 if (crtc->pipe != plane->pipe ||
5654                     !(update_mask & BIT(plane->id)))
5655                         continue;
5656
5657                 intel_disable_plane(plane, new_crtc_state);
5658
5659                 if (old_plane_state->base.visible)
5660                         fb_bits |= plane->frontbuffer_bit;
5661         }
5662
5663         intel_frontbuffer_flip(dev_priv, fb_bits);
5664 }
5665
5666 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5667                                           struct intel_crtc_state *crtc_state,
5668                                           struct drm_atomic_state *old_state)
5669 {
5670         struct drm_connector_state *conn_state;
5671         struct drm_connector *conn;
5672         int i;
5673
5674         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5675                 struct intel_encoder *encoder =
5676                         to_intel_encoder(conn_state->best_encoder);
5677
5678                 if (conn_state->crtc != crtc)
5679                         continue;
5680
5681                 if (encoder->pre_pll_enable)
5682                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5683         }
5684 }
5685
5686 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5687                                       struct intel_crtc_state *crtc_state,
5688                                       struct drm_atomic_state *old_state)
5689 {
5690         struct drm_connector_state *conn_state;
5691         struct drm_connector *conn;
5692         int i;
5693
5694         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5695                 struct intel_encoder *encoder =
5696                         to_intel_encoder(conn_state->best_encoder);
5697
5698                 if (conn_state->crtc != crtc)
5699                         continue;
5700
5701                 if (encoder->pre_enable)
5702                         encoder->pre_enable(encoder, crtc_state, conn_state);
5703         }
5704 }
5705
5706 static void intel_encoders_enable(struct drm_crtc *crtc,
5707                                   struct intel_crtc_state *crtc_state,
5708                                   struct drm_atomic_state *old_state)
5709 {
5710         struct drm_connector_state *conn_state;
5711         struct drm_connector *conn;
5712         int i;
5713
5714         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5715                 struct intel_encoder *encoder =
5716                         to_intel_encoder(conn_state->best_encoder);
5717
5718                 if (conn_state->crtc != crtc)
5719                         continue;
5720
5721                 if (encoder->enable)
5722                         encoder->enable(encoder, crtc_state, conn_state);
5723                 intel_opregion_notify_encoder(encoder, true);
5724         }
5725 }
5726
5727 static void intel_encoders_disable(struct drm_crtc *crtc,
5728                                    struct intel_crtc_state *old_crtc_state,
5729                                    struct drm_atomic_state *old_state)
5730 {
5731         struct drm_connector_state *old_conn_state;
5732         struct drm_connector *conn;
5733         int i;
5734
5735         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5736                 struct intel_encoder *encoder =
5737                         to_intel_encoder(old_conn_state->best_encoder);
5738
5739                 if (old_conn_state->crtc != crtc)
5740                         continue;
5741
5742                 intel_opregion_notify_encoder(encoder, false);
5743                 if (encoder->disable)
5744                         encoder->disable(encoder, old_crtc_state, old_conn_state);
5745         }
5746 }
5747
5748 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5749                                         struct intel_crtc_state *old_crtc_state,
5750                                         struct drm_atomic_state *old_state)
5751 {
5752         struct drm_connector_state *old_conn_state;
5753         struct drm_connector *conn;
5754         int i;
5755
5756         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5757                 struct intel_encoder *encoder =
5758                         to_intel_encoder(old_conn_state->best_encoder);
5759
5760                 if (old_conn_state->crtc != crtc)
5761                         continue;
5762
5763                 if (encoder->post_disable)
5764                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5765         }
5766 }
5767
5768 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5769                                             struct intel_crtc_state *old_crtc_state,
5770                                             struct drm_atomic_state *old_state)
5771 {
5772         struct drm_connector_state *old_conn_state;
5773         struct drm_connector *conn;
5774         int i;
5775
5776         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5777                 struct intel_encoder *encoder =
5778                         to_intel_encoder(old_conn_state->best_encoder);
5779
5780                 if (old_conn_state->crtc != crtc)
5781                         continue;
5782
5783                 if (encoder->post_pll_disable)
5784                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5785         }
5786 }
5787
5788 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5789                                        struct intel_crtc_state *crtc_state,
5790                                        struct drm_atomic_state *old_state)
5791 {
5792         struct drm_connector_state *conn_state;
5793         struct drm_connector *conn;
5794         int i;
5795
5796         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5797                 struct intel_encoder *encoder =
5798                         to_intel_encoder(conn_state->best_encoder);
5799
5800                 if (conn_state->crtc != crtc)
5801                         continue;
5802
5803                 if (encoder->update_pipe)
5804                         encoder->update_pipe(encoder, crtc_state, conn_state);
5805         }
5806 }
5807
5808 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
5809 {
5810         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5811         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5812
5813         plane->disable_plane(plane, crtc_state);
5814 }
5815
/*
 * Full modeset enable sequence for ILK-style (PCH) platforms.  The order of
 * the steps below follows the hardware's required bring-up sequence and must
 * not be rearranged.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* Enabling an already-active crtc would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (pipe_config->has_pch_encoder)
		intel_prepare_shared_dpll(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* FDI link M/N values are only relevant with a PCH encoder. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (pipe_config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(pipe_config);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
	intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (pipe_config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Safe to re-arm underrun reporting now that everything is up. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5912
5913 /* IPS only exists on ULT machines and is tied to pipe A. */
5914 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5915 {
5916         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5917 }
5918
5919 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5920                                             enum pipe pipe, bool apply)
5921 {
5922         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5923         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5924
5925         if (apply)
5926                 val |= mask;
5927         else
5928                 val &= ~mask;
5929
5930         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5931 }
5932
5933 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5934 {
5935         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5936         enum pipe pipe = crtc->pipe;
5937         u32 val;
5938
5939         val = MBUS_DBOX_A_CREDIT(2);
5940         val |= MBUS_DBOX_BW_CREDIT(1);
5941         val |= MBUS_DBOX_B_CREDIT(8);
5942
5943         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5944 }
5945
/*
 * Full modeset enable sequence for HSW+ (DDI) platforms, including the
 * gen9+ and ICL+ variations and DSI special-casing.  The statement order
 * follows the hardware bring-up sequence and must not be rearranged.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;

	/* Enabling an already-active crtc would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (pipe_config->shared_dpll)
		intel_enable_shared_dpll(pipe_config);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	/* DSI transcoders handle their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(pipe_config);

	intel_set_pipe_src_size(pipe_config);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   pipe_config->pixel_multiplier - 1);
	}

	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(pipe_config);

	haswell_set_pipemisc(pipe_config);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 pipe_config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(pipe_config);
	else
		ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(intel_crtc);

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* The clock gating workaround only needs to last until one vblank. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
6056
6057 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6058 {
6059         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6060         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6061         enum pipe pipe = crtc->pipe;
6062
6063         /* To avoid upsetting the power well on haswell only disable the pfit if
6064          * it's in use. The hw state code will make sure we get this right. */
6065         if (old_crtc_state->pch_pfit.enabled) {
6066                 I915_WRITE(PF_CTL(pipe), 0);
6067                 I915_WRITE(PF_WIN_POS(pipe), 0);
6068                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6069         }
6070 }
6071
/*
 * Full modeset disable sequence for ILK-style (PCH) platforms: encoders,
 * pipe, pfit, FDI and finally the PCH transcoder/DPLL routing.  The order
 * mirrors the enable sequence in reverse and must not be rearranged.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm underrun reporting now that everything is shut down. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6130
/*
 * Disable a CRTC on HSW and later.
 *
 * Sequence: encoder disable hooks, CPU pipe, MST payload teardown,
 * DDI transcoder, DSC, scalers/panel fitter, then the encoder
 * post-disable and post-PLL-disable hooks.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	/* Release the MST virtual channel payload before the transcoder goes down. */
	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	intel_dsc_disable(old_crtc_state);

	/* Gen9+ has the unified pipe scalers; HSW/BDW reuse the ILK pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
6165
/*
 * Program and enable the GMCH panel fitter from the precomputed
 * crtc_state. No-op when the state doesn't use the fitter. Must run
 * while the pipe is still disabled (hardware requirement, asserted
 * below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6188
6189 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6190 {
6191         if (port == PORT_NONE)
6192                 return false;
6193
6194         if (IS_ELKHARTLAKE(dev_priv))
6195                 return port <= PORT_C;
6196
6197         if (INTEL_GEN(dev_priv) >= 11)
6198                 return port <= PORT_B;
6199
6200         return false;
6201 }
6202
6203 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6204 {
6205         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6206                 return port >= PORT_C && port <= PORT_F;
6207
6208         return false;
6209 }
6210
6211 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6212 {
6213         if (!intel_port_is_tc(dev_priv, port))
6214                 return PORT_TC_NONE;
6215
6216         return port - PORT_C;
6217 }
6218
6219 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6220 {
6221         switch (port) {
6222         case PORT_A:
6223                 return POWER_DOMAIN_PORT_DDI_A_LANES;
6224         case PORT_B:
6225                 return POWER_DOMAIN_PORT_DDI_B_LANES;
6226         case PORT_C:
6227                 return POWER_DOMAIN_PORT_DDI_C_LANES;
6228         case PORT_D:
6229                 return POWER_DOMAIN_PORT_DDI_D_LANES;
6230         case PORT_E:
6231                 return POWER_DOMAIN_PORT_DDI_E_LANES;
6232         case PORT_F:
6233                 return POWER_DOMAIN_PORT_DDI_F_LANES;
6234         default:
6235                 MISSING_CASE(port);
6236                 return POWER_DOMAIN_PORT_OTHER;
6237         }
6238 }
6239
6240 enum intel_display_power_domain
6241 intel_aux_power_domain(struct intel_digital_port *dig_port)
6242 {
6243         switch (dig_port->aux_ch) {
6244         case AUX_CH_A:
6245                 return POWER_DOMAIN_AUX_A;
6246         case AUX_CH_B:
6247                 return POWER_DOMAIN_AUX_B;
6248         case AUX_CH_C:
6249                 return POWER_DOMAIN_AUX_C;
6250         case AUX_CH_D:
6251                 return POWER_DOMAIN_AUX_D;
6252         case AUX_CH_E:
6253                 return POWER_DOMAIN_AUX_E;
6254         case AUX_CH_F:
6255                 return POWER_DOMAIN_AUX_F;
6256         default:
6257                 MISSING_CASE(dig_port->aux_ch);
6258                 return POWER_DOMAIN_AUX_A;
6259         }
6260 }
6261
/*
 * Compute the bitmask of display power domains this CRTC state needs:
 * its pipe and transcoder, the panel fitter (when enabled or forced
 * through), each attached encoder's domain, plus audio (DDI only) and
 * the shared PLL domain where used. Returns 0 for an inactive CRTC.
 */
static u64 get_crtc_power_domains(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Each encoder feeding this CRTC contributes its own domain. */
	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_PLLS);

	return mask;
}
6296
6297 static u64
6298 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6299                                struct intel_crtc_state *crtc_state)
6300 {
6301         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6302         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6303         enum intel_display_power_domain domain;
6304         u64 domains, new_domains, old_domains;
6305
6306         old_domains = intel_crtc->enabled_power_domains;
6307         intel_crtc->enabled_power_domains = new_domains =
6308                 get_crtc_power_domains(crtc, crtc_state);
6309
6310         domains = new_domains & ~old_domains;
6311
6312         for_each_power_domain(domain, domains)
6313                 intel_display_power_get(dev_priv, domain);
6314
6315         return old_domains & ~new_domains;
6316 }
6317
6318 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6319                                       u64 domains)
6320 {
6321         enum intel_display_power_domain domain;
6322
6323         for_each_power_domain(domain, domains)
6324                 intel_display_power_put_unchecked(dev_priv, domain);
6325 }
6326
/*
 * Full CRTC enable sequence for VLV/CHV: program pipe timings and
 * pipeconf, bring up the DPLL, then pfit, LUTs and watermarks, and
 * finally the pipe and encoders. The ordering follows the hardware
 * enable sequence and must not be changed casually.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* CHV pipe B: use legacy blending and a black canvas color. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	dev_priv->display.initial_watermarks(old_intel_state,
					     pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6386
6387 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6388 {
6389         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6390         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6391
6392         I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6393         I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6394 }
6395
/*
 * Full CRTC enable sequence for gen2-4 GMCH platforms: PLL dividers,
 * pipe timings and pipeconf, PLL, pfit, LUTs and watermarks, then the
 * pipe and encoders. The ordering follows the hardware enable
 * sequence.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* Gen2 has no underrun interrupt support. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6448
/*
 * Disable the GMCH panel fitter if the old state used it. Like the
 * enable path, this must run while the pipe is disabled (asserted
 * below).
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6463
/*
 * Full CRTC disable sequence for gen2-4 GMCH platforms: encoders,
 * pipe, pfit, then the DPLL (unless DSI owns it), finishing with
 * watermarks and the i830 keep-alive pipe.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* DSI PLLs are managed by the DSI encoder itself. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* Gen2 has no underrun interrupt support. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6512
/*
 * Force a CRTC off outside of a regular atomic commit (used when
 * sanitizing/taking over the hardware state). Disables all planes,
 * builds a minimal atomic state so the platform ->crtc_disable() hook
 * can run, then scrubs the software state tracking and drops the
 * CRTC's power domain references.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane that is still visible on this CRTC. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Clear out the stale software state so it matches the hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Release every power domain reference this CRTC was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6582
6583 /*
6584  * turn all crtc's off, but do not adjust state
6585  * This has to be paired with a call to intel_modeset_setup_hw_state.
6586  */
6587 int intel_display_suspend(struct drm_device *dev)
6588 {
6589         struct drm_i915_private *dev_priv = to_i915(dev);
6590         struct drm_atomic_state *state;
6591         int ret;
6592
6593         state = drm_atomic_helper_suspend(dev);
6594         ret = PTR_ERR_OR_ZERO(state);
6595         if (ret)
6596                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6597         else
6598                 dev_priv->modeset_restore_state = state;
6599         return ret;
6600 }
6601
/*
 * Default encoder destroy callback: tear down the DRM core encoder
 * state, then free the wrapping intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6609
/*
 * Cross check the actual hw state with our own modeset state tracking
 * (and its internal consistency): an enabled connector must have an
 * active CRTC and a matching encoder, and vice versa. Emits
 * I915_STATE_WARNs on mismatch; never modifies any state.
 */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST encoders are bound to a crtc, not to a connector. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6648
6649 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6650 {
6651         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6652                 return crtc_state->fdi_lanes;
6653
6654         return 0;
6655 }
6656
/*
 * Validate the requested FDI lane count for @pipe. HSW/BDW only have
 * 2 lanes; on 3-pipe IVB pipes B and C share FDI bandwidth, so each
 * case below must also check the other pipe's requirement. Returns 0
 * if the config fits, -EINVAL if not, or a lock-retry error (e.g.
 * -EDEADLK) propagated from acquiring the other pipe's state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* B wants >2 lanes: pipe C must not be using FDI at all. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* C can only run if pipe B is using at most 2 lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6728
/* Returned when the caller should recompute the config with the reduced bpp. */
#define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for
 * this pipe. If the requested bpp doesn't fit the available lanes,
 * lower pipe_bpp in steps of 6 (2 bits/component) and retry; returns
 * RETRY when the caller must recompute with the reduced bpp, -EDEADLK
 * on a lock-retry, or the lane-check result otherwise.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/* Doesn't fit: shave off 2 bits per component and try again. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6777
6778 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
6779 {
6780         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6781         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6782
6783         /* IPS only exists on ULT machines and is tied to pipe A. */
6784         if (!hsw_crtc_supports_ips(crtc))
6785                 return false;
6786
6787         if (!i915_modparams.enable_ips)
6788                 return false;
6789
6790         if (crtc_state->pipe_bpp > 24)
6791                 return false;
6792
6793         /*
6794          * We compare against max which means we must take
6795          * the increased cdclk requirement into account when
6796          * calculating the new cdclk.
6797          *
6798          * Should measure whether using a lower cdclk w/o IPS
6799          */
6800         if (IS_BROADWELL(dev_priv) &&
6801             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6802                 return false;
6803
6804         return true;
6805 }
6806
6807 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6808 {
6809         struct drm_i915_private *dev_priv =
6810                 to_i915(crtc_state->base.crtc->dev);
6811         struct intel_atomic_state *intel_state =
6812                 to_intel_atomic_state(crtc_state->base.state);
6813
6814         if (!hsw_crtc_state_ips_capable(crtc_state))
6815                 return false;
6816
6817         /*
6818          * When IPS gets enabled, the pipe CRC changes. Since IPS gets
6819          * enabled and disabled dynamically based on package C states,
6820          * user space can't make reliable use of the CRCs, so let's just
6821          * completely disable it.
6822          */
6823         if (crtc_state->crc_enabled)
6824                 return false;
6825
6826         /* IPS should be fine as long as at least one plane is enabled. */
6827         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6828                 return false;
6829
6830         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6831         if (IS_BROADWELL(dev_priv) &&
6832             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6833                 return false;
6834
6835         return true;
6836 }
6837
6838 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6839 {
6840         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6841
6842         /* GDG double wide on either pipe, otherwise pipe A only */
6843         return INTEL_GEN(dev_priv) < 4 &&
6844                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6845 }
6846
6847 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6848 {
6849         u32 pixel_rate;
6850
6851         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6852
6853         /*
6854          * We only use IF-ID interlacing. If we ever use
6855          * PF-ID we'll need to adjust the pixel_rate here.
6856          */
6857
6858         if (pipe_config->pch_pfit.enabled) {
6859                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
6860                 u32 pfit_size = pipe_config->pch_pfit.size;
6861
6862                 pipe_w = pipe_config->pipe_src_w;
6863                 pipe_h = pipe_config->pipe_src_h;
6864
6865                 pfit_w = (pfit_size >> 16) & 0xFFFF;
6866                 pfit_h = pfit_size & 0xFFFF;
6867                 if (pipe_w < pfit_w)
6868                         pipe_w = pfit_w;
6869                 if (pipe_h < pfit_h)
6870                         pipe_h = pfit_h;
6871
6872                 if (WARN_ON(!pfit_w || !pfit_h))
6873                         return pixel_rate;
6874
6875                 pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
6876                                      pfit_w * pfit_h);
6877         }
6878
6879         return pixel_rate;
6880 }
6881
6882 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6883 {
6884         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6885
6886         if (HAS_GMCH(dev_priv))
6887                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6888                 crtc_state->pixel_rate =
6889                         crtc_state->base.adjusted_mode.crtc_clock;
6890         else
6891                 crtc_state->pixel_rate =
6892                         ilk_pipe_pixel_rate(crtc_state);
6893 }
6894
/*
 * Validate and adjust CRTC-level constraints for @pipe_config: dot clock
 * limits (enabling double wide mode on gen2/3 where possible), pipe CSC
 * availability for YCbCr output, even pipe source width requirements,
 * and the zero hsync front porch workaround.
 *
 * Returns 0 on success, -EINVAL if the mode cannot be supported, or the
 * result of the FDI configuration for PCH encoders.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders also need the FDI link configuration validated. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6968
6969 static void
6970 intel_reduce_m_n_ratio(u32 *num, u32 *den)
6971 {
6972         while (*num > DATA_LINK_M_N_MASK ||
6973                *den > DATA_LINK_M_N_MASK) {
6974                 *num >>= 1;
6975                 *den >>= 1;
6976         }
6977 }
6978
6979 static void compute_m_n(unsigned int m, unsigned int n,
6980                         u32 *ret_m, u32 *ret_n,
6981                         bool constant_n)
6982 {
6983         /*
6984          * Several DP dongles in particular seem to be fussy about
6985          * too large link M/N values. Give N value as 0x8000 that
6986          * should be acceptable by specific devices. 0x8000 is the
6987          * specified fixed N value for asynchronous clock mode,
6988          * which the devices expect also in synchronous clock mode.
6989          */
6990         if (constant_n)
6991                 *ret_n = 0x8000;
6992         else
6993                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6994
6995         *ret_m = div_u64((u64)m * *ret_n, n);
6996         intel_reduce_m_n_ratio(ret_m, ret_n);
6997 }
6998
6999 void
7000 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7001                        int pixel_clock, int link_clock,
7002                        struct intel_link_m_n *m_n,
7003                        bool constant_n)
7004 {
7005         m_n->tu = 64;
7006
7007         compute_m_n(bits_per_pixel * pixel_clock,
7008                     link_clock * nlanes * 8,
7009                     &m_n->gmch_m, &m_n->gmch_n,
7010                     constant_n);
7011
7012         compute_m_n(pixel_clock, link_clock,
7013                     &m_n->link_m, &m_n->link_n,
7014                     constant_n);
7015 }
7016
7017 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7018 {
7019         if (i915_modparams.panel_use_ssc >= 0)
7020                 return i915_modparams.panel_use_ssc != 0;
7021         return dev_priv->vbt.lvds_use_ssc
7022                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7023 }
7024
7025 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7026 {
7027         return (1 << dpll->n) << 16 | dpll->m2;
7028 }
7029
7030 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7031 {
7032         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7033 }
7034
7035 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7036                                      struct intel_crtc_state *crtc_state,
7037                                      struct dpll *reduced_clock)
7038 {
7039         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7040         u32 fp, fp2 = 0;
7041
7042         if (IS_PINEVIEW(dev_priv)) {
7043                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7044                 if (reduced_clock)
7045                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7046         } else {
7047                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7048                 if (reduced_clock)
7049                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7050         }
7051
7052         crtc_state->dpll_hw_state.fp0 = fp;
7053
7054         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7055             reduced_clock) {
7056                 crtc_state->dpll_hw_state.fp1 = fp2;
7057         } else {
7058                 crtc_state->dpll_hw_state.fp1 = fp;
7059         }
7060 }
7061
/*
 * Recalibrate the PLL B opamp via DPIO sideband writes.
 *
 * The PLLB opamp always calibrates to the max value of 0x3f, so force
 * enable it and set it to a reasonable value instead. The magic values
 * below come from hardware programming notes; the read-modify-write
 * order matters, so do not reorder these accesses.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/* Load a sane opamp setting into the low byte. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/*
	 * NOTE(review): the second pass clears the low byte again and raises
	 * the top bits to 0xb0 — exact bit meanings are undocumented here;
	 * confirm against the DPIO programming notes before changing.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7090
/*
 * Program the PCH transcoder data/link M/N dividers for @crtc_state's
 * pipe. The TU size is packed into the DATA_M1 register together with
 * the data M value.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7103
7104 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7105                                  enum transcoder transcoder)
7106 {
7107         if (IS_HASWELL(dev_priv))
7108                 return transcoder == TRANSCODER_EDP;
7109
7110         /*
7111          * Strictly speaking some registers are available before
7112          * gen7, but we only support DRRS on gen7+
7113          */
7114         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7115 }
7116
/*
 * Program the CPU transcoder (gen5+) or pipe (gen4 and earlier) data and
 * link M/N dividers. The optional @m2_n2 set is written only when DRRS
 * is enabled and the transcoder actually has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Gen4 and earlier use per-pipe, not per-transcoder, registers. */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7150
7151 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7152 {
7153         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7154
7155         if (m_n == M1_N1) {
7156                 dp_m_n = &crtc_state->dp_m_n;
7157                 dp_m2_n2 = &crtc_state->dp_m2_n2;
7158         } else if (m_n == M2_N2) {
7159
7160                 /*
7161                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7162                  * needs to be programmed into M1_N1.
7163                  */
7164                 dp_m_n = &crtc_state->dp_m2_n2;
7165         } else {
7166                 DRM_ERROR("Unsupported divider value\n");
7167                 return;
7168         }
7169
7170         if (crtc_state->has_pch_encoder)
7171                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7172         else
7173                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7174 }
7175
7176 static void vlv_compute_dpll(struct intel_crtc *crtc,
7177                              struct intel_crtc_state *pipe_config)
7178 {
7179         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7180                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7181         if (crtc->pipe != PIPE_A)
7182                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7183
7184         /* DPLL not used with DSI, but still need the rest set up */
7185         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7186                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7187                         DPLL_EXT_BUFFER_ENABLE_VLV;
7188
7189         pipe_config->dpll_hw_state.dpll_md =
7190                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7191 }
7192
7193 static void chv_compute_dpll(struct intel_crtc *crtc,
7194                              struct intel_crtc_state *pipe_config)
7195 {
7196         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7197                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7198         if (crtc->pipe != PIPE_A)
7199                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7200
7201         /* DPLL not used with DSI, but still need the rest set up */
7202         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7203                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7204
7205         pipe_config->dpll_hw_state.dpll_md =
7206                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7207 }
7208
/*
 * Program the VLV DPLL dividers and analog settings through the DPIO
 * sideband. The DPLL itself is enabled separately (vlv_enable_pll());
 * with DSI outputs only the refclk is set up and the divider programming
 * is skipped. The magic DPIO values follow the eDP/HDMI DPIO driver and
 * vbios notes; do not reorder the writes.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers first, then re-write with calibration enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/*
	 * NOTE(review): bit 24 (0x01000000) is set only for DP encoders —
	 * presumably a DP-specific core clock mode; confirm in DPIO notes.
	 */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7307
/*
 * Program the CHV per-channel PLL (P dividers, M2 integer and fraction,
 * N/M1, lock detect threshold, loop filter, and AFC recalibration)
 * through the DPIO sideband. The DPLL enable itself happens in
 * chv_enable_pll(); with DSI outputs only refclk/SSC are set up and the
 * divider programming is skipped.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* M2 is split: low 22 bits are the fraction, the rest the integer. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients are chosen by VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7412
7413 /**
7414  * vlv_force_pll_on - forcibly enable just the PLL
7415  * @dev_priv: i915 private structure
7416  * @pipe: pipe PLL to enable
7417  * @dpll: PLL configuration
7418  *
7419  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7420  * in cases where we need the PLL enabled even when @pipe is not going to
7421  * be enabled.
7422  */
7423 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7424                      const struct dpll *dpll)
7425 {
7426         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7427         struct intel_crtc_state *pipe_config;
7428
7429         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7430         if (!pipe_config)
7431                 return -ENOMEM;
7432
7433         pipe_config->base.crtc = &crtc->base;
7434         pipe_config->pixel_multiplier = 1;
7435         pipe_config->dpll = *dpll;
7436
7437         if (IS_CHERRYVIEW(dev_priv)) {
7438                 chv_compute_dpll(crtc, pipe_config);
7439                 chv_prepare_pll(crtc, pipe_config);
7440                 chv_enable_pll(crtc, pipe_config);
7441         } else {
7442                 vlv_compute_dpll(crtc, pipe_config);
7443                 vlv_prepare_pll(crtc, pipe_config);
7444                 vlv_enable_pll(crtc, pipe_config);
7445         }
7446
7447         kfree(pipe_config);
7448
7449         return 0;
7450 }
7451
7452 /**
7453  * vlv_force_pll_off - forcibly disable just the PLL
7454  * @dev_priv: i915 private structure
7455  * @pipe: pipe PLL to disable
7456  *
7457  * Disable the PLL for @pipe. To be used in cases where we need
7458  * the PLL enabled even when @pipe is not going to be enabled.
7459  */
7460 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7461 {
7462         if (IS_CHERRYVIEW(dev_priv))
7463                 chv_disable_pll(dev_priv, pipe);
7464         else
7465                 vlv_disable_pll(dev_priv, pipe);
7466 }
7467
/*
 * Build the gen3/4-style DPLL control register value from the divider
 * settings in @crtc_state->dpll and store it (plus DPLL_MD on gen4+) in
 * @crtc_state->dpll_hw_state. @reduced_clock, when non-NULL, supplies
 * the downclocked P1 divider used on G4X.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Program FP0/FP1 before assembling the control value. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in the DPLL itself. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* SDVO/HDMI and DP encoders all need the high speed clock bit. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* P2 only supports a fixed set of divider values. */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, spread spectrum, or default. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ also carries the pixel multiplier in DPLL_MD. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7540
7541 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7542                               struct intel_crtc_state *crtc_state,
7543                               struct dpll *reduced_clock)
7544 {
7545         struct drm_device *dev = crtc->base.dev;
7546         struct drm_i915_private *dev_priv = to_i915(dev);
7547         u32 dpll;
7548         struct dpll *clock = &crtc_state->dpll;
7549
7550         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7551
7552         dpll = DPLL_VGA_MODE_DIS;
7553
7554         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7555                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7556         } else {
7557                 if (clock->p1 == 2)
7558                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7559                 else
7560                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7561                 if (clock->p2 == 4)
7562                         dpll |= PLL_P2_DIVIDE_BY_4;
7563         }
7564
7565         /*
7566          * Bspec:
7567          * "[Almador Errata}: For the correct operation of the muxed DVO pins
7568          *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7569          *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7570          *  Enable) must be set to “1” in both the DPLL A Control Register
7571          *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7572          *
7573          * For simplicity We simply keep both bits always enabled in
7574          * both DPLLS. The spec says we should disable the DVO 2X clock
7575          * when not needed, but this seems to work fine in practice.
7576          */
7577         if (IS_I830(dev_priv) ||
7578             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7579                 dpll |= DPLL_DVO_2X_MODE;
7580
7581         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7582             intel_panel_use_ssc(dev_priv))
7583                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7584         else
7585                 dpll |= PLL_REF_INPUT_DREFCLK;
7586
7587         dpll |= DPLL_VCO_ENABLE;
7588         crtc_state->dpll_hw_state.dpll = dpll;
7589 }
7590
/*
 * Write the pipe/transcoder display timing registers (H/V total, blank,
 * sync, and VSYNCSHIFT) from the adjusted mode in @crtc_state.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO gets a fixed vsyncshift; others derive it from hsync. */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* All timing registers pack (start - 1) low and (end - 1) high. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7652
7653 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7654 {
7655         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7656         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7657         enum pipe pipe = crtc->pipe;
7658
7659         /* pipesrc controls the size that is scaled from, which should
7660          * always be the user's requested size.
7661          */
7662         I915_WRITE(PIPESRC(pipe),
7663                    ((crtc_state->pipe_src_w - 1) << 16) |
7664                    (crtc_state->pipe_src_h - 1));
7665 }
7666
/*
 * Read the transcoder timing registers back into the adjusted mode's
 * crtc_* fields.
 *
 * Each timing register packs two (value - 1) encoded 16-bit fields:
 * the "start"/"display" value in the low word and the "end"/"total"
 * value in the high word, hence the +1 after each unpack.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/*
	 * For interlaced modes the hw was programmed with vtotal/vblank_end
	 * reduced by one (the chip adds the 2 halflines itself), so undo
	 * that adjustment here to recover the original values.
	 */
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}
}
7701
7702 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7703                                     struct intel_crtc_state *pipe_config)
7704 {
7705         struct drm_device *dev = crtc->base.dev;
7706         struct drm_i915_private *dev_priv = to_i915(dev);
7707         u32 tmp;
7708
7709         tmp = I915_READ(PIPESRC(crtc->pipe));
7710         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7711         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7712
7713         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7714         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7715 }
7716
7717 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7718                                  struct intel_crtc_state *pipe_config)
7719 {
7720         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7721         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7722         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7723         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7724
7725         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7726         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7727         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7728         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7729
7730         mode->flags = pipe_config->base.adjusted_mode.flags;
7731         mode->type = DRM_MODE_TYPE_DRIVER;
7732
7733         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7734
7735         mode->hsync = drm_mode_hsync(mode);
7736         mode->vrefresh = drm_mode_vrefresh(mode);
7737         drm_mode_set_name(mode);
7738 }
7739
/*
 * Assemble and program the PIPECONF register for the crtc from the
 * software crtc state (dither, bpc, interlace, color range, gamma mode).
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/*
		 * Gen3 and earlier, and SDVO outputs, use the field
		 * indication flavour of interlacing; everything else
		 * uses the sync shift flavour.
		 */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* Limited color range selection via PIPECONF only exists on VLV/CHV. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
7798
7799 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7800                                    struct intel_crtc_state *crtc_state)
7801 {
7802         struct drm_device *dev = crtc->base.dev;
7803         struct drm_i915_private *dev_priv = to_i915(dev);
7804         const struct intel_limit *limit;
7805         int refclk = 48000;
7806
7807         memset(&crtc_state->dpll_hw_state, 0,
7808                sizeof(crtc_state->dpll_hw_state));
7809
7810         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7811                 if (intel_panel_use_ssc(dev_priv)) {
7812                         refclk = dev_priv->vbt.lvds_ssc_freq;
7813                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7814                 }
7815
7816                 limit = &intel_limits_i8xx_lvds;
7817         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7818                 limit = &intel_limits_i8xx_dvo;
7819         } else {
7820                 limit = &intel_limits_i8xx_dac;
7821         }
7822
7823         if (!crtc_state->clock_set &&
7824             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7825                                  refclk, NULL, &crtc_state->dpll)) {
7826                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7827                 return -EINVAL;
7828         }
7829
7830         i8xx_compute_dpll(crtc, crtc_state, NULL);
7831
7832         return 0;
7833 }
7834
/*
 * Compute the DPLL settings for g4x crtcs.
 *
 * Picks the limit table by output type (single/dual channel LVDS,
 * HDMI/analog, SDVO, or the generic i9xx SDVO table for the rest),
 * optionally switching LVDS to the SSC reference clock from the VBT,
 * then finds a best-match divider set unless one was already provided.
 *
 * Returns 0 on success, -EINVAL if no suitable PLL settings exist.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		if (intel_is_dual_link_lvds(dev_priv))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
7876
7877 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7878                                   struct intel_crtc_state *crtc_state)
7879 {
7880         struct drm_device *dev = crtc->base.dev;
7881         struct drm_i915_private *dev_priv = to_i915(dev);
7882         const struct intel_limit *limit;
7883         int refclk = 96000;
7884
7885         memset(&crtc_state->dpll_hw_state, 0,
7886                sizeof(crtc_state->dpll_hw_state));
7887
7888         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7889                 if (intel_panel_use_ssc(dev_priv)) {
7890                         refclk = dev_priv->vbt.lvds_ssc_freq;
7891                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7892                 }
7893
7894                 limit = &intel_limits_pineview_lvds;
7895         } else {
7896                 limit = &intel_limits_pineview_sdvo;
7897         }
7898
7899         if (!crtc_state->clock_set &&
7900             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7901                                 refclk, NULL, &crtc_state->dpll)) {
7902                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7903                 return -EINVAL;
7904         }
7905
7906         i9xx_compute_dpll(crtc, crtc_state, NULL);
7907
7908         return 0;
7909 }
7910
7911 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7912                                    struct intel_crtc_state *crtc_state)
7913 {
7914         struct drm_device *dev = crtc->base.dev;
7915         struct drm_i915_private *dev_priv = to_i915(dev);
7916         const struct intel_limit *limit;
7917         int refclk = 96000;
7918
7919         memset(&crtc_state->dpll_hw_state, 0,
7920                sizeof(crtc_state->dpll_hw_state));
7921
7922         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7923                 if (intel_panel_use_ssc(dev_priv)) {
7924                         refclk = dev_priv->vbt.lvds_ssc_freq;
7925                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7926                 }
7927
7928                 limit = &intel_limits_i9xx_lvds;
7929         } else {
7930                 limit = &intel_limits_i9xx_sdvo;
7931         }
7932
7933         if (!crtc_state->clock_set &&
7934             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7935                                  refclk, NULL, &crtc_state->dpll)) {
7936                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7937                 return -EINVAL;
7938         }
7939
7940         i9xx_compute_dpll(crtc, crtc_state, NULL);
7941
7942         return 0;
7943 }
7944
/*
 * Compute the DPLL settings for CHV crtcs.
 *
 * A single limit table and a fixed 100 MHz reference clock are used
 * for all output types; a best-match divider set is found unless
 * userspace already provided one.
 *
 * Returns 0 on success, -EINVAL if no suitable PLL settings exist.
 */
static int chv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_chv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	chv_compute_dpll(crtc, crtc_state);

	return 0;
}
7965
/*
 * Compute the DPLL settings for VLV crtcs.
 *
 * A single limit table and a fixed 100 MHz reference clock are used
 * for all output types; a best-match divider set is found unless
 * userspace already provided one.
 *
 * Returns 0 on success, -EINVAL if no suitable PLL settings exist.
 */
static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_vlv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	vlv_compute_dpll(crtc, crtc_state);

	return 0;
}
7986
7987 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
7988 {
7989         if (IS_I830(dev_priv))
7990                 return false;
7991
7992         return INTEL_GEN(dev_priv) >= 4 ||
7993                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
7994 }
7995
/*
 * Read back the gmch panel fitter state into @pipe_config, but only if
 * the platform has a pfit, it is enabled, and it is attached to this
 * crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* Before gen4 the pfit is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* Gen4+ report the attached pipe in PFIT_CONTROL. */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
8021
/*
 * Read the VLV DPLL divider values back through the DPIO sideband and
 * recompute the port clock from them.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the m1/m2/n/p1/p2 dividers from the single mdiv word. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8048
/*
 * Read out the currently programmed state of the crtc's primary plane
 * (tiling, rotation, format, base address, size, stride) into
 * @plane_config, allocating a framebuffer struct to describe it.
 * NOTE(review): presumably this captures the firmware/BIOS-programmed
 * framebuffer for initial takeover -- confirm against callers.
 *
 * Silently returns if the plane is disabled or the fb allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling and rotation bits are only meaningful on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/*
	 * The surface base/offset registers differ by platform:
	 * HSW/BDW use DSPOFFSET, gen4+ use DSPTILEOFF/DSPLINOFF
	 * depending on tiling, and older parts only have DSPADDR.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* Plane size is taken from the pipe source size registers. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8131
/*
 * Read the CHV DPLL divider values back through the DPIO sideband and
 * recompute the port clock from them.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * m2 carries a 22-bit fractional part taken from PLL_DW2, which
	 * is only added when the fractional divider is enabled.
	 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8165
/*
 * Determine the pipe output color format (RGB/YCbCr 4:4:4/4:2:0) from
 * the PIPEMISC register on BDW and gen9+, and flag LSPCON downsampling
 * when the pipe outputs YCbCr 4:4:4.  Earlier platforms are always RGB.
 */
static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;

	pipe_config->lspcon_downsampling = false;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));

		if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
			bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
			bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;

			if (ycbcr420_enabled) {
				/* We support 4:2:0 in full blend mode only */
				if (!blend)
					output = INTEL_OUTPUT_FORMAT_INVALID;
				else if (!(IS_GEMINILAKE(dev_priv) ||
					   INTEL_GEN(dev_priv) >= 10))
					output = INTEL_OUTPUT_FORMAT_INVALID;
				else
					output = INTEL_OUTPUT_FORMAT_YCBCR420;
			} else {
				/*
				 * Currently there is no interface defined to
				 * check user preference between RGB/YCBCR444
				 * or YCBCR420. So the only possible case for
				 * YCBCR444 usage is driving YCBCR420 output
				 * with LSPCON, when pipe is configured for
				 * YCBCR444 output and LSPCON takes care of
				 * downsampling it.
				 */
				pipe_config->lspcon_downsampling = true;
				output = INTEL_OUTPUT_FORMAT_YCBCR444;
			}
		}
	}

	pipe_config->output_format = output;
}
8208
/*
 * Read the gamma/CSC enable bits back from the primary plane's control
 * register into the crtc state.  The flags are only set, never cleared,
 * so the caller is expected to start from a zeroed state.
 */
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = I915_READ(DSPCNTR(i9xx_plane));

	if (tmp & DISPPLANE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	/* The pipe CSC enable bit only exists on non-GMCH platforms. */
	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}
8226
/*
 * Read the full hw state of a gmch-style pipe back into @pipe_config:
 * PIPECONF (bpc, color range, gamma mode, double wide), timings,
 * source size, pfit, pixel multiplier, DPLL state and port clock.
 *
 * Returns true if the pipe is powered and enabled and the state was
 * read out, false otherwise.  Takes a display power wakeref for the
 * duration of the readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Bail if the pipe's power domain is off; registers would be invalid. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x+ encode the pipe bpc in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* The pixel multiplier lives in different registers per generation. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
8343
/*
 * ironlake_init_pch_refclk - configure the PCH display reference clock (DREF)
 * @dev_priv: i915 device
 *
 * Computes the desired PCH_DREF_CONTROL value from the connected outputs
 * (LVDS panel, CPU-attached eDP on port A), the VBT CK505 setting and
 * whether any already-enabled PCH DPLL is consuming the SSC source.  The
 * final value is computed first; if it differs from the current register
 * contents, the hardware is walked to the new state one source at a time,
 * with a 200 us settle delay after each write.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/*
	 * On IBX the VBT tells us whether an external CK505 clock source is
	 * present, and SSC is only permitted when it is.  Later PCHs never
	 * use CK505 and can always use SSC.
	 */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but keep SSC alive for the DPLL already using it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Hardware already matches the desired configuration - nothing to do. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise updates above must have converged on 'final'. */
	BUG_ON(val != final);
}
8510
/*
 * lpt_reset_fdi_mphy - pulse the FDI mPHY reset through SOUTH_CHICKEN2
 *
 * Asserts FDI_MPHY_IOSFSB_RESET_CTL and waits (100 us timeout) for the
 * status bit to reflect the assertion, then de-asserts the control bit and
 * waits for the status bit to clear again.  Timeouts are logged but not
 * propagated.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8531
/* WaMPhyProgramming:hsw */
/*
 * lpt_program_fdi_mphy - program FDI mPHY tuning values over sideband
 *
 * Performs a fixed sequence of read-modify-write tuning updates to mPHY
 * registers through the SBI_MPHY sideband interface.  The registers come
 * in pairs offset by 0x100 (0x20xx/0x21xx) that receive identical values -
 * presumably one register per FDI channel; the individual field meanings
 * are magic numbers mandated by the workaround above.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8606
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible parameter combinations: FDI implies
	 * downspread, and LP PCH has no FDI at all. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC unit but keep the path alternate (bypass)
	 * bit set while it comes up. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Drop the bypass so the spread path drives the clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable register differs between LP and non-LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8651
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the buffer-enable bit first (register location depends on
	 * LP vs non-LP PCH). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* If the SSC unit is still running, put the path into bypass
	 * (PATHALT) before disabling it, with a settle delay in between. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8677
/* Map a bend amount in "steps" (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE register values for each supported CLKOUT_DP bend amount,
 * indexed by BEND_IDX(steps).  Consumed by lpt_bend_clkout_dp() below.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8703
8704 /*
8705  * Bend CLKOUT_DP
8706  * steps -50 to 50 inclusive, in steps of 5
8707  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8708  * change in clock period = -(steps / 10) * 5.787 ps
8709  */
8710 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8711 {
8712         u32 tmp;
8713         int idx = BEND_IDX(steps);
8714
8715         if (WARN_ON(steps % 5 != 0))
8716                 return;
8717
8718         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8719                 return;
8720
8721         mutex_lock(&dev_priv->sb_lock);
8722
8723         if (steps % 10 != 0)
8724                 tmp = 0xAAAAAAAB;
8725         else
8726                 tmp = 0x00000000;
8727         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8728
8729         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8730         tmp &= 0xffff0000;
8731         tmp |= sscdivintphase[idx];
8732         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8733
8734         mutex_unlock(&dev_priv->sb_lock);
8735 }
8736
8737 #undef BEND_IDX
8738
8739 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8740 {
8741         struct intel_encoder *encoder;
8742         bool has_vga = false;
8743
8744         for_each_intel_encoder(&dev_priv->drm, encoder) {
8745                 switch (encoder->type) {
8746                 case INTEL_OUTPUT_ANALOG:
8747                         has_vga = true;
8748                         break;
8749                 default:
8750                         break;
8751                 }
8752         }
8753
8754         if (has_vga) {
8755                 lpt_bend_clkout_dp(dev_priv, 0);
8756                 lpt_enable_clkout_dp(dev_priv, true, true);
8757         } else {
8758                 lpt_disable_clkout_dp(dev_priv);
8759         }
8760 }
8761
8762 /*
8763  * Initialize reference clocks when the driver loads
8764  */
8765 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8766 {
8767         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
8768                 ironlake_init_pch_refclk(dev_priv);
8769         else if (HAS_PCH_LPT(dev_priv))
8770                 lpt_init_pch_refclk(dev_priv);
8771 }
8772
8773 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
8774 {
8775         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8776         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8777         enum pipe pipe = crtc->pipe;
8778         u32 val;
8779
8780         val = 0;
8781
8782         switch (crtc_state->pipe_bpp) {
8783         case 18:
8784                 val |= PIPECONF_6BPC;
8785                 break;
8786         case 24:
8787                 val |= PIPECONF_8BPC;
8788                 break;
8789         case 30:
8790                 val |= PIPECONF_10BPC;
8791                 break;
8792         case 36:
8793                 val |= PIPECONF_12BPC;
8794                 break;
8795         default:
8796                 /* Case prevented by intel_choose_pipe_bpp_dither. */
8797                 BUG();
8798         }
8799
8800         if (crtc_state->dither)
8801                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8802
8803         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8804                 val |= PIPECONF_INTERLACED_ILK;
8805         else
8806                 val |= PIPECONF_PROGRESSIVE;
8807
8808         if (crtc_state->limited_color_range)
8809                 val |= PIPECONF_COLOR_RANGE_SELECT;
8810
8811         val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8812
8813         I915_WRITE(PIPECONF(pipe), val);
8814         POSTING_READ(PIPECONF(pipe));
8815 }
8816
8817 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8818 {
8819         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8820         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8821         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8822         u32 val = 0;
8823
8824         if (IS_HASWELL(dev_priv) && crtc_state->dither)
8825                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8826
8827         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8828                 val |= PIPECONF_INTERLACED_ILK;
8829         else
8830                 val |= PIPECONF_PROGRESSIVE;
8831
8832         I915_WRITE(PIPECONF(cpu_transcoder), val);
8833         POSTING_READ(PIPECONF(cpu_transcoder));
8834 }
8835
8836 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
8837 {
8838         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8839         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
8840
8841         if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8842                 u32 val = 0;
8843
8844                 switch (crtc_state->pipe_bpp) {
8845                 case 18:
8846                         val |= PIPEMISC_DITHER_6_BPC;
8847                         break;
8848                 case 24:
8849                         val |= PIPEMISC_DITHER_8_BPC;
8850                         break;
8851                 case 30:
8852                         val |= PIPEMISC_DITHER_10_BPC;
8853                         break;
8854                 case 36:
8855                         val |= PIPEMISC_DITHER_12_BPC;
8856                         break;
8857                 default:
8858                         /* Case prevented by pipe_config_set_bpp. */
8859                         BUG();
8860                 }
8861
8862                 if (crtc_state->dither)
8863                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8864
8865                 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8866                     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
8867                         val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
8868
8869                 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
8870                         val |= PIPEMISC_YUV420_ENABLE |
8871                                 PIPEMISC_YUV420_MODE_FULL_BLEND;
8872
8873                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8874         }
8875 }
8876
8877 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8878 {
8879         /*
8880          * Account for spread spectrum to avoid
8881          * oversubscribing the link. Max center spread
8882          * is 2.5%; use 5% for safety's sake.
8883          */
8884         u32 bps = target_clock * bpp * 21 / 20;
8885         return DIV_ROUND_UP(bps, link_bw * 8);
8886 }
8887
8888 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8889 {
8890         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8891 }
8892
/*
 * ironlake_compute_dpll - assemble DPLL, FP0 and FP1 register values
 * @crtc: the CRTC
 * @crtc_state: state carrying the pre-computed dividers in ->dpll
 * @reduced_clock: optional reduced-clock dividers for FP1, or NULL to
 *                 reuse the primary clock's FP value
 *
 * Translates the divider settings in @crtc_state->dpll into the hardware
 * DPLL/FP register layout and stores the result in
 * @crtc_state->dpll_hw_state (dpll, fp0, fp1).
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same CB-tune criterion as above, for the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference input: spread spectrum for SSC-enabled LVDS panels. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8994
/*
 * ironlake_crtc_compute_clock - find PLL dividers and reserve a shared DPLL
 * @crtc: the CRTC
 * @crtc_state: state to fill with divider and DPLL selections
 *
 * Selects the applicable PLL limits (LVDS single/dual-link, optionally at
 * the 100 MHz SSC reference, or DAC otherwise), runs the divider search
 * unless the clock is already set, then computes the DPLL register values
 * and grabs a shared DPLL.
 *
 * Returns 0 on success, -EINVAL if no divider or DPLL could be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_limit *limit;
	int refclk = 120000;	/* default reference clock, in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* SSC-enabled panels use the VBT-provided reference instead. */
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_get_shared_dpll(crtc_state, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
9048
9049 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9050                                          struct intel_link_m_n *m_n)
9051 {
9052         struct drm_device *dev = crtc->base.dev;
9053         struct drm_i915_private *dev_priv = to_i915(dev);
9054         enum pipe pipe = crtc->pipe;
9055
9056         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9057         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9058         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9059                 & ~TU_SIZE_MASK;
9060         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9061         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9062                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9063 }
9064
/*
 * intel_cpu_transcoder_get_m_n - read back link M/N values for a transcoder
 * @crtc: the CRTC (used for the pipe on pre-gen5 paths)
 * @transcoder: CPU transcoder to read on gen5+
 * @m_n: filled with the primary (M1/N1) link values
 * @m2_n2: optionally filled with the M2/N2 values when non-NULL and the
 *         transcoder has an M2/N2 set; left untouched otherwise
 *
 * Gen5+ uses per-transcoder registers; older platforms use the per-pipe
 * G4X register layout.  In both cases the TU size is stored in the high
 * bits of the DATA_M register and read back as field value + 1.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9101
9102 void intel_dp_get_m_n(struct intel_crtc *crtc,
9103                       struct intel_crtc_state *pipe_config)
9104 {
9105         if (pipe_config->has_pch_encoder)
9106                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9107         else
9108                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9109                                              &pipe_config->dp_m_n,
9110                                              &pipe_config->dp_m2_n2);
9111 }
9112
/* Read back the FDI M1/N1 values from the CPU transcoder into ->fdi_m_n. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9119
9120 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9121                                     struct intel_crtc_state *pipe_config)
9122 {
9123         struct drm_device *dev = crtc->base.dev;
9124         struct drm_i915_private *dev_priv = to_i915(dev);
9125         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9126         u32 ps_ctrl = 0;
9127         int id = -1;
9128         int i;
9129
9130         /* find scaler attached to this pipe */
9131         for (i = 0; i < crtc->num_scalers; i++) {
9132                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9133                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9134                         id = i;
9135                         pipe_config->pch_pfit.enabled = true;
9136                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9137                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9138                         scaler_state->scalers[i].in_use = true;
9139                         break;
9140                 }
9141         }
9142
9143         scaler_state->scaler_id = id;
9144         if (id >= 0) {
9145                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9146         } else {
9147                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9148         }
9149 }
9150
/*
 * Reconstruct the BIOS-programmed primary plane configuration on SKL+:
 * read back plane control, surface base, size and stride registers and
 * build an intel_framebuffer describing the pre-existing scanout buffer.
 * On any failure (plane off, allocation failure, unknown tiling) the
 * function returns with plane_config->fb left untouched.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
                                 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The pixel format field layout changed on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* The alpha mode bits moved to PLANE_COLOR_CTL on GLK/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling bits into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	/* Horizontal flip is only available on gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface address is 4K-aligned; low bits are flags. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height-1) << 16 | (width-1). */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units that depend on format/modifier. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
9277
9278 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9279                                      struct intel_crtc_state *pipe_config)
9280 {
9281         struct drm_device *dev = crtc->base.dev;
9282         struct drm_i915_private *dev_priv = to_i915(dev);
9283         u32 tmp;
9284
9285         tmp = I915_READ(PF_CTL(crtc->pipe));
9286
9287         if (tmp & PF_ENABLE) {
9288                 pipe_config->pch_pfit.enabled = true;
9289                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9290                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9291
9292                 /* We currently do not free assignements of panel fitters on
9293                  * ivb/hsw (since we don't use the higher upscaling modes which
9294                  * differentiates them) so just WARN about this case for now. */
9295                 if (IS_GEN(dev_priv, 7)) {
9296                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9297                                 PF_PIPE_SEL_IVB(crtc->pipe));
9298                 }
9299         }
9300 }
9301
9302 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9303                                      struct intel_crtc_state *pipe_config)
9304 {
9305         struct drm_device *dev = crtc->base.dev;
9306         struct drm_i915_private *dev_priv = to_i915(dev);
9307         enum intel_display_power_domain power_domain;
9308         intel_wakeref_t wakeref;
9309         u32 tmp;
9310         bool ret;
9311
9312         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9313         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9314         if (!wakeref)
9315                 return false;
9316
9317         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9318         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9319         pipe_config->shared_dpll = NULL;
9320
9321         ret = false;
9322         tmp = I915_READ(PIPECONF(crtc->pipe));
9323         if (!(tmp & PIPECONF_ENABLE))
9324                 goto out;
9325
9326         switch (tmp & PIPECONF_BPC_MASK) {
9327         case PIPECONF_6BPC:
9328                 pipe_config->pipe_bpp = 18;
9329                 break;
9330         case PIPECONF_8BPC:
9331                 pipe_config->pipe_bpp = 24;
9332                 break;
9333         case PIPECONF_10BPC:
9334                 pipe_config->pipe_bpp = 30;
9335                 break;
9336         case PIPECONF_12BPC:
9337                 pipe_config->pipe_bpp = 36;
9338                 break;
9339         default:
9340                 break;
9341         }
9342
9343         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9344                 pipe_config->limited_color_range = true;
9345
9346         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9347                 PIPECONF_GAMMA_MODE_SHIFT;
9348
9349         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9350
9351         i9xx_get_pipe_color_config(pipe_config);
9352
9353         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9354                 struct intel_shared_dpll *pll;
9355                 enum intel_dpll_id pll_id;
9356
9357                 pipe_config->has_pch_encoder = true;
9358
9359                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9360                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9361                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
9362
9363                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9364
9365                 if (HAS_PCH_IBX(dev_priv)) {
9366                         /*
9367                          * The pipe->pch transcoder and pch transcoder->pll
9368                          * mapping is fixed.
9369                          */
9370                         pll_id = (enum intel_dpll_id) crtc->pipe;
9371                 } else {
9372                         tmp = I915_READ(PCH_DPLL_SEL);
9373                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9374                                 pll_id = DPLL_ID_PCH_PLL_B;
9375                         else
9376                                 pll_id= DPLL_ID_PCH_PLL_A;
9377                 }
9378
9379                 pipe_config->shared_dpll =
9380                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
9381                 pll = pipe_config->shared_dpll;
9382
9383                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9384                                                 &pipe_config->dpll_hw_state));
9385
9386                 tmp = pipe_config->dpll_hw_state.dpll;
9387                 pipe_config->pixel_multiplier =
9388                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9389                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9390
9391                 ironlake_pch_clock_get(crtc, pipe_config);
9392         } else {
9393                 pipe_config->pixel_multiplier = 1;
9394         }
9395
9396         intel_get_pipe_timings(crtc, pipe_config);
9397         intel_get_pipe_src_size(crtc, pipe_config);
9398
9399         ironlake_get_pfit_config(crtc, pipe_config);
9400
9401         ret = true;
9402
9403 out:
9404         intel_display_power_put(dev_priv, power_domain, wakeref);
9405
9406         return ret;
9407 }
9408
9409 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9410 {
9411         struct drm_device *dev = &dev_priv->drm;
9412         struct intel_crtc *crtc;
9413
9414         for_each_intel_crtc(dev, crtc)
9415                 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9416                      pipe_name(crtc->pipe));
9417
9418         I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
9419                         "Display power well on\n");
9420         I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9421         I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9422         I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9423         I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
9424         I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9425              "CPU PWM1 enabled\n");
9426         if (IS_HASWELL(dev_priv))
9427                 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9428                      "CPU PWM2 enabled\n");
9429         I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9430              "PCH PWM1 enabled\n");
9431         I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9432              "Utility pin enabled\n");
9433         I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9434
9435         /*
9436          * In theory we can still leave IRQs enabled, as long as only the HPD
9437          * interrupts remain enabled. We used to check for that, but since it's
9438          * gen-specific and since we only disable LCPLL after we fully disable
9439          * the interrupts, the check below should be enough.
9440          */
9441         I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9442 }
9443
9444 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
9445 {
9446         if (IS_HASWELL(dev_priv))
9447                 return I915_READ(D_COMP_HSW);
9448         else
9449                 return I915_READ(D_COMP_BDW);
9450 }
9451
9452 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
9453 {
9454         if (IS_HASWELL(dev_priv)) {
9455                 mutex_lock(&dev_priv->pcu_lock);
9456                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9457                                             val))
9458                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9459                 mutex_unlock(&dev_priv->pcu_lock);
9460         } else {
9461                 I915_WRITE(D_COMP_BDW, val);
9462                 POSTING_READ(D_COMP_BDW);
9463         }
9464 }
9465
9466 /*
9467  * This function implements pieces of two sequences from BSpec:
9468  * - Sequence for display software to disable LCPLL
9469  * - Sequence for display software to allow package C8+
9470  * The steps implemented here are just the steps that actually touch the LCPLL
9471  * register. Callers should take care of disabling all the display engine
9472  * functions, doing the mode unset, fixing interrupts, etc.
9473  */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
                              bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move CDCLK onto FCLK before shutting the PLL down. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		/* Re-read: the register changed under us. */
		val = I915_READ(LCPLL_CTL);
	}

	/* Disable the PLL and wait for the lock bit to clear. */
	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (intel_wait_for_register(&dev_priv->uncore,
				    LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP and wait for any pending RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		/* Permit the LCPLL power well to be powered down. */
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9518
9519 /*
9520  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9521  * source.
9522  */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LCPLL_CTL);

	/* Already locked, enabled, on LCPLL and not allowed to power down? */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Revoke permission for the LCPLL power well to go down. */
	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Force D_COMP on and clear its disable bit. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	/* Re-enable the PLL and wait for it to lock. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(&dev_priv->uncore,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Switch CDCLK back from FCLK to LCPLL if needed. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	/* CDCLK may have changed while we were away; re-read it. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9574
9575 /*
9576  * Package states C8 and deeper are really deep PC states that can only be
9577  * reached when all the devices on the system allow it, so even if the graphics
9578  * device allows PC8+, it doesn't mean the system will actually get to these
9579  * states. Our driver only allows PC8+ when going into runtime PM.
9580  *
9581  * The requirements for PC8+ are that all the outputs are disabled, the power
9582  * well is disabled and most interrupts are disabled, and these are also
9583  * requirements for runtime PM. When these conditions are met, we manually do
9584  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9586  * hang the machine.
9587  *
9588  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9589  * the state of some registers, so when we come back from PC8+ we need to
9590  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9591  * need to take care of the registers kept by RC6. Notice that this happens even
9592  * if we don't put the device in PCI D3 state (which is what currently happens
9593  * because of the runtime PM support).
9594  *
9595  * For more, read "Display Sequences for Package C8" on the hardware
9596  * documentation.
9597  */
9598 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9599 {
9600         u32 val;
9601
9602         DRM_DEBUG_KMS("Enabling package C8+\n");
9603
9604         if (HAS_PCH_LPT_LP(dev_priv)) {
9605                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9606                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9607                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9608         }
9609
9610         lpt_disable_clkout_dp(dev_priv);
9611         hsw_disable_lcpll(dev_priv, true, true);
9612 }
9613
9614 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9615 {
9616         u32 val;
9617
9618         DRM_DEBUG_KMS("Disabling package C8+\n");
9619
9620         hsw_restore_lcpll(dev_priv);
9621         lpt_init_pch_refclk(dev_priv);
9622
9623         if (HAS_PCH_LPT_LP(dev_priv)) {
9624                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9625                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9626                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9627         }
9628 }
9629
9630 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9631                                       struct intel_crtc_state *crtc_state)
9632 {
9633         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9634         struct intel_atomic_state *state =
9635                 to_intel_atomic_state(crtc_state->base.state);
9636
9637         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9638             INTEL_GEN(dev_priv) >= 11) {
9639                 struct intel_encoder *encoder =
9640                         intel_get_crtc_new_encoder(state, crtc_state);
9641
9642                 if (!intel_get_shared_dpll(crtc_state, encoder)) {
9643                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9644                                       pipe_name(crtc->pipe));
9645                         return -EINVAL;
9646                 }
9647         }
9648
9649         return 0;
9650 }
9651
9652 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9653                                    enum port port,
9654                                    struct intel_crtc_state *pipe_config)
9655 {
9656         enum intel_dpll_id id;
9657         u32 temp;
9658
9659         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9660         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9661
9662         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9663                 return;
9664
9665         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9666 }
9667
9668 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9669                                 enum port port,
9670                                 struct intel_crtc_state *pipe_config)
9671 {
9672         enum intel_dpll_id id;
9673         u32 temp;
9674
9675         /* TODO: TBT pll not implemented. */
9676         if (intel_port_is_combophy(dev_priv, port)) {
9677                 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9678                        DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9679                 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9680         } else if (intel_port_is_tc(dev_priv, port)) {
9681                 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9682         } else {
9683                 WARN(1, "Invalid port %x\n", port);
9684                 return;
9685         }
9686
9687         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9688 }
9689
9690 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9691                                 enum port port,
9692                                 struct intel_crtc_state *pipe_config)
9693 {
9694         enum intel_dpll_id id;
9695
9696         switch (port) {
9697         case PORT_A:
9698                 id = DPLL_ID_SKL_DPLL0;
9699                 break;
9700         case PORT_B:
9701                 id = DPLL_ID_SKL_DPLL1;
9702                 break;
9703         case PORT_C:
9704                 id = DPLL_ID_SKL_DPLL2;
9705                 break;
9706         default:
9707                 DRM_ERROR("Incorrect port type\n");
9708                 return;
9709         }
9710
9711         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9712 }
9713
9714 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9715                                 enum port port,
9716                                 struct intel_crtc_state *pipe_config)
9717 {
9718         enum intel_dpll_id id;
9719         u32 temp;
9720
9721         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9722         id = temp >> (port * 3 + 1);
9723
9724         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9725                 return;
9726
9727         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9728 }
9729
/*
 * HSW/BDW: decode which PLL (WRPLL, SPLL or a fixed-frequency LCPLL tap)
 * the DDI @port is clocked from and record it in pipe_config->shared_dpll.
 * Leaves shared_dpll untouched when the port has no clock selected.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		/* No clock selected: nothing to look up. */
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9765
/*
 * Determine which CPU transcoder (including the eDP/DSI panel
 * transcoders) drives @crtc and whether it is enabled.
 *
 * On success the transcoder's power domain wakeref is stored in
 * @wakerefs and its bit set in @power_domain_mask; the caller is
 * responsible for releasing those references. Returns true if the
 * transcoder's power well is on and the transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask,
                                     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	/* Gen11+ has dedicated DSI transcoders instead of eDP ones. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe the panel transcoder is attached to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		}

		if (trans_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = panel_transcoder;
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	/* Record the reference so the caller can drop it later. */
	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9856
/*
 * Check whether one of the BXT DSI transcoders (ports A/C) is driving
 * @crtc; if so, record it in pipe_config->cpu_transcoder. Any power
 * domain references taken while probing are stored in @wakerefs and
 * @power_domain_mask for the caller to release. Returns true when a
 * DSI transcoder was found for this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask,
                                         intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		/* Record the reference so the caller can drop it later. */
		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Only accept a transcoder wired to this CRTC's pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9911
/*
 * Read back which DDI port and PLL drive @crtc's transcoder and store
 * the result (shared_dpll, dpll_hw_state, FDI/PCH state) in @pipe_config.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Dispatch to the platform-specific port->PLL decoder. */
	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9957
/*
 * Read the full hardware state of a HSW+ pipe into @pipe_config.
 *
 * Every register read is guarded by grabbing the relevant display power
 * domain first (via _get_if_enabled so a powered-down domain simply skips
 * readout). Each wakeref obtained is recorded in wakerefs[] and its bit
 * set in power_domain_mask so everything is released at "out:".
 *
 * Returns true if the pipe is active, false otherwise.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;

        intel_crtc_init_scalers(crtc, pipe_config);

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        wakerefs[power_domain] = wf;
        power_domain_mask = BIT_ULL(power_domain);

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config,
                                          &power_domain_mask, wakerefs);

        /* On BXT/GLK a DSI transcoder may drive the pipe instead. */
        if (IS_GEN9_LP(dev_priv) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config,
                                         &power_domain_mask, wakerefs)) {
                WARN_ON(active);
                active = true;
        }

        if (!active)
                goto out;

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            INTEL_GEN(dev_priv) >= 11) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }

        intel_get_pipe_src_size(crtc, pipe_config);
        intel_get_crtc_ycbcr_config(crtc, pipe_config);

        pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

        if (INTEL_GEN(dev_priv) >= 9) {
                u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        /* The panel fitter has its own power domain; must not be held yet. */
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        WARN_ON(power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wf) {
                wakerefs[power_domain] = wf;
                power_domain_mask |= BIT_ULL(power_domain);

                if (INTEL_GEN(dev_priv) >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
                        ironlake_get_pfit_config(crtc, pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

out:
        /* Drop every power domain reference taken during readout. */
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put(dev_priv,
                                        power_domain, wakerefs[power_domain]);

        return active;
}
10059
10060 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10061 {
10062         struct drm_i915_private *dev_priv =
10063                 to_i915(plane_state->base.plane->dev);
10064         const struct drm_framebuffer *fb = plane_state->base.fb;
10065         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10066         u32 base;
10067
10068         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10069                 base = obj->phys_handle->busaddr;
10070         else
10071                 base = intel_plane_ggtt_offset(plane_state);
10072
10073         base += plane_state->color_plane[0].offset;
10074
10075         /* ILK+ do this automagically */
10076         if (HAS_GMCH(dev_priv) &&
10077             plane_state->base.rotation & DRM_MODE_ROTATE_180)
10078                 base += (plane_state->base.crtc_h *
10079                          plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10080
10081         return base;
10082 }
10083
10084 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10085 {
10086         int x = plane_state->base.crtc_x;
10087         int y = plane_state->base.crtc_y;
10088         u32 pos = 0;
10089
10090         if (x < 0) {
10091                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10092                 x = -x;
10093         }
10094         pos |= x << CURSOR_X_SHIFT;
10095
10096         if (y < 0) {
10097                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10098                 y = -y;
10099         }
10100         pos |= y << CURSOR_Y_SHIFT;
10101
10102         return pos;
10103 }
10104
10105 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10106 {
10107         const struct drm_mode_config *config =
10108                 &plane_state->base.plane->dev->mode_config;
10109         int width = plane_state->base.crtc_w;
10110         int height = plane_state->base.crtc_h;
10111
10112         return width > 0 && width <= config->cursor_width &&
10113                 height > 0 && height <= config->cursor_height;
10114 }
10115
/*
 * Compute and validate the cursor's surface parameters (GGTT view,
 * stride, aligned surface offset). Cursors can't be panned within the
 * fb, so any residual src_x/src_y left after offset alignment is
 * rejected.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int src_x, src_y;
        u32 offset;
        int ret;

        intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
        plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

        ret = intel_plane_check_stride(plane_state);
        if (ret)
                return ret;

        /* src coordinates are in 16.16 fixed point; take the integer part */
        src_x = plane_state->base.src_x >> 16;
        src_y = plane_state->base.src_y >> 16;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                    plane_state, 0);

        if (src_x != 0 || src_y != 0) {
                DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
                return -EINVAL;
        }

        plane_state->color_plane[0].offset = offset;

        return 0;
}
10147
10148 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10149                               struct intel_plane_state *plane_state)
10150 {
10151         const struct drm_framebuffer *fb = plane_state->base.fb;
10152         int ret;
10153
10154         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10155                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10156                 return -EINVAL;
10157         }
10158
10159         ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10160                                                   &crtc_state->base,
10161                                                   DRM_PLANE_HELPER_NO_SCALING,
10162                                                   DRM_PLANE_HELPER_NO_SCALING,
10163                                                   true, true);
10164         if (ret)
10165                 return ret;
10166
10167         if (!plane_state->base.visible)
10168                 return 0;
10169
10170         ret = intel_plane_check_src_coordinates(plane_state);
10171         if (ret)
10172                 return ret;
10173
10174         ret = intel_cursor_check_surface(plane_state);
10175         if (ret)
10176                 return ret;
10177
10178         return 0;
10179 }
10180
/*
 * 845g/865g cursor stride limit: fixed at 2048 bytes regardless of
 * format, modifier or rotation.
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return 2048;
}
10188
10189 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10190 {
10191         u32 cntl = 0;
10192
10193         if (crtc_state->gamma_enable)
10194                 cntl |= CURSOR_GAMMA_ENABLE;
10195
10196         return cntl;
10197 }
10198
/*
 * Plane-derived bits of the 845/865 cursor control register: the cursor
 * is always ARGB and carries an explicit stride.
 */
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
{
        return CURSOR_ENABLE |
                CURSOR_FORMAT_ARGB |
                CURSOR_STRIDE(plane_state->color_plane[0].stride);
}
10206
10207 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10208 {
10209         int width = plane_state->base.crtc_w;
10210
10211         /*
10212          * 845g/865g are only limited by the width of their cursors,
10213          * the height is arbitrary up to the precision of the register.
10214          */
10215         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10216 }
10217
/*
 * Validate a plane update for the 845g/865g cursor and precompute the
 * plane's control register value into plane_state->ctl.
 *
 * Returns 0 on success or a negative errno.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int ret;

        ret = intel_check_cursor(crtc_state, plane_state);
        if (ret)
                return ret;

        /* if we want to turn off the cursor ignore width and height */
        if (!fb)
                return 0;

        /* Check for which cursor types we support */
        if (!i845_cursor_size_ok(plane_state)) {
                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
                          plane_state->base.crtc_w,
                          plane_state->base.crtc_h);
                return -EINVAL;
        }

        WARN_ON(plane_state->base.visible &&
                plane_state->color_plane[0].stride != fb->pitches[0]);

        /* The stride register only accepts these power-of-two values. */
        switch (fb->pitches[0]) {
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
                              fb->pitches[0]);
                return -EINVAL;
        }

        plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

        return 0;
}
10259
/*
 * Program the 845g/865g cursor, or disable it when @plane_state is NULL
 * (all register values then stay 0). Register writes are serialized with
 * the uncore lock, hence the _FW accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        u32 cntl = 0, base = 0, pos = 0, size = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->base.visible) {
                unsigned int width = plane_state->base.crtc_w;
                unsigned int height = plane_state->base.crtc_h;

                cntl = plane_state->ctl |
                        i845_cursor_ctl_crtc(crtc_state);

                size = (height << 12) | width;

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* On these chipsets we can only modify the base/size/stride
         * whilst the cursor is disabled.
         */
        if (plane->cursor.base != base ||
            plane->cursor.size != size ||
            plane->cursor.cntl != cntl) {
                /* Disable first, rewrite everything, then re-enable. */
                I915_WRITE_FW(CURCNTR(PIPE_A), 0);
                I915_WRITE_FW(CURBASE(PIPE_A), base);
                I915_WRITE_FW(CURSIZE, size);
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
                I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

                plane->cursor.base = base;
                plane->cursor.size = size;
                plane->cursor.cntl = cntl;
        } else {
                /* Only the position changed; no disable cycle needed. */
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10304
/* Disable the 845/865 cursor by programming an all-zero plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i845_update_cursor(plane, crtc_state, NULL);
}
10310
10311 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10312                                      enum pipe *pipe)
10313 {
10314         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10315         enum intel_display_power_domain power_domain;
10316         intel_wakeref_t wakeref;
10317         bool ret;
10318
10319         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10320         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10321         if (!wakeref)
10322                 return false;
10323
10324         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10325
10326         *pipe = PIPE_A;
10327
10328         intel_display_power_put(dev_priv, power_domain, wakeref);
10329
10330         return ret;
10331 }
10332
/*
 * i9xx+ cursor stride limit: the maximum cursor width times 4 bytes per
 * pixel (the supported cursor modes are all ARGB).
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return plane->base.dev->mode_config.cursor_width * 4;
}
10340
10341 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10342 {
10343         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10344         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10345         u32 cntl = 0;
10346
10347         if (INTEL_GEN(dev_priv) >= 11)
10348                 return cntl;
10349
10350         if (crtc_state->gamma_enable)
10351                 cntl = MCURSOR_GAMMA_ENABLE;
10352
10353         if (crtc_state->csc_enable)
10354                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10355
10356         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10357                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10358
10359         return cntl;
10360 }
10361
/*
 * Plane-derived bits of the i9xx+ cursor control register: trickle feed,
 * cursor mode (selected by width) and rotation.
 *
 * Returns 0 for widths the hardware cannot represent (the size check
 * should have rejected those already).
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        u32 cntl = 0;

        if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

        /* Only a few fixed widths are supported. */
        switch (plane_state->base.crtc_w) {
        case 64:
                cntl |= MCURSOR_MODE_64_ARGB_AX;
                break;
        case 128:
                cntl |= MCURSOR_MODE_128_ARGB_AX;
                break;
        case 256:
                cntl |= MCURSOR_MODE_256_ARGB_AX;
                break;
        default:
                MISSING_CASE(plane_state->base.crtc_w);
                return 0;
        }

        if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
                cntl |= MCURSOR_ROTATE_180;

        return cntl;
}
10392
/*
 * Check whether the requested cursor dimensions are representable on
 * i9xx+ hardware: width must be one of the fixed register modes, and
 * the height is constrained unless CUR_FBC can be used.
 */
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        int width = plane_state->base.crtc_w;
        int height = plane_state->base.crtc_h;

        if (!intel_cursor_size_ok(plane_state))
                return false;

        /* Cursor width is limited to a few power-of-two sizes */
        switch (width) {
        case 256:
        case 128:
        case 64:
                break;
        default:
                return false;
        }

        /*
         * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
         * height from 8 lines up to the cursor width, when the
         * cursor is not rotated. Everything else requires square
         * cursors.
         */
        if (HAS_CUR_FBC(dev_priv) &&
            plane_state->base.rotation & DRM_MODE_ROTATE_0) {
                if (height < 8 || height > width)
                        return false;
        } else {
                if (height != width)
                        return false;
        }

        return true;
}
10430
/*
 * Validate a plane update for the i9xx+ cursor and precompute the
 * plane's control register value into plane_state->ctl.
 *
 * Returns 0 on success or a negative errno.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        enum pipe pipe = plane->pipe;
        int ret;

        ret = intel_check_cursor(crtc_state, plane_state);
        if (ret)
                return ret;

        /* if we want to turn off the cursor ignore width and height */
        if (!fb)
                return 0;

        /* Check for which cursor types we support */
        if (!i9xx_cursor_size_ok(plane_state)) {
                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
                          plane_state->base.crtc_w,
                          plane_state->base.crtc_h);
                return -EINVAL;
        }

        WARN_ON(plane_state->base.visible &&
                plane_state->color_plane[0].stride != fb->pitches[0]);

        /* The fb stride must match the visible cursor width exactly. */
        if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
                DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
                              fb->pitches[0], plane_state->base.crtc_w);
                return -EINVAL;
        }

        /*
         * There's something wrong with the cursor on CHV pipe C.
         * If it straddles the left edge of the screen then
         * moving it away from the edge or disabling it often
         * results in a pipe underrun, and often that can lead to
         * dead pipe (constant underrun reported, and it scans
         * out just a solid color). To recover from that, the
         * display power well must be turned off and on again.
         * Refuse to put the cursor into that compromised position.
         */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
            plane_state->base.visible && plane_state->base.crtc_x < 0) {
                DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
                return -EINVAL;
        }

        plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

        return 0;
}
10485
/*
 * Program the i9xx+ cursor, or disable it when @plane_state is NULL
 * (all register values then stay 0). Register writes are serialized with
 * the uncore lock, hence the _FW accessors; write ordering matters, see
 * the comment below.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->base.visible) {
                cntl = plane_state->ctl |
                        i9xx_cursor_ctl_crtc(crtc_state);

                /* Non-square cursors need CUR_FBC_CTL to set the height. */
                if (plane_state->base.crtc_h != plane_state->base.crtc_w)
                        fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
         * arm itself. Thus we always update CURCNTR before
         * CURPOS.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additionally
         * a write to any of the cursor register will cancel
         * an already armed cursor update. Thus leaving out
         * the CURBASE write after CURPOS could lead to a
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
         * The other registers are armed by the CURBASE write
         * except when the plane is getting enabled at which time
         * the CURCNTR write arms the update.
         */

        if (INTEL_GEN(dev_priv) >= 9)
                skl_write_cursor_wm(plane, crtc_state);

        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
                if (HAS_CUR_FBC(dev_priv))
                        I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
                I915_WRITE_FW(CURCNTR(pipe), cntl);
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);

                plane->cursor.base = base;
                plane->cursor.size = fbc_ctl;
                plane->cursor.cntl = cntl;
        } else {
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10550
/* Disable the i9xx+ cursor by programming an all-zero plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i9xx_update_cursor(plane, crtc_state, NULL);
}
10556
/*
 * Read back whether this cursor plane is enabled in hardware, and which
 * pipe it is attached to (pre-ILK non-g4x parts select the pipe in the
 * control register). Returns false when the pipe's power domain is down.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
                                     enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        val = I915_READ(CURCNTR(plane->pipe));

        /* Any non-zero mode means the cursor is enabled. */
        ret = val & MCURSOR_MODE;

        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                *pipe = plane->pipe;
        else
                *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
                        MCURSOR_PIPE_SELECT_SHIFT;

        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
10590
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10596
10597 struct drm_framebuffer *
10598 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10599                          struct drm_mode_fb_cmd2 *mode_cmd)
10600 {
10601         struct intel_framebuffer *intel_fb;
10602         int ret;
10603
10604         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10605         if (!intel_fb)
10606                 return ERR_PTR(-ENOMEM);
10607
10608         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10609         if (ret)
10610                 goto err;
10611
10612         return &intel_fb->base;
10613
10614 err:
10615         kfree(intel_fb);
10616         return ERR_PTR(ret);
10617 }
10618
10619 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10620                                         struct drm_crtc *crtc)
10621 {
10622         struct drm_plane *plane;
10623         struct drm_plane_state *plane_state;
10624         int ret, i;
10625
10626         ret = drm_atomic_add_affected_planes(state, crtc);
10627         if (ret)
10628                 return ret;
10629
10630         for_each_new_plane_in_state(state, plane, plane_state, i) {
10631                 if (plane_state->crtc != crtc)
10632                         continue;
10633
10634                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10635                 if (ret)
10636                         return ret;
10637
10638                 drm_atomic_set_fb_for_plane(plane_state, NULL);
10639         }
10640
10641         return 0;
10642 }
10643
/*
 * Grab a pipe (the connector's current one, or any unused one) and set
 * @mode (or a default 640x480 mode) on it for load-based connector
 * detection. On success, @old->restore_state holds the atomic state
 * needed by intel_release_load_detect_pipe() to undo everything.
 *
 * NOTE(review): declared to return int but uses true/false as the
 * success/failure values, with -EDEADLK passed through for modeset-lock
 * backoff — callers apparently treat any negative value as "retry";
 * confirm against the callers before changing the convention.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               const struct drm_display_mode *mode,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);

        old->restore_state = NULL;

        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* An enabled crtc is in use; drop its lock and move on. */
                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        /* One state to apply the load-detect mode, one to restore later. */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->base.active = crtc_state->base.enable = true;

        if (!mode)
                mode = &load_detect_mode;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;

        /* Scan out no planes during load detection. */
        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Snapshot the current state before committing the new one. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* Deadlock means the caller must back off and retry. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
10801
10802 void intel_release_load_detect_pipe(struct drm_connector *connector,
10803                                     struct intel_load_detect_pipe *old,
10804                                     struct drm_modeset_acquire_ctx *ctx)
10805 {
10806         struct intel_encoder *intel_encoder =
10807                 intel_attached_encoder(connector);
10808         struct drm_encoder *encoder = &intel_encoder->base;
10809         struct drm_atomic_state *state = old->restore_state;
10810         int ret;
10811
10812         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10813                       connector->base.id, connector->name,
10814                       encoder->base.id, encoder->name);
10815
10816         if (!state)
10817                 return;
10818
10819         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10820         if (ret)
10821                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10822         drm_atomic_state_put(state);
10823 }
10824
10825 static int i9xx_pll_refclk(struct drm_device *dev,
10826                            const struct intel_crtc_state *pipe_config)
10827 {
10828         struct drm_i915_private *dev_priv = to_i915(dev);
10829         u32 dpll = pipe_config->dpll_hw_state.dpll;
10830
10831         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10832                 return dev_priv->vbt.lvds_ssc_freq;
10833         else if (HAS_PCH_SPLIT(dev_priv))
10834                 return 120000;
10835         else if (!IS_GEN(dev_priv, 2))
10836                 return 96000;
10837         else
10838                 return 48000;
10839 }
10840
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/* NOTE(review): on these platforms cpu_transcoder is used as the
	 * pipe number directly — presumably transcoder == pipe here. */
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor register the DPLL is currently using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M1/N/M2 from the FP register; Pineview packs the fields
	 * differently (note the ffs()-based decode of N there). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* Gen3+: P1 is stored as a one-bit-per-value field, hence ffs(). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Unknown mode: leave port_clock untouched and bail. */
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS presence changes how P1/P2 are decoded.
		 * i830 has no LVDS register at all, so treat it as disabled. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10930
/*
 * Compute a dotclock (in kHz) from the link frequency and the programmed
 * link M/N values. Returns 0 when the M/N pair is unprogrammed.
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	/* Guard against division by zero when M/N was never programmed. */
	if (!m_n->link_n)
		return 0;

	/* 64-bit intermediate: link_m * link_freq can overflow 32 bits. */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
10949
10950 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10951                                    struct intel_crtc_state *pipe_config)
10952 {
10953         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10954
10955         /* read out port_clock from the DPLL */
10956         i9xx_crtc_clock_get(crtc, pipe_config);
10957
10958         /*
10959          * In case there is an active pipe without active ports,
10960          * we may need some idea for the dotclock anyway.
10961          * Calculate one based on the FDI configuration.
10962          */
10963         pipe_config->base.adjusted_mode.crtc_clock =
10964                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10965                                          &pipe_config->fdi_m_n);
10966 }
10967
10968 /* Returns the currently programmed mode of the given encoder. */
10969 struct drm_display_mode *
10970 intel_encoder_current_mode(struct intel_encoder *encoder)
10971 {
10972         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10973         struct intel_crtc_state *crtc_state;
10974         struct drm_display_mode *mode;
10975         struct intel_crtc *crtc;
10976         enum pipe pipe;
10977
10978         if (!encoder->get_hw_state(encoder, &pipe))
10979                 return NULL;
10980
10981         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10982
10983         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10984         if (!mode)
10985                 return NULL;
10986
10987         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10988         if (!crtc_state) {
10989                 kfree(mode);
10990                 return NULL;
10991         }
10992
10993         crtc_state->base.crtc = &crtc->base;
10994
10995         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10996                 kfree(crtc_state);
10997                 kfree(mode);
10998                 return NULL;
10999         }
11000
11001         encoder->get_config(encoder, crtc_state);
11002
11003         intel_mode_from_pipe_config(mode, crtc_state);
11004
11005         kfree(crtc_state);
11006
11007         return mode;
11008 }
11009
/* drm_crtc_funcs .destroy hook: unregister from DRM core and free. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
11017
11018 /**
11019  * intel_wm_need_update - Check whether watermarks need updating
11020  * @cur: current plane state
11021  * @new: new plane state
11022  *
11023  * Check current plane state versus the new one to determine whether
11024  * watermarks need to be recalculated.
11025  *
11026  * Returns true or false.
11027  */
11028 static bool intel_wm_need_update(struct intel_plane_state *cur,
11029                                  struct intel_plane_state *new)
11030 {
11031         /* Update watermarks on tiling or size changes. */
11032         if (new->base.visible != cur->base.visible)
11033                 return true;
11034
11035         if (!cur->base.fb || !new->base.fb)
11036                 return false;
11037
11038         if (cur->base.fb->modifier != new->base.fb->modifier ||
11039             cur->base.rotation != new->base.rotation ||
11040             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11041             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11042             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11043             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11044                 return true;
11045
11046         return false;
11047 }
11048
11049 static bool needs_scaling(const struct intel_plane_state *state)
11050 {
11051         int src_w = drm_rect_width(&state->base.src) >> 16;
11052         int src_h = drm_rect_height(&state->base.src) >> 16;
11053         int dst_w = drm_rect_width(&state->base.dst);
11054         int dst_h = drm_rect_height(&state->base.dst);
11055
11056         return (src_w != dst_w || src_h != dst_h);
11057 }
11058
/**
 * intel_plane_atomic_calc_changes - derive crtc state flags from a plane change
 * @old_crtc_state: current (old) crtc state
 * @crtc_state: new crtc state being checked; its wm/cxsr/fb flags are updated
 * @old_plane_state: current (old) state of the plane
 * @plane_state: new state of the plane being checked
 *
 * Compares the old and new state of one plane and records the consequences
 * on the new crtc state: watermark pre/post update flags, cxsr and LP
 * watermark disables, frontbuffer bits and fb_changed. On gen9+ it also
 * runs plane scaler setup first.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct drm_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* gen9+ non-cursor planes go through the skl scaler setup first. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	/* A plane can't have been visible on a disabled crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Plane stays invisible: nothing to account for. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A modeset counts as a full off->on cycle for the plane. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
					to_intel_plane_state(plane_state))) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(to_intel_plane_state(plane_state)))))
		pipe_config->disable_lp_wm = true;

	return 0;
}
11193
11194 static bool encoders_cloneable(const struct intel_encoder *a,
11195                                const struct intel_encoder *b)
11196 {
11197         /* masks could be asymmetric, so check both ways */
11198         return a == b || (a->cloneable & (1 << b->type) &&
11199                           b->cloneable & (1 << a->type));
11200 }
11201
11202 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11203                                          struct intel_crtc *crtc,
11204                                          struct intel_encoder *encoder)
11205 {
11206         struct intel_encoder *source_encoder;
11207         struct drm_connector *connector;
11208         struct drm_connector_state *connector_state;
11209         int i;
11210
11211         for_each_new_connector_in_state(state, connector, connector_state, i) {
11212                 if (connector_state->crtc != &crtc->base)
11213                         continue;
11214
11215                 source_encoder =
11216                         to_intel_encoder(connector_state->best_encoder);
11217                 if (!encoders_cloneable(encoder, source_encoder))
11218                         return false;
11219         }
11220
11221         return true;
11222 }
11223
11224 static int icl_add_linked_planes(struct intel_atomic_state *state)
11225 {
11226         struct intel_plane *plane, *linked;
11227         struct intel_plane_state *plane_state, *linked_plane_state;
11228         int i;
11229
11230         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11231                 linked = plane_state->linked_plane;
11232
11233                 if (!linked)
11234                         continue;
11235
11236                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11237                 if (IS_ERR(linked_plane_state))
11238                         return PTR_ERR(linked_plane_state);
11239
11240                 WARN_ON(linked_plane_state->linked_plane != plane);
11241                 WARN_ON(linked_plane_state->slave == plane_state->slave);
11242         }
11243
11244         return 0;
11245 }
11246
/*
 * On gen11+, planar (NV12-style) YUV planes need a second "Y" plane.
 * Tear down all existing plane links for this crtc, then pair each plane
 * in nv12_planes with a free Y-capable plane. Returns 0 on success,
 * -EINVAL if not enough Y planes are free, or an error from acquiring
 * a plane's state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
			continue;

		plane_state->linked_plane = NULL;
		if (plane_state->slave && !plane_state->base.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->slave = false;
	}

	/* No planar YUV planes on this crtc: nothing to pair up. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a Y-capable plane that is not already in use. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Link the pair both ways; the Y plane is the slave. */
		plane_state->linked_plane = linked;

		linked_state->slave = true;
		linked_state->linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
	}

	return 0;
}
11318
/*
 * drm_crtc_helper_funcs .atomic_check hook: validate and fill in the new
 * crtc state — clocks, color management, watermarks, scalers and IPS.
 * Returns 0 on success or a negative error code from one of the
 * platform-specific compute hooks.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Pre-gen5 (minus g4x): turning the pipe off needs a wm update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* Compute clocks/dividers for a full modeset; the shared DPLL must
	 * not have been assigned yet at this point. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (mode_changed || pipe_config->update_pipe ||
	    crtc_state->color_mgmt_changed) {
		ret = intel_color_check(pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm computation requires target wm first. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* gen9+: scaler and NV12 plane-link bookkeeping. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || pipe_config->update_pipe)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = icl_check_nv12_planes(pipe_config);
		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
11393
/* CRTC helper vtable; only the atomic check step is implemented here. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
11397
/*
 * Sync each connector's atomic state (best_encoder/crtc and the connector
 * reference held by a crtc binding) with the legacy encoder/crtc pointers
 * currently set on the connector.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference the old crtc binding held. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* New crtc binding takes a connector reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
11422
11423 static int
11424 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11425                       struct intel_crtc_state *pipe_config)
11426 {
11427         struct drm_connector *connector = conn_state->connector;
11428         const struct drm_display_info *info = &connector->display_info;
11429         int bpp;
11430
11431         switch (conn_state->max_bpc) {
11432         case 6 ... 7:
11433                 bpp = 6 * 3;
11434                 break;
11435         case 8 ... 9:
11436                 bpp = 8 * 3;
11437                 break;
11438         case 10 ... 11:
11439                 bpp = 10 * 3;
11440                 break;
11441         case 12:
11442                 bpp = 12 * 3;
11443                 break;
11444         default:
11445                 return -EINVAL;
11446         }
11447
11448         if (bpp < pipe_config->pipe_bpp) {
11449                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11450                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11451                               connector->base.id, connector->name,
11452                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11453                               pipe_config->pipe_bpp);
11454
11455                 pipe_config->pipe_bpp = bpp;
11456         }
11457
11458         return 0;
11459 }
11460
11461 static int
11462 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11463                           struct intel_crtc_state *pipe_config)
11464 {
11465         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11466         struct drm_atomic_state *state = pipe_config->base.state;
11467         struct drm_connector *connector;
11468         struct drm_connector_state *connector_state;
11469         int bpp, i;
11470
11471         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11472             IS_CHERRYVIEW(dev_priv)))
11473                 bpp = 10*3;
11474         else if (INTEL_GEN(dev_priv) >= 5)
11475                 bpp = 12*3;
11476         else
11477                 bpp = 8*3;
11478
11479         pipe_config->pipe_bpp = bpp;
11480
11481         /* Clamp display bpp to connector max bpp */
11482         for_each_new_connector_in_state(state, connector, connector_state, i) {
11483                 int ret;
11484
11485                 if (connector_state->crtc != &crtc->base)
11486                         continue;
11487
11488                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11489                 if (ret)
11490                         return ret;
11491         }
11492
11493         return 0;
11494 }
11495
11496 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11497 {
11498         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11499                         "type: 0x%x flags: 0x%x\n",
11500                 mode->crtc_clock,
11501                 mode->crtc_hdisplay, mode->crtc_hsync_start,
11502                 mode->crtc_hsync_end, mode->crtc_htotal,
11503                 mode->crtc_vdisplay, mode->crtc_vsync_start,
11504                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
11505 }
11506
11507 static inline void
11508 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
11509                       unsigned int lane_count, struct intel_link_m_n *m_n)
11510 {
11511         DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11512                       id, lane_count,
11513                       m_n->gmch_m, m_n->gmch_n,
11514                       m_n->link_m, m_n->link_n, m_n->tu);
11515 }
11516
11517 static void
11518 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11519                      const union hdmi_infoframe *frame)
11520 {
11521         if ((drm_debug & DRM_UT_KMS) == 0)
11522                 return;
11523
11524         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11525 }
11526
/* Expand INTEL_OUTPUT_FOO into a designated-initializer entry "FOO". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/*
 * Human-readable names for the intel_output_type enum, indexed by enum
 * value; consumed by snprintf_output_types() for debug output.
 */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
11545
11546 static void snprintf_output_types(char *buf, size_t len,
11547                                   unsigned int output_types)
11548 {
11549         char *str = buf;
11550         int i;
11551
11552         str[0] = '\0';
11553
11554         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11555                 int r;
11556
11557                 if ((output_types & BIT(i)) == 0)
11558                         continue;
11559
11560                 r = snprintf(str, len, "%s%s",
11561                              str != buf ? "," : "", output_type_str[i]);
11562                 if (r >= len)
11563                         break;
11564                 str += r;
11565                 len -= r;
11566
11567                 output_types &= ~BIT(i);
11568         }
11569
11570         WARN_ON_ONCE(output_types != 0);
11571 }
11572
/* Debug names for enum intel_output_format, indexed by enum value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
11579
11580 static const char *output_formats(enum intel_output_format format)
11581 {
11582         if (format >= ARRAY_SIZE(output_format_str))
11583                 format = INTEL_OUTPUT_FORMAT_INVALID;
11584         return output_format_str[format];
11585 }
11586
/*
 * Dump the full contents of a crtc state to the debug log: output
 * types/format, transcoder, link m/n values, infoframes, modes/timings,
 * scaler and panel fitter state, and finally every plane attached to
 * this crtc's pipe. @context tags the dump (e.g. which caller).
 * Debug-only; has no effect on hardware or software state.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	/* Render the output_types bitmask as a comma separated name list. */
	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("output format: %s\n",
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	/* FDI link m/n only exists when routed through the PCH. */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* Second m/n set is only programmed for DRRS (dual refresh). */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
		      pipe_config->infoframes.enable);

	/* Decode each infoframe only if its enable bit is set. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	/* Requested mode = what userspace asked for; adjusted = post-fixup. */
	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	/* Pipe scalers exist on gen9+ only. */
	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have a pipe-internal panel fitter, others use PCH. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* Walk all planes and report the ones assigned to this pipe. */
	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		/* No fb attached means the plane is disabled. */
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		/* src coords are 16.16 fixed point, hence the >> 16. */
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11709
/*
 * Verify no digital port is claimed by more than one encoder, and that
 * MST and SST/HDMI are not mixed on the same port. Returns true when the
 * configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;		/* bitmask of SST/HDMI ports */
	unsigned int used_mst_ports = 0;	/* bitmask of MST ports */
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state in @state; fall back to the current one. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			/* Declaration inside the switch, shared by the cases below. */
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11773
/*
 * Reset the i915-specific portion of @crtc_state to zero while keeping
 * the embedded base drm_crtc_state and a whitelist of fields intact.
 * Returns 0 on success, -ENOMEM if the temporary copy can't be allocated.
 */
static int
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_state *saved_state;

	/* Zeroed scratch copy used to stage the fields we want to keep. */
	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* wm state is only preserved on platforms with pre-computed wms. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	/* base must be the first member for the pointer arithmetic below. */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	/* Copy everything past .base from the (mostly zero) scratch copy. */
	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	kfree(saved_state);
	return 0;
}
11807
/*
 * Compute the full pipe configuration for @crtc: clear stale state, pick
 * a baseline pipe bpp, then let every encoder on the crtc adjust the
 * config, retrying once if the crtc fixup asks for it (RETRY). Returns 0
 * on success, -EDEADLK for atomic lock contention, or another negative
 * error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;	/* allow exactly one bandwidth retry */

	ret = clear_intel_crtc_state(pipe_config);
	if (ret)
		return ret;

	/* Default transcoder: the one matching the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-encoder bpp for the debug print at the end. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is routine lock contention, don't log it. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* A positive RETRY means redo the encoder pass once with the crtc's
	 * bandwidth-constrained settings. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
11944
/*
 * Compare two clock values with a ~5% tolerance. Zero only matches zero,
 * since a zero clock means "not set" rather than an actual frequency.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	/* A zero clock never fuzzily matches a non-zero one. */
	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/* Equivalent to delta being within ~5% of the average clock. */
	return (delta + clock1 + clock2) * 100 / (clock1 + clock2) < 105;
}
11962
11963 static bool
11964 intel_compare_m_n(unsigned int m, unsigned int n,
11965                   unsigned int m2, unsigned int n2,
11966                   bool exact)
11967 {
11968         if (m == m2 && n == n2)
11969                 return true;
11970
11971         if (exact || !m || !n || !m2 || !n2)
11972                 return false;
11973
11974         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11975
11976         if (n > n2) {
11977                 while (n > n2) {
11978                         m2 <<= 1;
11979                         n2 <<= 1;
11980                 }
11981         } else if (n < n2) {
11982                 while (n < n2) {
11983                         m <<= 1;
11984                         n <<= 1;
11985                 }
11986         }
11987
11988         if (n != n2)
11989                 return false;
11990
11991         return intel_fuzzy_clock_check(m, m2);
11992 }
11993
11994 static bool
11995 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11996                        struct intel_link_m_n *m2_n2,
11997                        bool adjust)
11998 {
11999         if (m_n->tu == m2_n2->tu &&
12000             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12001                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12002             intel_compare_m_n(m_n->link_m, m_n->link_n,
12003                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
12004                 if (adjust)
12005                         *m2_n2 = *m_n;
12006
12007                 return true;
12008         }
12009
12010         return false;
12011 }
12012
12013 static bool
12014 intel_compare_infoframe(const union hdmi_infoframe *a,
12015                         const union hdmi_infoframe *b)
12016 {
12017         return memcmp(a, b, sizeof(*a)) == 0;
12018 }
12019
12020 static void
12021 pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
12022                           bool adjust, const char *name,
12023                           const union hdmi_infoframe *a,
12024                           const union hdmi_infoframe *b)
12025 {
12026         if (adjust) {
12027                 if ((drm_debug & DRM_UT_KMS) == 0)
12028                         return;
12029
12030                 drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
12031                 drm_dbg(DRM_UT_KMS, "expected:");
12032                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12033                 drm_dbg(DRM_UT_KMS, "found");
12034                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12035         } else {
12036                 drm_err("mismatch in %s infoframe", name);
12037                 drm_err("expected:");
12038                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12039                 drm_err("found");
12040                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12041         }
12042 }
12043
12044 static void __printf(3, 4)
12045 pipe_config_err(bool adjust, const char *name, const char *format, ...)
12046 {
12047         struct va_format vaf;
12048         va_list args;
12049
12050         va_start(args, format);
12051         vaf.fmt = format;
12052         vaf.va = &args;
12053
12054         if (adjust)
12055                 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
12056         else
12057                 drm_err("mismatch in %s %pV", name, &vaf);
12058
12059         va_end(args);
12060 }
12061
12062 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12063 {
12064         if (i915_modparams.fastboot != -1)
12065                 return i915_modparams.fastboot;
12066
12067         /* Enable fastboot by default on Skylake and newer */
12068         if (INTEL_GEN(dev_priv) >= 9)
12069                 return true;
12070
12071         /* Enable fastboot by default on VLV and CHV */
12072         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12073                 return true;
12074
12075         /* Disabled by default on all others */
12076         return false;
12077 }
12078
12079 static bool
12080 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
12081                           struct intel_crtc_state *current_config,
12082                           struct intel_crtc_state *pipe_config,
12083                           bool adjust)
12084 {
12085         struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
12086         bool ret = true;
12087         bool fixup_inherited = adjust &&
12088                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12089                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12090
12091         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12092                 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12093                 ret = false;
12094         }
12095
12096 #define PIPE_CONF_CHECK_X(name) do { \
12097         if (current_config->name != pipe_config->name) { \
12098                 pipe_config_err(adjust, __stringify(name), \
12099                           "(expected 0x%08x, found 0x%08x)\n", \
12100                           current_config->name, \
12101                           pipe_config->name); \
12102                 ret = false; \
12103         } \
12104 } while (0)
12105
12106 #define PIPE_CONF_CHECK_I(name) do { \
12107         if (current_config->name != pipe_config->name) { \
12108                 pipe_config_err(adjust, __stringify(name), \
12109                           "(expected %i, found %i)\n", \
12110                           current_config->name, \
12111                           pipe_config->name); \
12112                 ret = false; \
12113         } \
12114 } while (0)
12115
12116 #define PIPE_CONF_CHECK_BOOL(name) do { \
12117         if (current_config->name != pipe_config->name) { \
12118                 pipe_config_err(adjust, __stringify(name), \
12119                           "(expected %s, found %s)\n", \
12120                           yesno(current_config->name), \
12121                           yesno(pipe_config->name)); \
12122                 ret = false; \
12123         } \
12124 } while (0)
12125
12126 /*
12127  * Checks state where we only read out the enabling, but not the entire
12128  * state itself (like full infoframes or ELD for audio). These states
12129  * require a full modeset on bootup to fix up.
12130  */
12131 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12132         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12133                 PIPE_CONF_CHECK_BOOL(name); \
12134         } else { \
12135                 pipe_config_err(adjust, __stringify(name), \
12136                           "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12137                           yesno(current_config->name), \
12138                           yesno(pipe_config->name)); \
12139                 ret = false; \
12140         } \
12141 } while (0)
12142
12143 #define PIPE_CONF_CHECK_P(name) do { \
12144         if (current_config->name != pipe_config->name) { \
12145                 pipe_config_err(adjust, __stringify(name), \
12146                           "(expected %p, found %p)\n", \
12147                           current_config->name, \
12148                           pipe_config->name); \
12149                 ret = false; \
12150         } \
12151 } while (0)
12152
12153 #define PIPE_CONF_CHECK_M_N(name) do { \
12154         if (!intel_compare_link_m_n(&current_config->name, \
12155                                     &pipe_config->name,\
12156                                     adjust)) { \
12157                 pipe_config_err(adjust, __stringify(name), \
12158                           "(expected tu %i gmch %i/%i link %i/%i, " \
12159                           "found tu %i, gmch %i/%i link %i/%i)\n", \
12160                           current_config->name.tu, \
12161                           current_config->name.gmch_m, \
12162                           current_config->name.gmch_n, \
12163                           current_config->name.link_m, \
12164                           current_config->name.link_n, \
12165                           pipe_config->name.tu, \
12166                           pipe_config->name.gmch_m, \
12167                           pipe_config->name.gmch_n, \
12168                           pipe_config->name.link_m, \
12169                           pipe_config->name.link_n); \
12170                 ret = false; \
12171         } \
12172 } while (0)
12173
/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR (DRRS): the hw may be programmed with
 * either the primary (name) or the alternate (alt_name) M/N values, so a
 * match against either set is accepted.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 * On mismatch it logs both acceptable sets plus the value found, and marks
 * the overall compare as failed (ret = false).
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)
12206
/*
 * Compare only the bits selected by @mask of the sw and hw copies of
 * @name (XOR isolates differing bits, then the mask filters them).
 * Logs the masked expected/found values and fails the compare on mismatch.
 */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(%x) (expected %i, found %i)\n", \
			  (mask), \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)
12217
/*
 * Clock comparison with tolerance: uses intel_fuzzy_clock_check() instead
 * of exact equality, since readout of clocks can differ slightly from the
 * computed sw value due to rounding in the PLL math.
 */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)
12227
/*
 * Compare the sw and hw copies of infoframe @name; on mismatch a dedicated
 * error helper dumps both infoframes (they are structs, so the generic
 * expected/found formatting above cannot be used) and the compare fails.
 */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
					  &current_config->infoframes.name, \
					  &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)
12237
/* True if either the sw or the hw state carries quirk bit(s) @quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))
12240
12241         PIPE_CONF_CHECK_I(cpu_transcoder);
12242
12243         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12244         PIPE_CONF_CHECK_I(fdi_lanes);
12245         PIPE_CONF_CHECK_M_N(fdi_m_n);
12246
12247         PIPE_CONF_CHECK_I(lane_count);
12248         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12249
12250         if (INTEL_GEN(dev_priv) < 8) {
12251                 PIPE_CONF_CHECK_M_N(dp_m_n);
12252
12253                 if (current_config->has_drrs)
12254                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
12255         } else
12256                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12257
12258         PIPE_CONF_CHECK_X(output_types);
12259
12260         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12261         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12262         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12263         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12264         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12265         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12266
12267         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12268         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12269         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12270         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12271         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12272         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12273
12274         PIPE_CONF_CHECK_I(pixel_multiplier);
12275         PIPE_CONF_CHECK_I(output_format);
12276         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12277         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12278             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12279                 PIPE_CONF_CHECK_BOOL(limited_color_range);
12280
12281         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12282         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12283         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12284
12285         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12286
12287         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12288                               DRM_MODE_FLAG_INTERLACE);
12289
12290         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12291                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12292                                       DRM_MODE_FLAG_PHSYNC);
12293                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12294                                       DRM_MODE_FLAG_NHSYNC);
12295                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12296                                       DRM_MODE_FLAG_PVSYNC);
12297                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12298                                       DRM_MODE_FLAG_NVSYNC);
12299         }
12300
12301         PIPE_CONF_CHECK_X(gmch_pfit.control);
12302         /* pfit ratios are autocomputed by the hw on gen4+ */
12303         if (INTEL_GEN(dev_priv) < 4)
12304                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12305         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12306
12307         /*
12308          * Changing the EDP transcoder input mux
12309          * (A_ONOFF vs. A_ON) requires a full modeset.
12310          */
12311         if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
12312             current_config->cpu_transcoder == TRANSCODER_EDP)
12313                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12314
12315         if (!adjust) {
12316                 PIPE_CONF_CHECK_I(pipe_src_w);
12317                 PIPE_CONF_CHECK_I(pipe_src_h);
12318
12319                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12320                 if (current_config->pch_pfit.enabled) {
12321                         PIPE_CONF_CHECK_X(pch_pfit.pos);
12322                         PIPE_CONF_CHECK_X(pch_pfit.size);
12323                 }
12324
12325                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12326                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12327
12328                 PIPE_CONF_CHECK_X(gamma_mode);
12329                 if (IS_CHERRYVIEW(dev_priv))
12330                         PIPE_CONF_CHECK_X(cgm_mode);
12331                 else
12332                         PIPE_CONF_CHECK_X(csc_mode);
12333                 PIPE_CONF_CHECK_BOOL(gamma_enable);
12334                 PIPE_CONF_CHECK_BOOL(csc_enable);
12335         }
12336
12337         PIPE_CONF_CHECK_BOOL(double_wide);
12338
12339         PIPE_CONF_CHECK_P(shared_dpll);
12340         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12341         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12342         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12343         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12344         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12345         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12346         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12347         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12348         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12349         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12350         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12351         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12352         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12353         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12354         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12355         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12356         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12357         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12358         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12359         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12360         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12361         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12362         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12363         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12364         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12365         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12366         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12367         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12368         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12369         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12370         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12371
12372         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12373         PIPE_CONF_CHECK_X(dsi_pll.div);
12374
12375         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12376                 PIPE_CONF_CHECK_I(pipe_bpp);
12377
12378         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12379         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12380
12381         PIPE_CONF_CHECK_I(min_voltage_level);
12382
12383         PIPE_CONF_CHECK_X(infoframes.enable);
12384         PIPE_CONF_CHECK_X(infoframes.gcp);
12385         PIPE_CONF_CHECK_INFOFRAME(avi);
12386         PIPE_CONF_CHECK_INFOFRAME(spd);
12387         PIPE_CONF_CHECK_INFOFRAME(hdmi);
12388
12389 #undef PIPE_CONF_CHECK_X
12390 #undef PIPE_CONF_CHECK_I
12391 #undef PIPE_CONF_CHECK_BOOL
12392 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12393 #undef PIPE_CONF_CHECK_P
12394 #undef PIPE_CONF_CHECK_FLAGS
12395 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12396 #undef PIPE_CONF_QUIRK
12397
12398         return ret;
12399 }
12400
12401 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12402                                            const struct intel_crtc_state *pipe_config)
12403 {
12404         if (pipe_config->has_pch_encoder) {
12405                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12406                                                             &pipe_config->fdi_m_n);
12407                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12408
12409                 /*
12410                  * FDI already provided one idea for the dotclock.
12411                  * Yell if the encoder disagrees.
12412                  */
12413                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12414                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12415                      fdi_dotclock, dotclock);
12416         }
12417 }
12418
/*
 * Read the SKL+ watermark and DDB (data buffer) state back from the
 * hardware and compare it against the sw state computed for this crtc,
 * logging a DRM_ERROR for every mismatch.  Only runs on gen9+ and only
 * for active crtcs.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	/* Scratch space for hw readout; heap-allocated because it is large. */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_ddb_allocation ddb;
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_ddb_allocation *sw_ddb;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		/* Best-effort verification; silently skip on OOM. */
		return;

	/* Read back hw watermarks and per-plane DDB allocations. */
	skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);

	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* DBUF slice enablement only exists (and matters) on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
			  sw_ddb->enabled_slices,
			  hw->ddb.enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks: compare every level up to the platform max. */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark is checked separately from the levels. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated it's ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): the comment above describes a conditional skip, but
	 * the code unconditionally checks the cursor (if (1)) — confirm
	 * whether a visibility check was intended here.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
12558
12559 static void
12560 verify_connector_state(struct drm_device *dev,
12561                        struct drm_atomic_state *state,
12562                        struct drm_crtc *crtc)
12563 {
12564         struct drm_connector *connector;
12565         struct drm_connector_state *new_conn_state;
12566         int i;
12567
12568         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12569                 struct drm_encoder *encoder = connector->encoder;
12570                 struct drm_crtc_state *crtc_state = NULL;
12571
12572                 if (new_conn_state->crtc != crtc)
12573                         continue;
12574
12575                 if (crtc)
12576                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12577
12578                 intel_connector_verify_state(crtc_state, new_conn_state);
12579
12580                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12581                      "connector's atomic encoder doesn't match legacy encoder\n");
12582         }
12583 }
12584
12585 static void
12586 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12587 {
12588         struct intel_encoder *encoder;
12589         struct drm_connector *connector;
12590         struct drm_connector_state *old_conn_state, *new_conn_state;
12591         int i;
12592
12593         for_each_intel_encoder(dev, encoder) {
12594                 bool enabled = false, found = false;
12595                 enum pipe pipe;
12596
12597                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12598                               encoder->base.base.id,
12599                               encoder->base.name);
12600
12601                 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12602                                                    new_conn_state, i) {
12603                         if (old_conn_state->best_encoder == &encoder->base)
12604                                 found = true;
12605
12606                         if (new_conn_state->best_encoder != &encoder->base)
12607                                 continue;
12608                         found = enabled = true;
12609
12610                         I915_STATE_WARN(new_conn_state->crtc !=
12611                                         encoder->base.crtc,
12612                              "connector's crtc doesn't match encoder crtc\n");
12613                 }
12614
12615                 if (!found)
12616                         continue;
12617
12618                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12619                      "encoder's enabled state mismatch "
12620                      "(expected %i, found %i)\n",
12621                      !!encoder->base.crtc, enabled);
12622
12623                 if (!encoder->base.crtc) {
12624                         bool active;
12625
12626                         active = encoder->get_hw_state(encoder, &pipe);
12627                         I915_STATE_WARN(active,
12628                              "encoder detached but still enabled on pipe %c.\n",
12629                              pipe_name(pipe));
12630                 }
12631         }
12632 }
12633
/*
 * Read the full pipe config back from the hardware and compare it against
 * the committed sw state for @crtc.  The hw readout is stored in the
 * (no longer needed) old_crtc_state's memory, which is destroyed and
 * reused in place — so @old_crtc_state must not be used by the caller
 * after this returns.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/*
	 * Free the old state's internals, then recycle its storage as the
	 * destination for the hw readout.  Keep the ->state backpointer
	 * alive across the memset so helpers can still reach the commit.
	 */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	/* Each encoder on the crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's hw config into the pipe readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Inactive pipes carry no meaningful config to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Strict (adjust=false) comparison of sw state vs. hw readout. */
	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
12703
12704 static void
12705 intel_verify_planes(struct intel_atomic_state *state)
12706 {
12707         struct intel_plane *plane;
12708         const struct intel_plane_state *plane_state;
12709         int i;
12710
12711         for_each_new_intel_plane_in_state(state, plane,
12712                                           plane_state, i)
12713                 assert_plane(plane, plane_state->slave ||
12714                              plane_state->base.visible);
12715 }
12716
/*
 * Cross-check one shared DPLL's software tracking (on/active_mask/
 * state.crtc_mask) against its hardware state.  With @crtc == NULL only
 * the pll-global invariants are verified; otherwise the given crtc's
 * membership in the pll's masks is checked against @new_state->active.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls are exempt from on/off tracking checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* active crtcs must be a subset of the referencing crtcs */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	/* Active crtcs must appear in active_mask, inactive ones must not. */
	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* An enabled pll's cached hw state must match the real registers. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
12771
12772 static void
12773 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12774                          struct drm_crtc_state *old_crtc_state,
12775                          struct drm_crtc_state *new_crtc_state)
12776 {
12777         struct drm_i915_private *dev_priv = to_i915(dev);
12778         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12779         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12780
12781         if (new_state->shared_dpll)
12782                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12783
12784         if (old_state->shared_dpll &&
12785             old_state->shared_dpll != new_state->shared_dpll) {
12786                 unsigned int crtc_mask = drm_crtc_mask(crtc);
12787                 struct intel_shared_dpll *pll = old_state->shared_dpll;
12788
12789                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12790                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12791                                 pipe_name(drm_crtc_index(crtc)));
12792                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12793                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12794                                 pipe_name(drm_crtc_index(crtc)));
12795         }
12796 }
12797
12798 static void
12799 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12800                           struct drm_atomic_state *state,
12801                           struct drm_crtc_state *old_state,
12802                           struct drm_crtc_state *new_state)
12803 {
12804         if (!needs_modeset(new_state) &&
12805             !to_intel_crtc_state(new_state)->update_pipe)
12806                 return;
12807
12808         verify_wm_state(crtc, new_state);
12809         verify_connector_state(crtc->dev, state, crtc);
12810         verify_crtc_state(crtc, old_state, new_state);
12811         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12812 }
12813
12814 static void
12815 verify_disabled_dpll_state(struct drm_device *dev)
12816 {
12817         struct drm_i915_private *dev_priv = to_i915(dev);
12818         int i;
12819
12820         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12821                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12822 }
12823
12824 static void
12825 intel_modeset_verify_disabled(struct drm_device *dev,
12826                               struct drm_atomic_state *state)
12827 {
12828         verify_encoder_state(dev, state);
12829         verify_connector_state(dev, state, NULL);
12830         verify_disabled_dpll_state(dev);
12831 }
12832
12833 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
12834 {
12835         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
12836         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12837
12838         /*
12839          * The scanline counter increments at the leading edge of hsync.
12840          *
12841          * On most platforms it starts counting from vtotal-1 on the
12842          * first active line. That means the scanline counter value is
12843          * always one less than what we would expect. Ie. just after
12844          * start of vblank, which also occurs at start of hsync (on the
12845          * last active line), the scanline counter will read vblank_start-1.
12846          *
12847          * On gen2 the scanline counter starts counting from 1 instead
12848          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12849          * to keep the value positive), instead of adding one.
12850          *
12851          * On HSW+ the behaviour of the scanline counter depends on the output
12852          * type. For DP ports it behaves like most other platforms, but on HDMI
12853          * there's an extra 1 line difference. So we need to add two instead of
12854          * one to the value.
12855          *
12856          * On VLV/CHV DSI the scanline counter would appear to increment
12857          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12858          * that means we can't tell whether we're in vblank or not while
12859          * we're on that particular line. We must still set scanline_offset
12860          * to 1 so that the vblank timestamps come out correct when we query
12861          * the scanline counter from within the vblank interrupt handler.
12862          * However if queried just before the start of vblank we'll get an
12863          * answer that's slightly in the future.
12864          */
12865         if (IS_GEN(dev_priv, 2)) {
12866                 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12867                 int vtotal;
12868
12869                 vtotal = adjusted_mode->crtc_vtotal;
12870                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12871                         vtotal /= 2;
12872
12873                 crtc->scanline_offset = vtotal - 1;
12874         } else if (HAS_DDI(dev_priv) &&
12875                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
12876                 crtc->scanline_offset = 2;
12877         } else
12878                 crtc->scanline_offset = 1;
12879 }
12880
12881 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12882 {
12883         struct drm_device *dev = state->dev;
12884         struct drm_i915_private *dev_priv = to_i915(dev);
12885         struct drm_crtc *crtc;
12886         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12887         int i;
12888
12889         if (!dev_priv->display.crtc_compute_clock)
12890                 return;
12891
12892         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12893                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12894                 struct intel_shared_dpll *old_dpll =
12895                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12896
12897                 if (!needs_modeset(new_crtc_state))
12898                         continue;
12899
12900                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12901
12902                 if (!old_dpll)
12903                         continue;
12904
12905                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12906         }
12907 }
12908
12909 /*
12910  * This implements the workaround described in the "notes" section of the mode
12911  * set sequence documentation. When going from no pipes or single pipe to
12912  * multiple pipes, and planes are enabled after the pipe, we need to wait at
12913  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12914  */
12915 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12916 {
12917         struct drm_crtc_state *crtc_state;
12918         struct intel_crtc *intel_crtc;
12919         struct drm_crtc *crtc;
12920         struct intel_crtc_state *first_crtc_state = NULL;
12921         struct intel_crtc_state *other_crtc_state = NULL;
12922         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12923         int i;
12924
12925         /* look at all crtc's that are going to be enabled in during modeset */
12926         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12927                 intel_crtc = to_intel_crtc(crtc);
12928
12929                 if (!crtc_state->active || !needs_modeset(crtc_state))
12930                         continue;
12931
12932                 if (first_crtc_state) {
12933                         other_crtc_state = to_intel_crtc_state(crtc_state);
12934                         break;
12935                 } else {
12936                         first_crtc_state = to_intel_crtc_state(crtc_state);
12937                         first_pipe = intel_crtc->pipe;
12938                 }
12939         }
12940
12941         /* No workaround needed? */
12942         if (!first_crtc_state)
12943                 return 0;
12944
12945         /* w/a possibly needed, check how many crtc's are already enabled. */
12946         for_each_intel_crtc(state->dev, intel_crtc) {
12947                 struct intel_crtc_state *pipe_config;
12948
12949                 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12950                 if (IS_ERR(pipe_config))
12951                         return PTR_ERR(pipe_config);
12952
12953                 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12954
12955                 if (!pipe_config->base.active ||
12956                     needs_modeset(&pipe_config->base))
12957                         continue;
12958
12959                 /* 2 or more enabled crtcs means no need for w/a */
12960                 if (enabled_pipe != INVALID_PIPE)
12961                         return 0;
12962
12963                 enabled_pipe = intel_crtc->pipe;
12964         }
12965
12966         if (enabled_pipe != INVALID_PIPE)
12967                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12968         else if (other_crtc_state)
12969                 other_crtc_state->hsw_workaround_pipe = first_pipe;
12970
12971         return 0;
12972 }
12973
12974 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12975 {
12976         struct drm_crtc *crtc;
12977
12978         /* Add all pipes to the state */
12979         for_each_crtc(state->dev, crtc) {
12980                 struct drm_crtc_state *crtc_state;
12981
12982                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12983                 if (IS_ERR(crtc_state))
12984                         return PTR_ERR(crtc_state);
12985         }
12986
12987         return 0;
12988 }
12989
12990 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12991 {
12992         struct drm_crtc *crtc;
12993
12994         /*
12995          * Add all pipes to the state, and force
12996          * a modeset on all the active ones.
12997          */
12998         for_each_crtc(state->dev, crtc) {
12999                 struct drm_crtc_state *crtc_state;
13000                 int ret;
13001
13002                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13003                 if (IS_ERR(crtc_state))
13004                         return PTR_ERR(crtc_state);
13005
13006                 if (!crtc_state->active || needs_modeset(crtc_state))
13007                         continue;
13008
13009                 crtc_state->mode_changed = true;
13010
13011                 ret = drm_atomic_add_affected_connectors(state, crtc);
13012                 if (ret)
13013                         return ret;
13014
13015                 ret = drm_atomic_add_affected_planes(state, crtc);
13016                 if (ret)
13017                         return ret;
13018         }
13019
13020         return 0;
13021 }
13022
/*
 * Global (not per-crtc) validation and bookkeeping for a state that
 * contains at least one full modeset: rejects conflicting digital port
 * setups, records the new active pipe mask, recomputes cdclk and decides
 * how invasive the cdclk change must be (nothing / lock all pipes /
 * modeset all pipes), clears stale shared DPLLs and applies the HSW
 * planes workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* keep the current setting */
	if (!intel_state->cdclk.force_min_cdclk_changed)
		intel_state->cdclk.force_min_cdclk =
			dev_priv->cdclk.force_min_cdclk;

	/* Start from the current device state when computing the new one. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;
	intel_state->cdclk.pipe = INVALID_PIPE;

	/* Fold this state's enables/disables into the active pipe mask. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		enum pipe pipe;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/*
		 * With exactly one active pipe (power-of-two mask) that is
		 * not itself being modeset, remember it so the cdclk code
		 * can consider a cheaper cd2x update; otherwise force
		 * INVALID_PIPE.
		 */
		if (is_power_of_2(intel_state->active_crtcs)) {
			struct drm_crtc *crtc;
			struct drm_crtc_state *crtc_state;

			pipe = ilog2(intel_state->active_crtcs);
			crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
			if (crtc_state && needs_modeset(crtc_state))
				pipe = INVALID_PIPE;
		} else {
			pipe = INVALID_PIPE;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (pipe != INVALID_PIPE &&
		    intel_cdclk_needs_cd2x_update(dev_priv,
						  &dev_priv->cdclk.actual,
						  &intel_state->cdclk.actual)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;

			intel_state->cdclk.pipe = pipe;
		} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
						     &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;

			intel_state->cdclk.pipe = INVALID_PIPE;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13130
13131 /*
13132  * Handle calculation of various watermark data at the end of the atomic check
13133  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13134  * handlers to ensure that all derived state has been updated.
13135  */
13136 static int calc_watermark_data(struct intel_atomic_state *state)
13137 {
13138         struct drm_device *dev = state->base.dev;
13139         struct drm_i915_private *dev_priv = to_i915(dev);
13140
13141         /* Is there platform-specific watermark information to calculate? */
13142         if (dev_priv->display.compute_global_watermarks)
13143                 return dev_priv->display.compute_global_watermarks(state);
13144
13145         return 0;
13146 }
13147
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915's atomic_check implementation: computes the derived pipe
 * configuration for every crtc needing a modeset, downgrades a modeset
 * to a fastset when the new config compares equal to the old one, and
 * then runs the global modeset/plane/watermark checks.
 *
 * Returns 0 on success or a negative error code (-EDEADLK triggers the
 * atomic backoff/retry path in the caller).
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	/* A forced min-cdclk change alone already requires the modeset checks. */
	bool any_ms = intel_state->cdclk.force_min_cdclk_changed;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      crtc_state, i) {
		if (crtc_state->mode.private_flags !=
		    old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		if (!needs_modeset(crtc_state))
			continue;

		/* A crtc being disabled still counts as a modeset. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret == -EDEADLK)
			return ret;
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * New config equivalent to the old one: downgrade the
		 * full modeset to a fastset (pipe update without
		 * disabling the pipe).
		 */
		if (intel_pipe_config_compare(dev_priv,
					to_intel_crtc_state(old_crtc_state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		/* No modeset anywhere: carry over the current logical cdclk. */
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = icl_add_linked_planes(intel_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, intel_state);
	return calc_watermark_data(intel_state);
}
13235
/* Thin wrapper: delegate all plane preparation to the DRM atomic helper. */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
13241
13242 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13243 {
13244         struct drm_device *dev = crtc->base.dev;
13245         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13246
13247         if (!vblank->max_vblank_count)
13248                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13249
13250         return dev->driver->get_vblank_counter(dev, crtc->pipe);
13251 }
13252
/*
 * Commit the new state of a single active crtc: a full crtc enable on a
 * modeset, or a pre-plane update (plus optional encoder pipe update) on
 * a fastset, followed by FBC bookkeeping and the plane updates bracketed
 * by begin/finish_crtc_commit.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* New primary plane state; NULL if the primary isn't in this commit. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		update_scanline_offset(pipe_config);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);

		if (pipe_config->update_pipe)
			intel_encoders_update_pipe(crtc, pipe_config, state);
	}

	/* Disable FBC on a fastset that no longer allows it. */
	if (pipe_config->update_pipe && !pipe_config->enable_fbc)
		intel_fbc_disable(intel_crtc);
	else if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);

	/* Gen9+ uses the skl universal plane path, older gens the i9xx one. */
	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
	else
		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);

	intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
}
13295
13296 static void intel_update_crtcs(struct drm_atomic_state *state)
13297 {
13298         struct drm_crtc *crtc;
13299         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13300         int i;
13301
13302         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13303                 if (!new_crtc_state->active)
13304                         continue;
13305
13306                 intel_update_crtc(crtc, state, old_crtc_state,
13307                                   new_crtc_state);
13308         }
13309 }
13310
/*
 * SKL+ variant of ->update_crtcs: commits the crtcs in an order that
 * guarantees a pipe's new DDB allocation never overlaps the stale
 * allocation of a pipe that hasn't been updated yet, inserting vblank
 * waits where a new allocation has to settle first. Also enables or
 * disables the second DBuf slice around the update on gen11+.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;	/* mask of crtcs committed so far */
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	/* Seed the tracking table with the old (current) DDB allocations. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			if (updated & cmask || !cstate->base.active)
				continue;

			/* Defer while the new DDB overlaps a not-yet-updated pipe. */
			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
							entries,
							INTEL_INFO(dev_priv)->num_pipes, i))
				continue;

			updated |= cmask;
			entries[i] = cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
13390
13391 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13392 {
13393         struct intel_atomic_state *state, *next;
13394         struct llist_node *freed;
13395
13396         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13397         llist_for_each_entry_safe(state, next, freed, freed)
13398                 drm_atomic_state_put(&state->base);
13399 }
13400
13401 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13402 {
13403         struct drm_i915_private *dev_priv =
13404                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13405
13406         intel_atomic_helper_free_state(dev_priv);
13407 }
13408
/*
 * Sleep until either the commit-ready fence of @intel_state has
 * signalled or a reset-induced modeset (I915_RESET_MODESET) is pending,
 * whichever comes first. Waits uninterruptibly on both wait queues at
 * once so either event wakes us.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* (Re-)arm both waiters before re-checking the conditions. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
13432
13433 static void intel_atomic_cleanup_work(struct work_struct *work)
13434 {
13435         struct drm_atomic_state *state =
13436                 container_of(work, struct drm_atomic_state, commit_work);
13437         struct drm_i915_private *i915 = to_i915(state->dev);
13438
13439         drm_atomic_helper_cleanup_planes(&i915->drm, state);
13440         drm_atomic_helper_commit_cleanup_done(state);
13441         drm_atomic_state_put(state);
13442
13443         intel_atomic_helper_free_state(i915);
13444 }
13445
13446 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13447 {
13448         struct drm_device *dev = state->dev;
13449         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13450         struct drm_i915_private *dev_priv = to_i915(dev);
13451         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13452         struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13453         struct drm_crtc *crtc;
13454         struct intel_crtc *intel_crtc;
13455         u64 put_domains[I915_MAX_PIPES] = {};
13456         intel_wakeref_t wakeref = 0;
13457         int i;
13458
13459         intel_atomic_commit_fence_wait(intel_state);
13460
13461         drm_atomic_helper_wait_for_dependencies(state);
13462
13463         if (intel_state->modeset)
13464                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13465
13466         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13467                 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13468                 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13469                 intel_crtc = to_intel_crtc(crtc);
13470
13471                 if (needs_modeset(new_crtc_state) ||
13472                     to_intel_crtc_state(new_crtc_state)->update_pipe) {
13473
13474                         put_domains[intel_crtc->pipe] =
13475                                 modeset_get_crtc_power_domains(crtc,
13476                                         new_intel_crtc_state);
13477                 }
13478
13479                 if (!needs_modeset(new_crtc_state))
13480                         continue;
13481
13482                 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13483
13484                 if (old_crtc_state->active) {
13485                         intel_crtc_disable_planes(intel_state, intel_crtc);
13486
13487                         /*
13488                          * We need to disable pipe CRC before disabling the pipe,
13489                          * or we race against vblank off.
13490                          */
13491                         intel_crtc_disable_pipe_crc(intel_crtc);
13492
13493                         dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13494                         intel_crtc->active = false;
13495                         intel_fbc_disable(intel_crtc);
13496                         intel_disable_shared_dpll(old_intel_crtc_state);
13497
13498                         /*
13499                          * Underruns don't always raise
13500                          * interrupts, so check manually.
13501                          */
13502                         intel_check_cpu_fifo_underruns(dev_priv);
13503                         intel_check_pch_fifo_underruns(dev_priv);
13504
13505                         /* FIXME unify this for all platforms */
13506                         if (!new_crtc_state->active &&
13507                             !HAS_GMCH(dev_priv) &&
13508                             dev_priv->display.initial_watermarks)
13509                                 dev_priv->display.initial_watermarks(intel_state,
13510                                                                      new_intel_crtc_state);
13511                 }
13512         }
13513
13514         /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13515         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13516                 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13517
13518         if (intel_state->modeset) {
13519                 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13520
13521                 intel_set_cdclk_pre_plane_update(dev_priv,
13522                                                  &intel_state->cdclk.actual,
13523                                                  &dev_priv->cdclk.actual,
13524                                                  intel_state->cdclk.pipe);
13525
13526                 /*
13527                  * SKL workaround: bspec recommends we disable the SAGV when we
13528                  * have more then one pipe enabled
13529                  */
13530                 if (!intel_can_enable_sagv(state))
13531                         intel_disable_sagv(dev_priv);
13532
13533                 intel_modeset_verify_disabled(dev, state);
13534         }
13535
13536         /* Complete the events for pipes that have now been disabled */
13537         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13538                 bool modeset = needs_modeset(new_crtc_state);
13539
13540                 /* Complete events for now disable pipes here. */
13541                 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13542                         spin_lock_irq(&dev->event_lock);
13543                         drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13544                         spin_unlock_irq(&dev->event_lock);
13545
13546                         new_crtc_state->event = NULL;
13547                 }
13548         }
13549
13550         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13551         dev_priv->display.update_crtcs(state);
13552
13553         if (intel_state->modeset)
13554                 intel_set_cdclk_post_plane_update(dev_priv,
13555                                                   &intel_state->cdclk.actual,
13556                                                   &dev_priv->cdclk.actual,
13557                                                   intel_state->cdclk.pipe);
13558
13559         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13560          * already, but still need the state for the delayed optimization. To
13561          * fix this:
13562          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13563          * - schedule that vblank worker _before_ calling hw_done
13564          * - at the start of commit_tail, cancel it _synchrously
13565          * - switch over to the vblank wait helper in the core after that since
13566          *   we don't need out special handling any more.
13567          */
13568         drm_atomic_helper_wait_for_flip_done(dev, state);
13569
13570         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13571                 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13572
13573                 if (new_crtc_state->active &&
13574                     !needs_modeset(new_crtc_state) &&
13575                     (new_intel_crtc_state->base.color_mgmt_changed ||
13576                      new_intel_crtc_state->update_pipe))
13577                         intel_color_load_luts(new_intel_crtc_state);
13578         }
13579
13580         /*
13581          * Now that the vblank has passed, we can go ahead and program the
13582          * optimal watermarks on platforms that need two-step watermark
13583          * programming.
13584          *
13585          * TODO: Move this (and other cleanup) to an async worker eventually.
13586          */
13587         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13588                 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13589
13590                 if (dev_priv->display.optimize_watermarks)
13591                         dev_priv->display.optimize_watermarks(intel_state,
13592                                                               new_intel_crtc_state);
13593         }
13594
13595         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13596                 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13597
13598                 if (put_domains[i])
13599                         modeset_put_power_domains(dev_priv, put_domains[i]);
13600
13601                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13602         }
13603
13604         if (intel_state->modeset)
13605                 intel_verify_planes(intel_state);
13606
13607         if (intel_state->modeset && intel_can_enable_sagv(state))
13608                 intel_enable_sagv(dev_priv);
13609
13610         drm_atomic_helper_commit_hw_done(state);
13611
13612         if (intel_state->modeset) {
13613                 /* As one of the primary mmio accessors, KMS has a high
13614                  * likelihood of triggering bugs in unclaimed access. After we
13615                  * finish modesetting, see if an error has been flagged, and if
13616                  * so enable debugging for the next modeset - and hope we catch
13617                  * the culprit.
13618                  */
13619                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
13620                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13621         }
13622
13623         /*
13624          * Defer the cleanup of the old state to a separate worker to not
13625          * impede the current task (userspace for blocking modesets) that
13626          * are executed inline. For out-of-line asynchronous modesets/flips,
13627          * deferring to a new worker seems overkill, but we would place a
13628          * schedule point (cond_resched()) here anyway to keep latencies
13629          * down.
13630          */
13631         INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13632         queue_work(system_highpri_wq, &state->commit_work);
13633 }
13634
/*
 * Worker callback for nonblocking commits: recover the atomic state from
 * the embedded work item and run the commit tail on it.
 */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	intel_atomic_commit_tail(state);
}
13642
13643 static int __i915_sw_fence_call
13644 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13645                           enum i915_sw_fence_notify notify)
13646 {
13647         struct intel_atomic_state *state =
13648                 container_of(fence, struct intel_atomic_state, commit_ready);
13649
13650         switch (notify) {
13651         case FENCE_COMPLETE:
13652                 /* we do blocking waits in the worker, nothing to do here */
13653                 break;
13654         case FENCE_FREE:
13655                 {
13656                         struct intel_atomic_helper *helper =
13657                                 &to_i915(state->base.dev)->atomic_helper;
13658
13659                         if (llist_add(&state->freed, &helper->free_list))
13660                                 schedule_work(&helper->free_work);
13661                         break;
13662                 }
13663         }
13664
13665         return NOTIFY_DONE;
13666 }
13667
/*
 * Move each plane's frontbuffer-tracking bit from the old fb's GEM object
 * to the new fb's, so frontbuffer tracking follows the flip for every
 * plane in @state.
 */
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
				  intel_fb_obj(new_plane_state->fb),
				  to_intel_plane(plane)->frontbuffer_bit);
}
13679
13680 /**
13681  * intel_atomic_commit - commit validated state object
13682  * @dev: DRM device
13683  * @state: the top-level driver state object
13684  * @nonblock: nonblocking commit
13685  *
13686  * This function commits a top-level state object that has been validated
13687  * with drm_atomic_helper_check().
13688  *
13689  * RETURNS
13690  * Zero for success or -errno.
13691  */
13692 static int intel_atomic_commit(struct drm_device *dev,
13693                                struct drm_atomic_state *state,
13694                                bool nonblock)
13695 {
13696         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13697         struct drm_i915_private *dev_priv = to_i915(dev);
13698         int ret = 0;
13699
13700         drm_atomic_state_get(state);
13701         i915_sw_fence_init(&intel_state->commit_ready,
13702                            intel_atomic_commit_ready);
13703
13704         /*
13705          * The intel_legacy_cursor_update() fast path takes care
13706          * of avoiding the vblank waits for simple cursor
13707          * movement and flips. For cursor on/off and size changes,
13708          * we want to perform the vblank waits so that watermark
13709          * updates happen during the correct frames. Gen9+ have
13710          * double buffered watermarks and so shouldn't need this.
13711          *
13712          * Unset state->legacy_cursor_update before the call to
13713          * drm_atomic_helper_setup_commit() because otherwise
13714          * drm_atomic_helper_wait_for_flip_done() is a noop and
13715          * we get FIFO underruns because we didn't wait
13716          * for vblank.
13717          *
13718          * FIXME doing watermarks and fb cleanup from a vblank worker
13719          * (assuming we had any) would solve these problems.
13720          */
13721         if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13722                 struct intel_crtc_state *new_crtc_state;
13723                 struct intel_crtc *crtc;
13724                 int i;
13725
13726                 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13727                         if (new_crtc_state->wm.need_postvbl_update ||
13728                             new_crtc_state->update_wm_post)
13729                                 state->legacy_cursor_update = false;
13730         }
13731
13732         ret = intel_atomic_prepare_commit(dev, state);
13733         if (ret) {
13734                 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13735                 i915_sw_fence_commit(&intel_state->commit_ready);
13736                 return ret;
13737         }
13738
13739         ret = drm_atomic_helper_setup_commit(state, nonblock);
13740         if (!ret)
13741                 ret = drm_atomic_helper_swap_state(state, true);
13742
13743         if (ret) {
13744                 i915_sw_fence_commit(&intel_state->commit_ready);
13745
13746                 drm_atomic_helper_cleanup_planes(dev, state);
13747                 return ret;
13748         }
13749         dev_priv->wm.distrust_bios_wm = false;
13750         intel_shared_dpll_swap_state(state);
13751         intel_atomic_track_fbs(state);
13752
13753         if (intel_state->modeset) {
13754                 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13755                        sizeof(intel_state->min_cdclk));
13756                 memcpy(dev_priv->min_voltage_level,
13757                        intel_state->min_voltage_level,
13758                        sizeof(intel_state->min_voltage_level));
13759                 dev_priv->active_crtcs = intel_state->active_crtcs;
13760                 dev_priv->cdclk.force_min_cdclk =
13761                         intel_state->cdclk.force_min_cdclk;
13762
13763                 intel_cdclk_swap_state(intel_state);
13764         }
13765
13766         drm_atomic_state_get(state);
13767         INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13768
13769         i915_sw_fence_commit(&intel_state->commit_ready);
13770         if (nonblock && intel_state->modeset) {
13771                 queue_work(dev_priv->modeset_wq, &state->commit_work);
13772         } else if (nonblock) {
13773                 queue_work(system_unbound_wq, &state->commit_work);
13774         } else {
13775                 if (intel_state->modeset)
13776                         flush_workqueue(dev_priv->modeset_wq);
13777                 intel_atomic_commit_tail(state);
13778         }
13779
13780         return 0;
13781 }
13782
/*
 * CRTC vfuncs: the legacy entry points (gamma, set_config, page_flip) are
 * all serviced via the atomic helpers; the rest are i915-specific state
 * and CRC hooks.
 */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
	.verify_crc_source = intel_crtc_verify_crc_source,
	.get_crc_sources = intel_crtc_get_crc_sources,
};
13794
/*
 * One-shot RPS-boost bookkeeping hung off a crtc's vblank waitqueue;
 * do_rps_boost() consumes and frees it when the vblank fires.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	/* request we may boost; holds a reference dropped in do_rps_boost() */
	struct i915_request *request;
};
13801
/*
 * Vblank waitqueue callback: boost the GPU clocks if the request we are
 * waiting on has not even started by the time the vblank fired, then tear
 * down this one-shot wait entry.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		gen6_rps_boost(rq);
	/* Drop the reference taken in add_rps_boost_after_vblank(). */
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	/* One-shot: remove ourselves and free the heap-allocated entry. */
	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
13823
/*
 * Arm a one-shot boost (see do_rps_boost()) on @crtc's vblank waitqueue
 * for @fence. Silently bails out when boosting does not apply or resources
 * cannot be taken - the boost is purely opportunistic.
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	/* We can only boost our own (i915) requests. */
	if (!dma_fence_is_i915(fence))
		return;

	/* gen6_rps_boost() is a gen6+ mechanism; nothing to do before that. */
	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	/* Need a vblank reference to hang the wait entry off. */
	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		/* Undo the vblank reference taken above. */
		drm_crtc_vblank_put(crtc);
		return;
	}

	/* Reference released in do_rps_boost(). */
	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
13852
/*
 * Pin @plane_state's framebuffer for scanout: attach physical pages first
 * for cursors on platforms that need a physical address, then pin (and
 * possibly fence) the fb object, stashing the resulting vma in the plane
 * state. Returns 0 or a negative error code.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Unpinned again via intel_plane_unpin_fb(). */
	plane_state->vma = vma;

	return 0;
}
13882
13883 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13884 {
13885         struct i915_vma *vma;
13886
13887         vma = fetch_and_zero(&old_plane_state->vma);
13888         if (vma)
13889                 intel_unpin_fb_vma(vma, old_plane_state->flags);
13890 }
13891
/*
 * Raise the scheduling priority of rendering targeting @obj to the display
 * priority, so work feeding a scanout buffer is favoured.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13900
13901 /**
13902  * intel_prepare_plane_fb - Prepare fb for usage on plane
13903  * @plane: drm plane to prepare for
13904  * @new_state: the plane state being prepared
13905  *
13906  * Prepares a framebuffer for usage on a display plane.  Generally this
13907  * involves pinning the underlying object and updating the frontbuffer tracking
13908  * bits.  Some older platforms need special physical address handling for
13909  * cursor planes.
13910  *
13911  * Must be called with struct_mutex held.
13912  *
13913  * Returns 0 on success, negative error code on failure.
13914  */
13915 int
13916 intel_prepare_plane_fb(struct drm_plane *plane,
13917                        struct drm_plane_state *new_state)
13918 {
13919         struct intel_atomic_state *intel_state =
13920                 to_intel_atomic_state(new_state->state);
13921         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13922         struct drm_framebuffer *fb = new_state->fb;
13923         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13924         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13925         int ret;
13926
13927         if (old_obj) {
13928                 struct drm_crtc_state *crtc_state =
13929                         drm_atomic_get_new_crtc_state(new_state->state,
13930                                                       plane->state->crtc);
13931
13932                 /* Big Hammer, we also need to ensure that any pending
13933                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13934                  * current scanout is retired before unpinning the old
13935                  * framebuffer. Note that we rely on userspace rendering
13936                  * into the buffer attached to the pipe they are waiting
13937                  * on. If not, userspace generates a GPU hang with IPEHR
13938                  * point to the MI_WAIT_FOR_EVENT.
13939                  *
13940                  * This should only fail upon a hung GPU, in which case we
13941                  * can safely continue.
13942                  */
13943                 if (needs_modeset(crtc_state)) {
13944                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13945                                                               old_obj->resv, NULL,
13946                                                               false, 0,
13947                                                               GFP_KERNEL);
13948                         if (ret < 0)
13949                                 return ret;
13950                 }
13951         }
13952
13953         if (new_state->fence) { /* explicit fencing */
13954                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13955                                                     new_state->fence,
13956                                                     I915_FENCE_TIMEOUT,
13957                                                     GFP_KERNEL);
13958                 if (ret < 0)
13959                         return ret;
13960         }
13961
13962         if (!obj)
13963                 return 0;
13964
13965         ret = i915_gem_object_pin_pages(obj);
13966         if (ret)
13967                 return ret;
13968
13969         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13970         if (ret) {
13971                 i915_gem_object_unpin_pages(obj);
13972                 return ret;
13973         }
13974
13975         ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
13976
13977         mutex_unlock(&dev_priv->drm.struct_mutex);
13978         i915_gem_object_unpin_pages(obj);
13979         if (ret)
13980                 return ret;
13981
13982         fb_obj_bump_render_priority(obj);
13983         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13984
13985         if (!new_state->fence) { /* implicit fencing */
13986                 struct dma_fence *fence;
13987
13988                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13989                                                       obj->resv, NULL,
13990                                                       false, I915_FENCE_TIMEOUT,
13991                                                       GFP_KERNEL);
13992                 if (ret < 0)
13993                         return ret;
13994
13995                 fence = reservation_object_get_excl_rcu(obj->resv);
13996                 if (fence) {
13997                         add_rps_boost_after_vblank(new_state->crtc, fence);
13998                         dma_fence_put(fence);
13999                 }
14000         } else {
14001                 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14002         }
14003
14004         /*
14005          * We declare pageflips to be interactive and so merit a small bias
14006          * towards upclocking to deliver the frame on time. By only changing
14007          * the RPS thresholds to sample more regularly and aim for higher
14008          * clocks we can hopefully deliver low power workloads (like kodi)
14009          * that are not quite steady state without resorting to forcing
14010          * maximum clocks following a vblank miss (see do_rps_boost()).
14011          */
14012         if (!intel_state->rps_interactive) {
14013                 intel_rps_mark_interactive(dev_priv, true);
14014                 intel_state->rps_interactive = true;
14015         }
14016
14017         return 0;
14018 }
14019
14020 /**
14021  * intel_cleanup_plane_fb - Cleans up an fb after plane use
14022  * @plane: drm plane to clean up for
14023  * @old_state: the state from the previous modeset
14024  *
14025  * Cleans up a framebuffer that has just been removed from a plane.
14026  *
14027  * Must be called with struct_mutex held.
14028  */
14029 void
14030 intel_cleanup_plane_fb(struct drm_plane *plane,
14031                        struct drm_plane_state *old_state)
14032 {
14033         struct intel_atomic_state *intel_state =
14034                 to_intel_atomic_state(old_state->state);
14035         struct drm_i915_private *dev_priv = to_i915(plane->dev);
14036
14037         if (intel_state->rps_interactive) {
14038                 intel_rps_mark_interactive(dev_priv, false);
14039                 intel_state->rps_interactive = false;
14040         }
14041
14042         /* Should only be called after a successful intel_prepare_plane_fb()! */
14043         mutex_lock(&dev_priv->drm.struct_mutex);
14044         intel_plane_unpin_fb(to_intel_plane_state(old_state));
14045         mutex_unlock(&dev_priv->drm.struct_mutex);
14046 }
14047
14048 int
14049 skl_max_scale(const struct intel_crtc_state *crtc_state,
14050               u32 pixel_format)
14051 {
14052         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14053         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14054         int max_scale, mult;
14055         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14056
14057         if (!crtc_state->base.enable)
14058                 return DRM_PLANE_HELPER_NO_SCALING;
14059
14060         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14061         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14062
14063         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14064                 max_dotclk *= 2;
14065
14066         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14067                 return DRM_PLANE_HELPER_NO_SCALING;
14068
14069         /*
14070          * skl max scale is lower of:
14071          *    close to 3 but not 3, -1 is for that purpose
14072          *            or
14073          *    cdclk/crtc_clock
14074          */
14075         mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14076         tmpclk1 = (1 << 16) * mult - 1;
14077         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14078         max_scale = min(tmpclk1, tmpclk2);
14079
14080         return max_scale;
14081 }
14082
/*
 * Per-crtc start-of-commit hook: enter the vblank-evasion window, and for
 * non-modeset (fastset) updates apply the color management and pipe config
 * changes inside that window. Watermarks are always updated last.
 */
static void intel_begin_crtc_commit(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(&new_crtc_state->base);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	/* Full modesets program the pipe elsewhere; skip the fastset path. */
	if (modeset)
		goto out;

	if (new_crtc_state->base.color_mgmt_changed ||
	    new_crtc_state->update_pipe)
		intel_color_commit(new_crtc_state);

	if (new_crtc_state->update_pipe)
		intel_update_pipe_config(old_crtc_state, new_crtc_state);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(new_crtc_state);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}
14113
14114 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14115                                   struct intel_crtc_state *crtc_state)
14116 {
14117         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14118
14119         if (!IS_GEN(dev_priv, 2))
14120                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14121
14122         if (crtc_state->has_pch_encoder) {
14123                 enum pipe pch_transcoder =
14124                         intel_crtc_pch_transcoder(crtc);
14125
14126                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14127         }
14128 }
14129
/*
 * Per-crtc end-of-commit hook: leave the vblank-evasion window, and re-arm
 * FIFO underrun reporting after a fastset of a state inherited from the
 * BIOS/boot (I915_MODE_FLAG_INHERITED).
 */
static void intel_finish_crtc_commit(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	if (new_crtc_state->update_pipe &&
	    !needs_modeset(&new_crtc_state->base) &&
	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14145
14146 /**
14147  * intel_plane_destroy - destroy a plane
14148  * @plane: plane to destroy
14149  *
14150  * Common destruction function for all types of planes (primary, cursor,
14151  * sprite).
14152  */
14153 void intel_plane_destroy(struct drm_plane *plane)
14154 {
14155         drm_plane_cleanup(plane);
14156         kfree(to_intel_plane(plane));
14157 }
14158
14159 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14160                                             u32 format, u64 modifier)
14161 {
14162         switch (modifier) {
14163         case DRM_FORMAT_MOD_LINEAR:
14164         case I915_FORMAT_MOD_X_TILED:
14165                 break;
14166         default:
14167                 return false;
14168         }
14169
14170         switch (format) {
14171         case DRM_FORMAT_C8:
14172         case DRM_FORMAT_RGB565:
14173         case DRM_FORMAT_XRGB1555:
14174         case DRM_FORMAT_XRGB8888:
14175                 return modifier == DRM_FORMAT_MOD_LINEAR ||
14176                         modifier == I915_FORMAT_MOD_X_TILED;
14177         default:
14178                 return false;
14179         }
14180 }
14181
14182 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14183                                             u32 format, u64 modifier)
14184 {
14185         switch (modifier) {
14186         case DRM_FORMAT_MOD_LINEAR:
14187         case I915_FORMAT_MOD_X_TILED:
14188                 break;
14189         default:
14190                 return false;
14191         }
14192
14193         switch (format) {
14194         case DRM_FORMAT_C8:
14195         case DRM_FORMAT_RGB565:
14196         case DRM_FORMAT_XRGB8888:
14197         case DRM_FORMAT_XBGR8888:
14198         case DRM_FORMAT_XRGB2101010:
14199         case DRM_FORMAT_XBGR2101010:
14200                 return modifier == DRM_FORMAT_MOD_LINEAR ||
14201                         modifier == I915_FORMAT_MOD_X_TILED;
14202         default:
14203                 return false;
14204         }
14205 }
14206
14207 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14208                                               u32 format, u64 modifier)
14209 {
14210         return modifier == DRM_FORMAT_MOD_LINEAR &&
14211                 format == DRM_FORMAT_ARGB8888;
14212 }
14213
/*
 * Plane vfuncs using the i965 format/modifier check; otherwise identical
 * to i8xx_plane_funcs below.
 */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
14224
/*
 * Plane vfuncs using the i8xx format/modifier check; otherwise identical
 * to i965_plane_funcs above.
 */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
14235
14236 static int
14237 intel_legacy_cursor_update(struct drm_plane *plane,
14238                            struct drm_crtc *crtc,
14239                            struct drm_framebuffer *fb,
14240                            int crtc_x, int crtc_y,
14241                            unsigned int crtc_w, unsigned int crtc_h,
14242                            u32 src_x, u32 src_y,
14243                            u32 src_w, u32 src_h,
14244                            struct drm_modeset_acquire_ctx *ctx)
14245 {
14246         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14247         int ret;
14248         struct drm_plane_state *old_plane_state, *new_plane_state;
14249         struct intel_plane *intel_plane = to_intel_plane(plane);
14250         struct drm_framebuffer *old_fb;
14251         struct intel_crtc_state *crtc_state =
14252                 to_intel_crtc_state(crtc->state);
14253         struct intel_crtc_state *new_crtc_state;
14254
14255         /*
14256          * When crtc is inactive or there is a modeset pending,
14257          * wait for it to complete in the slowpath
14258          */
14259         if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14260             crtc_state->update_pipe)
14261                 goto slow;
14262
14263         old_plane_state = plane->state;
14264         /*
14265          * Don't do an async update if there is an outstanding commit modifying
14266          * the plane.  This prevents our async update's changes from getting
14267          * overridden by a previous synchronous update's state.
14268          */
14269         if (old_plane_state->commit &&
14270             !try_wait_for_completion(&old_plane_state->commit->hw_done))
14271                 goto slow;
14272
14273         /*
14274          * If any parameters change that may affect watermarks,
14275          * take the slowpath. Only changing fb or position should be
14276          * in the fastpath.
14277          */
14278         if (old_plane_state->crtc != crtc ||
14279             old_plane_state->src_w != src_w ||
14280             old_plane_state->src_h != src_h ||
14281             old_plane_state->crtc_w != crtc_w ||
14282             old_plane_state->crtc_h != crtc_h ||
14283             !old_plane_state->fb != !fb)
14284                 goto slow;
14285
14286         new_plane_state = intel_plane_duplicate_state(plane);
14287         if (!new_plane_state)
14288                 return -ENOMEM;
14289
14290         new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14291         if (!new_crtc_state) {
14292                 ret = -ENOMEM;
14293                 goto out_free;
14294         }
14295
14296         drm_atomic_set_fb_for_plane(new_plane_state, fb);
14297
14298         new_plane_state->src_x = src_x;
14299         new_plane_state->src_y = src_y;
14300         new_plane_state->src_w = src_w;
14301         new_plane_state->src_h = src_h;
14302         new_plane_state->crtc_x = crtc_x;
14303         new_plane_state->crtc_y = crtc_y;
14304         new_plane_state->crtc_w = crtc_w;
14305         new_plane_state->crtc_h = crtc_h;
14306
14307         ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14308                                                   to_intel_plane_state(old_plane_state),
14309                                                   to_intel_plane_state(new_plane_state));
14310         if (ret)
14311                 goto out_free;
14312
14313         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14314         if (ret)
14315                 goto out_free;
14316
14317         ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14318         if (ret)
14319                 goto out_unlock;
14320
14321         intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14322
14323         old_fb = old_plane_state->fb;
14324         i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14325                           intel_plane->frontbuffer_bit);
14326
14327         /* Swap plane state */
14328         plane->state = new_plane_state;
14329
14330         /*
14331          * We cannot swap crtc_state as it may be in use by an atomic commit or
14332          * page flip that's running simultaneously. If we swap crtc_state and
14333          * destroy the old state, we will cause a use-after-free there.
14334          *
14335          * Only update active_planes, which is needed for our internal
14336          * bookkeeping. Either value will do the right thing when updating
14337          * planes atomically. If the cursor was part of the atomic update then
14338          * we would have taken the slowpath.
14339          */
14340         crtc_state->active_planes = new_crtc_state->active_planes;
14341
14342         if (plane->state->visible)
14343                 intel_update_plane(intel_plane, crtc_state,
14344                                    to_intel_plane_state(plane->state));
14345         else
14346                 intel_disable_plane(intel_plane, crtc_state);
14347
14348         intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14349
14350 out_unlock:
14351         mutex_unlock(&dev_priv->drm.struct_mutex);
14352 out_free:
14353         if (new_crtc_state)
14354                 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14355         if (ret)
14356                 intel_plane_destroy_state(plane, new_plane_state);
14357         else
14358                 intel_plane_destroy_state(plane, old_plane_state);
14359         return ret;
14360
14361 slow:
14362         return drm_atomic_helper_update_plane(plane, crtc, fb,
14363                                               crtc_x, crtc_y, crtc_w, crtc_h,
14364                                               src_x, src_y, src_w, src_h, ctx);
14365 }
14366
/*
 * Plane vfuncs for the cursor plane.
 *
 * .update_plane points at intel_legacy_cursor_update so legacy cursor
 * ioctls can take a fastpath that avoids a full atomic commit (falling
 * back to drm_atomic_helper_update_plane when it can't); the remaining
 * hooks are the common atomic helpers/implementations.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_legacy_cursor_update,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_get_property = intel_plane_atomic_get_property,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = intel_cursor_format_mod_supported,
};
14377
14378 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14379                                enum i9xx_plane_id i9xx_plane)
14380 {
14381         if (!HAS_FBC(dev_priv))
14382                 return false;
14383
14384         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14385                 return i9xx_plane == PLANE_A; /* tied to pipe A */
14386         else if (IS_IVYBRIDGE(dev_priv))
14387                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14388                         i9xx_plane == PLANE_C;
14389         else if (INTEL_GEN(dev_priv) >= 4)
14390                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14391         else
14392                 return i9xx_plane == PLANE_A;
14393 }
14394
14395 static struct intel_plane *
14396 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14397 {
14398         struct intel_plane *plane;
14399         const struct drm_plane_funcs *plane_funcs;
14400         unsigned int supported_rotations;
14401         unsigned int possible_crtcs;
14402         const u64 *modifiers;
14403         const u32 *formats;
14404         int num_formats;
14405         int ret;
14406
14407         if (INTEL_GEN(dev_priv) >= 9)
14408                 return skl_universal_plane_create(dev_priv, pipe,
14409                                                   PLANE_PRIMARY);
14410
14411         plane = intel_plane_alloc();
14412         if (IS_ERR(plane))
14413                 return plane;
14414
14415         plane->pipe = pipe;
14416         /*
14417          * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14418          * port is hooked to pipe B. Hence we want plane A feeding pipe B.
14419          */
14420         if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14421                 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14422         else
14423                 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14424         plane->id = PLANE_PRIMARY;
14425         plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14426
14427         plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14428         if (plane->has_fbc) {
14429                 struct intel_fbc *fbc = &dev_priv->fbc;
14430
14431                 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14432         }
14433
14434         if (INTEL_GEN(dev_priv) >= 4) {
14435                 formats = i965_primary_formats;
14436                 num_formats = ARRAY_SIZE(i965_primary_formats);
14437                 modifiers = i9xx_format_modifiers;
14438
14439                 plane->max_stride = i9xx_plane_max_stride;
14440                 plane->update_plane = i9xx_update_plane;
14441                 plane->disable_plane = i9xx_disable_plane;
14442                 plane->get_hw_state = i9xx_plane_get_hw_state;
14443                 plane->check_plane = i9xx_plane_check;
14444
14445                 plane_funcs = &i965_plane_funcs;
14446         } else {
14447                 formats = i8xx_primary_formats;
14448                 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14449                 modifiers = i9xx_format_modifiers;
14450
14451                 plane->max_stride = i9xx_plane_max_stride;
14452                 plane->update_plane = i9xx_update_plane;
14453                 plane->disable_plane = i9xx_disable_plane;
14454                 plane->get_hw_state = i9xx_plane_get_hw_state;
14455                 plane->check_plane = i9xx_plane_check;
14456
14457                 plane_funcs = &i8xx_plane_funcs;
14458         }
14459
14460         possible_crtcs = BIT(pipe);
14461
14462         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14463                 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14464                                                possible_crtcs, plane_funcs,
14465                                                formats, num_formats, modifiers,
14466                                                DRM_PLANE_TYPE_PRIMARY,
14467                                                "primary %c", pipe_name(pipe));
14468         else
14469                 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14470                                                possible_crtcs, plane_funcs,
14471                                                formats, num_formats, modifiers,
14472                                                DRM_PLANE_TYPE_PRIMARY,
14473                                                "plane %c",
14474                                                plane_name(plane->i9xx_plane));
14475         if (ret)
14476                 goto fail;
14477
14478         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14479                 supported_rotations =
14480                         DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14481                         DRM_MODE_REFLECT_X;
14482         } else if (INTEL_GEN(dev_priv) >= 4) {
14483                 supported_rotations =
14484                         DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14485         } else {
14486                 supported_rotations = DRM_MODE_ROTATE_0;
14487         }
14488
14489         if (INTEL_GEN(dev_priv) >= 4)
14490                 drm_plane_create_rotation_property(&plane->base,
14491                                                    DRM_MODE_ROTATE_0,
14492                                                    supported_rotations);
14493
14494         drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14495
14496         return plane;
14497
14498 fail:
14499         intel_plane_free(plane);
14500
14501         return ERR_PTR(ret);
14502 }
14503
14504 static struct intel_plane *
14505 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14506                           enum pipe pipe)
14507 {
14508         unsigned int possible_crtcs;
14509         struct intel_plane *cursor;
14510         int ret;
14511
14512         cursor = intel_plane_alloc();
14513         if (IS_ERR(cursor))
14514                 return cursor;
14515
14516         cursor->pipe = pipe;
14517         cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14518         cursor->id = PLANE_CURSOR;
14519         cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14520
14521         if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14522                 cursor->max_stride = i845_cursor_max_stride;
14523                 cursor->update_plane = i845_update_cursor;
14524                 cursor->disable_plane = i845_disable_cursor;
14525                 cursor->get_hw_state = i845_cursor_get_hw_state;
14526                 cursor->check_plane = i845_check_cursor;
14527         } else {
14528                 cursor->max_stride = i9xx_cursor_max_stride;
14529                 cursor->update_plane = i9xx_update_cursor;
14530                 cursor->disable_plane = i9xx_disable_cursor;
14531                 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14532                 cursor->check_plane = i9xx_check_cursor;
14533         }
14534
14535         cursor->cursor.base = ~0;
14536         cursor->cursor.cntl = ~0;
14537
14538         if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14539                 cursor->cursor.size = ~0;
14540
14541         possible_crtcs = BIT(pipe);
14542
14543         ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14544                                        possible_crtcs, &intel_cursor_plane_funcs,
14545                                        intel_cursor_formats,
14546                                        ARRAY_SIZE(intel_cursor_formats),
14547                                        cursor_format_modifiers,
14548                                        DRM_PLANE_TYPE_CURSOR,
14549                                        "cursor %c", pipe_name(pipe));
14550         if (ret)
14551                 goto fail;
14552
14553         if (INTEL_GEN(dev_priv) >= 4)
14554                 drm_plane_create_rotation_property(&cursor->base,
14555                                                    DRM_MODE_ROTATE_0,
14556                                                    DRM_MODE_ROTATE_0 |
14557                                                    DRM_MODE_ROTATE_180);
14558
14559         drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14560
14561         return cursor;
14562
14563 fail:
14564         intel_plane_free(cursor);
14565
14566         return ERR_PTR(ret);
14567 }
14568
14569 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14570                                     struct intel_crtc_state *crtc_state)
14571 {
14572         struct intel_crtc_scaler_state *scaler_state =
14573                 &crtc_state->scaler_state;
14574         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14575         int i;
14576
14577         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14578         if (!crtc->num_scalers)
14579                 return;
14580
14581         for (i = 0; i < crtc->num_scalers; i++) {
14582                 struct intel_scaler *scaler = &scaler_state->scalers[i];
14583
14584                 scaler->in_use = 0;
14585                 scaler->mode = 0;
14586         }
14587
14588         scaler_state->scaler_id = -1;
14589 }
14590
/*
 * intel_crtc_init - allocate and register the crtc for one pipe
 *
 * Creates the primary plane, all sprite planes and the cursor plane for
 * @pipe, registers the crtc with the DRM core, initializes the shared
 * scaler state and records the pipe/plane -> crtc mappings used
 * elsewhere in the driver.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct intel_plane *primary = NULL;
        struct intel_plane *cursor = NULL;
        int sprite, ret;

        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (!intel_crtc)
                return -ENOMEM;

        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state) {
                ret = -ENOMEM;
                goto fail;
        }
        intel_crtc->config = crtc_state;
        intel_crtc->base.state = &crtc_state->base;
        crtc_state->base.crtc = &intel_crtc->base;

        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(primary->id);

        /* Create every sprite/overlay plane this pipe supports. */
        for_each_sprite(dev_priv, pipe, sprite) {
                struct intel_plane *plane;

                plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                intel_crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(dev_priv, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(cursor->id);

        ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
                                        &primary->base, &cursor->base,
                                        &intel_crtc_funcs,
                                        "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        intel_crtc->pipe = pipe;

        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);

        /* Each pipe must map to exactly one crtc, and only once. */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
               dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
        dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

        if (INTEL_GEN(dev_priv) < 9) {
                enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

                BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
                       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
        }

        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

        intel_color_init(intel_crtc);

        /* The driver relies on crtc index == pipe elsewhere. */
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

        return 0;

fail:
        /*
         * drm_mode_config_cleanup() will free up any
         * crtcs/planes already initialized.
         */
        kfree(crtc_state);
        kfree(intel_crtc);

        return ret;
}
14679
14680 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14681                                       struct drm_file *file)
14682 {
14683         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14684         struct drm_crtc *drmmode_crtc;
14685         struct intel_crtc *crtc;
14686
14687         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14688         if (!drmmode_crtc)
14689                 return -ENOENT;
14690
14691         crtc = to_intel_crtc(drmmode_crtc);
14692         pipe_from_crtc_id->pipe = crtc->pipe;
14693
14694         return 0;
14695 }
14696
14697 static int intel_encoder_clones(struct intel_encoder *encoder)
14698 {
14699         struct drm_device *dev = encoder->base.dev;
14700         struct intel_encoder *source_encoder;
14701         int index_mask = 0;
14702         int entry = 0;
14703
14704         for_each_intel_encoder(dev, source_encoder) {
14705                 if (encoders_cloneable(encoder, source_encoder))
14706                         index_mask |= (1 << entry);
14707
14708                 entry++;
14709         }
14710
14711         return index_mask;
14712 }
14713
14714 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14715 {
14716         if (!IS_MOBILE(dev_priv))
14717                 return false;
14718
14719         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14720                 return false;
14721
14722         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14723                 return false;
14724
14725         return true;
14726 }
14727
14728 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14729 {
14730         if (INTEL_GEN(dev_priv) >= 9)
14731                 return false;
14732
14733         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14734                 return false;
14735
14736         if (HAS_PCH_LPT_H(dev_priv) &&
14737             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14738                 return false;
14739
14740         /* DDI E can't be used if DDI A requires 4 lanes */
14741         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14742                 return false;
14743
14744         if (!dev_priv->vbt.int_crt_support)
14745                 return false;
14746
14747         return true;
14748 }
14749
14750 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14751 {
14752         int pps_num;
14753         int pps_idx;
14754
14755         if (HAS_DDI(dev_priv))
14756                 return;
14757         /*
14758          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14759          * everywhere where registers can be write protected.
14760          */
14761         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14762                 pps_num = 2;
14763         else
14764                 pps_num = 1;
14765
14766         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14767                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14768
14769                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14770                 I915_WRITE(PP_CONTROL(pps_idx), val);
14771         }
14772 }
14773
/*
 * Select the MMIO base of the panel power sequencer registers for this
 * platform and apply the register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
        if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
                dev_priv->pps_mmio_base = PCH_PPS_BASE;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->pps_mmio_base = VLV_PPS_BASE;
        else
                dev_priv->pps_mmio_base = PPS_BASE;

        intel_pps_unlock_regs_wa(dev_priv);
}
14785
/*
 * Probe the platform's display hardware and register an encoder for
 * every output found (DDI, DP, HDMI, SDVO, LVDS, CRT, DSI, TV, DVO).
 * The per-platform probe order matters — see e.g. the LVDS-before-eDP
 * note in the PCH branch below. Once all encoders exist, their
 * possible_crtcs/possible_clones masks are filled in.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_init(dev_priv);

        if (!HAS_DISPLAY(dev_priv))
                return;

        if (IS_ELKHARTLAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                icl_dsi_init(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 11) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                /*
                 * On some ICL SKUs port F is not present. No strap bits for
                 * this, so rely on VBT.
                 * Work around broken VBTs on SKUs known to have no port F.
                 */
                if (IS_ICL_WITH_PORT_F(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_F))
                        intel_ddi_init(dev_priv, PORT_F);

                icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
                 * detect the ports.
                 */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);

                vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;

                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);

                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
                 * it's there.
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
                if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);

                /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
                 * register */
                found = I915_READ(SFUSE_STRAP);

                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_E))
                        intel_ddi_init(dev_priv, PORT_E);

        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;

                /*
                 * intel_edp_init_connector() depends on this completing first,
                 * to prevent the registration of both eDP and LVDS and the
                 * incorrect sharing of the PPS.
                 */
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);

                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (ilk_has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);

                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                /* Port D is either eDP (handled above via dpd_is_edp) or HDMI/DP. */
                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
                        intel_crt_init(dev_priv);

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (IS_PINEVIEW(dev_priv)) {
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);
        } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                bool found = false;

                if (IS_MOBILE(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);

                if (SUPPORTS_TV(dev_priv))
                        intel_tv_init(dev_priv);
        } else if (IS_GEN(dev_priv, 2)) {
                if (IS_I85X(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);
                intel_dvo_init(dev_priv);
        }

        intel_psr_init(dev_priv);

        /* All encoders now exist; derive their crtc and cloning masks. */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
                encoder->base.possible_clones =
                        intel_encoder_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
15012
15013 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15014 {
15015         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15016         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15017
15018         drm_framebuffer_cleanup(fb);
15019
15020         i915_gem_object_lock(obj);
15021         WARN_ON(!obj->framebuffer_references--);
15022         i915_gem_object_unlock(obj);
15023
15024         i915_gem_object_put(obj);
15025
15026         kfree(intel_fb);
15027 }
15028
15029 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15030                                                 struct drm_file *file,
15031                                                 unsigned int *handle)
15032 {
15033         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15034
15035         if (obj->userptr.mm) {
15036                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15037                 return -EINVAL;
15038         }
15039
15040         return drm_gem_handle_create(file, &obj->base, handle);
15041 }
15042
15043 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15044                                         struct drm_file *file,
15045                                         unsigned flags, unsigned color,
15046                                         struct drm_clip_rect *clips,
15047                                         unsigned num_clips)
15048 {
15049         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15050
15051         i915_gem_object_flush_if_display(obj);
15052         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15053
15054         return 0;
15055 }
15056
/* Framebuffer ops for framebuffers created via the addfb/addfb2 ioctls. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
15062
15063 static
15064 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
15065                          u32 pixel_format, u64 fb_modifier)
15066 {
15067         struct intel_crtc *crtc;
15068         struct intel_plane *plane;
15069
15070         /*
15071          * We assume the primary plane for pipe A has
15072          * the highest stride limits of them all.
15073          */
15074         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
15075         plane = to_intel_plane(crtc->base.primary);
15076
15077         return plane->max_stride(plane, pixel_format, fb_modifier,
15078                                  DRM_MODE_ROTATE_0);
15079 }
15080
15081 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15082                                   struct drm_i915_gem_object *obj,
15083                                   struct drm_mode_fb_cmd2 *mode_cmd)
15084 {
15085         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15086         struct drm_framebuffer *fb = &intel_fb->base;
15087         u32 pitch_limit;
15088         unsigned int tiling, stride;
15089         int ret = -EINVAL;
15090         int i;
15091
15092         i915_gem_object_lock(obj);
15093         obj->framebuffer_references++;
15094         tiling = i915_gem_object_get_tiling(obj);
15095         stride = i915_gem_object_get_stride(obj);
15096         i915_gem_object_unlock(obj);
15097
15098         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15099                 /*
15100                  * If there's a fence, enforce that
15101                  * the fb modifier and tiling mode match.
15102                  */
15103                 if (tiling != I915_TILING_NONE &&
15104                     tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15105                         DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15106                         goto err;
15107                 }
15108         } else {
15109                 if (tiling == I915_TILING_X) {
15110                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15111                 } else if (tiling == I915_TILING_Y) {
15112                         DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15113                         goto err;
15114                 }
15115         }
15116
15117         if (!drm_any_plane_has_format(&dev_priv->drm,
15118                                       mode_cmd->pixel_format,
15119                                       mode_cmd->modifier[0])) {
15120                 struct drm_format_name_buf format_name;
15121
15122                 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15123                               drm_get_format_name(mode_cmd->pixel_format,
15124                                                   &format_name),
15125                               mode_cmd->modifier[0]);
15126                 goto err;
15127         }
15128
15129         /*
15130          * gen2/3 display engine uses the fence if present,
15131          * so the tiling mode must match the fb modifier exactly.
15132          */
15133         if (INTEL_GEN(dev_priv) < 4 &&
15134             tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15135                 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15136                 goto err;
15137         }
15138
15139         pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
15140                                            mode_cmd->modifier[0]);
15141         if (mode_cmd->pitches[0] > pitch_limit) {
15142                 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15143                               mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15144                               "tiled" : "linear",
15145                               mode_cmd->pitches[0], pitch_limit);
15146                 goto err;
15147         }
15148
15149         /*
15150          * If there's a fence, enforce that
15151          * the fb pitch and fence stride match.
15152          */
15153         if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15154                 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15155                               mode_cmd->pitches[0], stride);
15156                 goto err;
15157         }
15158
15159         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15160         if (mode_cmd->offsets[0] != 0)
15161                 goto err;
15162
15163         drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15164
15165         for (i = 0; i < fb->format->num_planes; i++) {
15166                 u32 stride_alignment;
15167
15168                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15169                         DRM_DEBUG_KMS("bad plane %d handle\n", i);
15170                         goto err;
15171                 }
15172
15173                 stride_alignment = intel_fb_stride_alignment(fb, i);
15174
15175                 /*
15176                  * Display WA #0531: skl,bxt,kbl,glk
15177                  *
15178                  * Render decompression and plane width > 3840
15179                  * combined with horizontal panning requires the
15180                  * plane stride to be a multiple of 4. We'll just
15181                  * require the entire fb to accommodate that to avoid
15182                  * potential runtime errors at plane configuration time.
15183                  */
15184                 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15185                     is_ccs_modifier(fb->modifier))
15186                         stride_alignment *= 4;
15187
15188                 if (fb->pitches[i] & (stride_alignment - 1)) {
15189                         DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15190                                       i, fb->pitches[i], stride_alignment);
15191                         goto err;
15192                 }
15193
15194                 fb->obj[i] = &obj->base;
15195         }
15196
15197         ret = intel_fill_fb_info(dev_priv, fb);
15198         if (ret)
15199                 goto err;
15200
15201         ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15202         if (ret) {
15203                 DRM_ERROR("framebuffer init failed %d\n", ret);
15204                 goto err;
15205         }
15206
15207         return 0;
15208
15209 err:
15210         i915_gem_object_lock(obj);
15211         obj->framebuffer_references--;
15212         i915_gem_object_unlock(obj);
15213         return ret;
15214 }
15215
15216 static struct drm_framebuffer *
15217 intel_user_framebuffer_create(struct drm_device *dev,
15218                               struct drm_file *filp,
15219                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
15220 {
15221         struct drm_framebuffer *fb;
15222         struct drm_i915_gem_object *obj;
15223         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15224
15225         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15226         if (!obj)
15227                 return ERR_PTR(-ENOENT);
15228
15229         fb = intel_framebuffer_create(obj, &mode_cmd);
15230         if (IS_ERR(fb))
15231                 i915_gem_object_put(obj);
15232
15233         return fb;
15234 }
15235
/*
 * .atomic_state_free hook: release an i915 atomic state.
 *
 * Order matters: release the core state contents first, then tear down
 * the i915-specific commit_ready fence, and only then free the memory.
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
15246
15247 static enum drm_mode_status
15248 intel_mode_valid(struct drm_device *dev,
15249                  const struct drm_display_mode *mode)
15250 {
15251         struct drm_i915_private *dev_priv = to_i915(dev);
15252         int hdisplay_max, htotal_max;
15253         int vdisplay_max, vtotal_max;
15254
15255         /*
15256          * Can't reject DBLSCAN here because Xorg ddxen can add piles
15257          * of DBLSCAN modes to the output's mode list when they detect
15258          * the scaling mode property on the connector. And they don't
15259          * ask the kernel to validate those modes in any way until
15260          * modeset time at which point the client gets a protocol error.
15261          * So in order to not upset those clients we silently ignore the
15262          * DBLSCAN flag on such connectors. For other connectors we will
15263          * reject modes with the DBLSCAN flag in encoder->compute_config().
15264          * And we always reject DBLSCAN modes in connector->mode_valid()
15265          * as we never want such modes on the connector's mode list.
15266          */
15267
15268         if (mode->vscan > 1)
15269                 return MODE_NO_VSCAN;
15270
15271         if (mode->flags & DRM_MODE_FLAG_HSKEW)
15272                 return MODE_H_ILLEGAL;
15273
15274         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15275                            DRM_MODE_FLAG_NCSYNC |
15276                            DRM_MODE_FLAG_PCSYNC))
15277                 return MODE_HSYNC;
15278
15279         if (mode->flags & (DRM_MODE_FLAG_BCAST |
15280                            DRM_MODE_FLAG_PIXMUX |
15281                            DRM_MODE_FLAG_CLKDIV2))
15282                 return MODE_BAD;
15283
15284         if (INTEL_GEN(dev_priv) >= 9 ||
15285             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15286                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15287                 vdisplay_max = 4096;
15288                 htotal_max = 8192;
15289                 vtotal_max = 8192;
15290         } else if (INTEL_GEN(dev_priv) >= 3) {
15291                 hdisplay_max = 4096;
15292                 vdisplay_max = 4096;
15293                 htotal_max = 8192;
15294                 vtotal_max = 8192;
15295         } else {
15296                 hdisplay_max = 2048;
15297                 vdisplay_max = 2048;
15298                 htotal_max = 4096;
15299                 vtotal_max = 4096;
15300         }
15301
15302         if (mode->hdisplay > hdisplay_max ||
15303             mode->hsync_start > htotal_max ||
15304             mode->hsync_end > htotal_max ||
15305             mode->htotal > htotal_max)
15306                 return MODE_H_ILLEGAL;
15307
15308         if (mode->vdisplay > vdisplay_max ||
15309             mode->vsync_start > vtotal_max ||
15310             mode->vsync_end > vtotal_max ||
15311             mode->vtotal > vtotal_max)
15312                 return MODE_V_ILLEGAL;
15313
15314         return MODE_OK;
15315 }
15316
/* Mode config ops: i915 is a fully atomic driver with custom state. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
15328
15329 /**
15330  * intel_init_display_hooks - initialize the display modesetting hooks
15331  * @dev_priv: device private
15332  */
15333 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15334 {
15335         intel_init_cdclk_hooks(dev_priv);
15336
15337         if (INTEL_GEN(dev_priv) >= 9) {
15338                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15339                 dev_priv->display.get_initial_plane_config =
15340                         skylake_get_initial_plane_config;
15341                 dev_priv->display.crtc_compute_clock =
15342                         haswell_crtc_compute_clock;
15343                 dev_priv->display.crtc_enable = haswell_crtc_enable;
15344                 dev_priv->display.crtc_disable = haswell_crtc_disable;
15345         } else if (HAS_DDI(dev_priv)) {
15346                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15347                 dev_priv->display.get_initial_plane_config =
15348                         i9xx_get_initial_plane_config;
15349                 dev_priv->display.crtc_compute_clock =
15350                         haswell_crtc_compute_clock;
15351                 dev_priv->display.crtc_enable = haswell_crtc_enable;
15352                 dev_priv->display.crtc_disable = haswell_crtc_disable;
15353         } else if (HAS_PCH_SPLIT(dev_priv)) {
15354                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15355                 dev_priv->display.get_initial_plane_config =
15356                         i9xx_get_initial_plane_config;
15357                 dev_priv->display.crtc_compute_clock =
15358                         ironlake_crtc_compute_clock;
15359                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15360                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15361         } else if (IS_CHERRYVIEW(dev_priv)) {
15362                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15363                 dev_priv->display.get_initial_plane_config =
15364                         i9xx_get_initial_plane_config;
15365                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15366                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15367                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15368         } else if (IS_VALLEYVIEW(dev_priv)) {
15369                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15370                 dev_priv->display.get_initial_plane_config =
15371                         i9xx_get_initial_plane_config;
15372                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15373                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15374                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15375         } else if (IS_G4X(dev_priv)) {
15376                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15377                 dev_priv->display.get_initial_plane_config =
15378                         i9xx_get_initial_plane_config;
15379                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15380                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15381                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15382         } else if (IS_PINEVIEW(dev_priv)) {
15383                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15384                 dev_priv->display.get_initial_plane_config =
15385                         i9xx_get_initial_plane_config;
15386                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15387                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15388                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15389         } else if (!IS_GEN(dev_priv, 2)) {
15390                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15391                 dev_priv->display.get_initial_plane_config =
15392                         i9xx_get_initial_plane_config;
15393                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15394                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15395                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15396         } else {
15397                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15398                 dev_priv->display.get_initial_plane_config =
15399                         i9xx_get_initial_plane_config;
15400                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15401                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15402                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15403         }
15404
15405         if (IS_GEN(dev_priv, 5)) {
15406                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15407         } else if (IS_GEN(dev_priv, 6)) {
15408                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15409         } else if (IS_IVYBRIDGE(dev_priv)) {
15410                 /* FIXME: detect B0+ stepping and use auto training */
15411                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15412         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15413                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15414         }
15415
15416         if (INTEL_GEN(dev_priv) >= 9)
15417                 dev_priv->display.update_crtcs = skl_update_crtcs;
15418         else
15419                 dev_priv->display.update_crtcs = intel_update_crtcs;
15420 }
15421
/*
 * Disable the VGA plane that we never use.
 *
 * Uses legacy VGA I/O ports, so the exact sequence (grab legacy I/O from
 * vgaarb, poke SR01, release, then write the VGA control register) must
 * not be reordered.
 */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);	/* SR01 bit 5: VGA screen off */
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);	/* let the screen-off take effect before disabling */

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15440
/*
 * Read out the current cdclk state from the hardware and seed the
 * software state (logical and actual) from it, so the first atomic
 * commit starts from what the hardware is really running.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
15449
15450 /*
15451  * Calculate what we think the watermarks should be for the state we've read
15452  * out of the hardware and then immediately program those watermarks so that
15453  * we ensure the hardware settings match our internal state.
15454  *
15455  * We can calculate what we think WM's should be by creating a duplicate of the
15456  * current state (which was constructed during hardware readout) and running it
15457  * through the atomic check code to calculate new watermark values in the
15458  * state object.
15459  */
15460 static void sanitize_watermarks(struct drm_device *dev)
15461 {
15462         struct drm_i915_private *dev_priv = to_i915(dev);
15463         struct drm_atomic_state *state;
15464         struct intel_atomic_state *intel_state;
15465         struct drm_crtc *crtc;
15466         struct drm_crtc_state *cstate;
15467         struct drm_modeset_acquire_ctx ctx;
15468         int ret;
15469         int i;
15470
15471         /* Only supported on platforms that use atomic watermark design */
15472         if (!dev_priv->display.optimize_watermarks)
15473                 return;
15474
15475         /*
15476          * We need to hold connection_mutex before calling duplicate_state so
15477          * that the connector loop is protected.
15478          */
15479         drm_modeset_acquire_init(&ctx, 0);
15480 retry:
15481         ret = drm_modeset_lock_all_ctx(dev, &ctx);
15482         if (ret == -EDEADLK) {
15483                 drm_modeset_backoff(&ctx);
15484                 goto retry;
15485         } else if (WARN_ON(ret)) {
15486                 goto fail;
15487         }
15488
15489         state = drm_atomic_helper_duplicate_state(dev, &ctx);
15490         if (WARN_ON(IS_ERR(state)))
15491                 goto fail;
15492
15493         intel_state = to_intel_atomic_state(state);
15494
15495         /*
15496          * Hardware readout is the only time we don't want to calculate
15497          * intermediate watermarks (since we don't trust the current
15498          * watermarks).
15499          */
15500         if (!HAS_GMCH(dev_priv))
15501                 intel_state->skip_intermediate_wm = true;
15502
15503         ret = intel_atomic_check(dev, state);
15504         if (ret) {
15505                 /*
15506                  * If we fail here, it means that the hardware appears to be
15507                  * programmed in a way that shouldn't be possible, given our
15508                  * understanding of watermark requirements.  This might mean a
15509                  * mistake in the hardware readout code or a mistake in the
15510                  * watermark calculations for a given platform.  Raise a WARN
15511                  * so that this is noticeable.
15512                  *
15513                  * If this actually happens, we'll have to just leave the
15514                  * BIOS-programmed watermarks untouched and hope for the best.
15515                  */
15516                 WARN(true, "Could not determine valid watermarks for inherited state\n");
15517                 goto put_state;
15518         }
15519
15520         /* Write calculated watermark values back */
15521         for_each_new_crtc_in_state(state, crtc, cstate, i) {
15522                 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15523
15524                 cs->wm.need_postvbl_update = true;
15525                 dev_priv->display.optimize_watermarks(intel_state, cs);
15526
15527                 to_intel_crtc_state(crtc->state)->wm = cs->wm;
15528         }
15529
15530 put_state:
15531         drm_atomic_state_put(state);
15532 fail:
15533         drm_modeset_drop_locks(&ctx);
15534         drm_modeset_acquire_fini(&ctx);
15535 }
15536
15537 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15538 {
15539         if (IS_GEN(dev_priv, 5)) {
15540                 u32 fdi_pll_clk =
15541                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15542
15543                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15544         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15545                 dev_priv->fdi_pll_freq = 270000;
15546         } else {
15547                 return;
15548         }
15549
15550         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15551 }
15552
/*
 * Commit the state inherited from the BIOS back to the hardware once at
 * probe time, so every active plane recomputes its derived state and we
 * avoid assert_plane warnings on the first real modeset.
 *
 * Returns 0 on success or a negative error code from state building or
 * the commit itself.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/* Pull every crtc (and, if active, its planes) into the state. */
	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention, clear the state and redo the locking dance. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15608
/*
 * intel_modeset_init - one-time modeset initialization at driver load
 * @dev: drm device
 *
 * Sets up the mode config, creates crtcs/planes/outputs, reads the
 * state the BIOS left behind out of the hardware, reserves the BIOS
 * framebuffer and sanitizes watermarks. The ordering of the steps below
 * is load-bearing; see the inline comments.
 *
 * Returns 0 on success or a negative error code from crtc creation.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/*
	 * NOTE(review): the allocation result is not checked here;
	 * presumably a NULL wq is tolerated by the users of modeset_wq —
	 * verify, or add an error path.
	 */
	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	init_llist_head(&dev_priv->atomic_helper.free_list);
	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev_priv);

	intel_fbc_init(dev_priv);

	intel_init_pm(dev_priv);

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* maximum framebuffer dimensions */
	if (IS_GEN(dev_priv, 2)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN(dev_priv, 3)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* cursor size limits per platform generation */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN(dev_priv, 2)) {
		dev->mode_config.cursor_width = 64;
		dev->mode_config.cursor_height = 64;
	} else {
		dev->mode_config.cursor_width = 256;
		dev->mode_config.cursor_height = 256;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(dev_priv);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	intel_hdcp_component_init(dev_priv);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	/* Take over the state the BIOS left the hardware in. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(dev_priv))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	/* A failed initial commit is deliberately non-fatal for probe. */
	return 0;
}
15758
/*
 * Force-enable a pipe with fixed 640x480@60 timings, bypassing the normal
 * modeset paths. Used by the i830 "force quirk" which needs the pipe
 * running even without a real mode set. Programs the DPLL dividers, pipe
 * timings and PIPECONF directly; the DPLL write sequence below must be
 * kept in this exact order.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Cross-check the hardcoded dividers against the expected dot clock. */
        WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

        DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                      pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        I915_WRITE(FP0(pipe), fp);
        I915_WRITE(FP1(pipe), fp);

        /* Standard 640x480@60 timings (htotal 800, vtotal 525). */
        I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                I915_WRITE(DPLL(pipe), dpll);
                POSTING_READ(DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        /* Finally turn the pipe on, progressive scanout. */
        I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        POSTING_READ(PIPECONF(pipe));

        /* Confirm the pipe is actually running before returning. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
15828
/*
 * Counterpart of i830_enable_pipe(): turn off a pipe that was force
 * enabled by the quirk. All planes and cursors are expected to be off
 * already, which we assert before touching PIPECONF and the DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));

        /* Nothing should still be scanning out of this pipe. */
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
        WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));

        /* Wait until scanout has actually stopped before killing the DPLL. */
        intel_wait_for_pipe_scanline_stopped(crtc);

        /* Disable the DPLL, leaving only VGA mode disable set. */
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
15850
15851 static void
15852 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15853 {
15854         struct intel_crtc *crtc;
15855
15856         if (INTEL_GEN(dev_priv) >= 4)
15857                 return;
15858
15859         for_each_intel_crtc(&dev_priv->drm, crtc) {
15860                 struct intel_plane *plane =
15861                         to_intel_plane(crtc->base.primary);
15862                 struct intel_crtc *plane_crtc;
15863                 enum pipe pipe;
15864
15865                 if (!plane->get_hw_state(plane, &pipe))
15866                         continue;
15867
15868                 if (pipe == crtc->pipe)
15869                         continue;
15870
15871                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15872                               plane->base.base.id, plane->base.name);
15873
15874                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15875                 intel_plane_disable_noatomic(plane_crtc, plane);
15876         }
15877 }
15878
15879 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15880 {
15881         struct drm_device *dev = crtc->base.dev;
15882         struct intel_encoder *encoder;
15883
15884         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15885                 return true;
15886
15887         return false;
15888 }
15889
15890 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15891 {
15892         struct drm_device *dev = encoder->base.dev;
15893         struct intel_connector *connector;
15894
15895         for_each_connector_on_encoder(dev, &encoder->base, connector)
15896                 return connector;
15897
15898         return NULL;
15899 }
15900
15901 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15902                               enum pipe pch_transcoder)
15903 {
15904         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15905                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15906 }
15907
/*
 * Reconcile a crtc's hardware state with what the driver expects after
 * state readout: clear BIOS debug leftovers, disable planes/pipes that
 * should not be enabled, and initialize fifo underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Clear any frame start delays used for debugging left by the BIOS */
        if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                I915_WRITE(reg,
                           I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
        }

        if (crtc_state->base.active) {
                struct intel_plane *plane;

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        if (plane_state->base.visible &&
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }

                /*
                 * Disable any background color set by the BIOS, but enable the
                 * gamma and CSC to match how we program our planes.
                 */
                if (INTEL_GEN(dev_priv) >= 9)
                        I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
                                   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
                                   SKL_BOTTOM_COLOR_CSC_ENABLE);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base, ctx);

        if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH trancoder underrun reporting state
                 * within the crtc. With crtc for pipe A housing the underrun
                 * reporting state for PCH transcoder A, crtc for pipe B housing
                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
                 * and marking underrun reporting as disabled for the non-existing
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
                if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
15980
15981 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
15982 {
15983         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
15984
15985         /*
15986          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
15987          * the hardware when a high res displays plugged in. DPLL P
15988          * divider is zero, and the pipe timings are bonkers. We'll
15989          * try to disable everything in that case.
15990          *
15991          * FIXME would be nice to be able to sanitize this state
15992          * without several WARNs, but for now let's take the easy
15993          * road.
15994          */
15995         return IS_GEN(dev_priv, 6) &&
15996                 crtc_state->base.active &&
15997                 crtc_state->shared_dpll &&
15998                 crtc_state->port_clock == 0;
15999 }
16000
/*
 * Fix up an encoder whose hardware state is inconsistent with its crtc
 * link: an encoder with active connectors but no active pipe (typically
 * fallout from resume register restore) gets manually disabled and its
 * links clamped off. Also notifies opregion of the sanitized state.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_crtc_state *crtc_state = crtc ?
                to_intel_crtc_state(crtc->base.state) : NULL;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = crtc_state &&
                crtc_state->base.active;

        /* Treat a BIOS-misprogrammed DPLL as if the pipe were inactive. */
        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
                DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
                              pipe_name(crtc->pipe));
                has_active_crtc = false;
        }

        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (crtc_state) {
                        struct drm_encoder *best_encoder;

                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);

                        /* avoid oopsing in case the hooks consult best_encoder */
                        best_encoder = connector->base.state->best_encoder;
                        connector->base.state->best_encoder = &encoder->base;

                        /* Both hooks are optional; call whichever exist. */
                        if (encoder->disable)
                                encoder->disable(encoder, crtc_state,
                                                 connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder, crtc_state,
                                                      connector->base.state);

                        /* Restore the connector's original best_encoder. */
                        connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */

                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }

        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

        /* Gen11+ additionally needs its PLL<->port mapping sanitized. */
        if (INTEL_GEN(dev_priv) >= 11)
                icl_sanitize_encoder_pll_mapping(encoder);
}
16067
16068 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16069 {
16070         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16071
16072         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16073                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16074                 i915_disable_vga(dev_priv);
16075         }
16076 }
16077
16078 void i915_redisable_vga(struct drm_i915_private *dev_priv)
16079 {
16080         intel_wakeref_t wakeref;
16081
16082         /*
16083          * This function can be called both from intel_modeset_setup_hw_state or
16084          * at a very early point in our resume sequence, where the power well
16085          * structures are not yet restored. Since this function is at a very
16086          * paranoid "someone might have enabled VGA while we were not looking"
16087          * level, just check if the power well is enabled instead of trying to
16088          * follow the "don't touch the power well if we don't need it" policy
16089          * the rest of the driver uses.
16090          */
16091         wakeref = intel_display_power_get_if_enabled(dev_priv,
16092                                                      POWER_DOMAIN_VGA);
16093         if (!wakeref)
16094                 return;
16095
16096         i915_redisable_vga_power_on(dev_priv);
16097
16098         intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
16099 }
16100
/* FIXME read out full plane state for all planes */
/*
 * Read out the enabled/disabled state of every plane from the hardware
 * and record visibility in the software state, then fix up each crtc's
 * active-planes bookkeeping.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct intel_crtc_state *crtc_state;
                /* Default so invisible planes get attributed to pipe A. */
                enum pipe pipe = PIPE_A;
                bool visible;

                visible = plane->get_hw_state(plane, &pipe);

                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                crtc_state = to_intel_crtc_state(crtc->base.state);

                intel_set_plane_visible(crtc_state, plane_state, visible);

                DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
                              plane->base.base.id, plane->base.name,
                              enableddisabled(visible), pipe_name(pipe));
        }

        /* Now that visibility is known, recompute each crtc's plane masks. */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                fixup_active_planes(crtc_state);
        }
}
16133
/*
 * Read the current display hardware state (crtcs, planes, shared DPLLs,
 * encoders, connectors) into the software state structures. The ordering
 * matters: crtcs first (their state is reset here), then planes, DPLLs,
 * encoders, connectors, and finally derived per-crtc state (mode,
 * pixel rate, min cdclk/voltage).
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;
        int i;

        dev_priv->active_crtcs = 0;

        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* Start from a clean slate before reading the pipe config. */
                __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
                crtc_state->base.crtc = &crtc->base;

                crtc_state->base.active = crtc_state->base.enable =
                        dev_priv->display.get_pipe_config(crtc, crtc_state);

                crtc->base.enabled = crtc_state->base.enable;
                crtc->active = crtc_state->base.active;

                if (crtc_state->base.active)
                        dev_priv->active_crtcs |= 1 << crtc->pipe;

                DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
                              crtc->base.base.id, crtc->base.name,
                              enableddisabled(crtc_state->base.active));
        }

        readout_plane_state(dev_priv);

        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
                                                        &pll->state.hw_state);
                /* Rebuild the crtc mask from the active crtcs using this PLL. */
                pll->state.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
                        struct intel_crtc_state *crtc_state =
                                to_intel_crtc_state(crtc->base.state);

                        if (crtc_state->base.active &&
                            crtc_state->shared_dpll == pll)
                                pll->state.crtc_mask |= 1 << crtc->pipe;
                }
                pll->active_mask = pll->state.crtc_mask;

                DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
                              pll->info->name, pll->state.crtc_mask, pll->on);
        }

        for_each_intel_encoder(dev, encoder) {
                pipe = 0;

                if (encoder->get_hw_state(encoder, &pipe)) {
                        struct intel_crtc_state *crtc_state;

                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                        crtc_state = to_intel_crtc_state(crtc->base.state);

                        encoder->base.crtc = &crtc->base;
                        encoder->get_config(encoder, crtc_state);
                } else {
                        encoder->base.crtc = NULL;
                }

                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id, encoder->base.name,
                              enableddisabled(encoder->base.crtc),
                              pipe_name(pipe));
        }

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                if (connector->get_hw_state(connector)) {
                        connector->base.dpms = DRM_MODE_DPMS_ON;

                        encoder = connector->encoder;
                        connector->base.encoder = &encoder->base;

                        if (encoder->base.crtc &&
                            encoder->base.crtc->state->active) {
                                /*
                                 * This has to be done during hardware readout
                                 * because anything calling .crtc_disable may
                                 * rely on the connector_mask being accurate.
                                 */
                                encoder->base.crtc->state->connector_mask |=
                                        drm_connector_mask(&connector->base);
                                encoder->base.crtc->state->encoder_mask |=
                                        drm_encoder_mask(&encoder->base);
                        }

                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
                              connector->base.base.id, connector->base.name,
                              enableddisabled(connector->base.encoder));
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Derive per-crtc software state from what was just read out. */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                int min_cdclk = 0;

                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
                if (crtc_state->base.active) {
                        intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
                        crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
                        crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
                        intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
                        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

                        /*
                         * The initial mode needs to be set in order to keep
                         * the atomic core happy. It wants a valid mode if the
                         * crtc's enabled, so we do the above call.
                         *
                         * But we don't set all the derived state fully, hence
                         * set a flag to indicate that a full recalculation is
                         * needed on the next commit.
                         */
                        crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

                        intel_crtc_compute_pixel_rate(crtc_state);

                        if (dev_priv->display.modeset_calc_cdclk) {
                                min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
                                if (WARN_ON(min_cdclk < 0))
                                        min_cdclk = 0;
                        }

                        drm_calc_timestamping_constants(&crtc->base,
                                                        &crtc_state->base.adjusted_mode);
                        update_scanline_offset(crtc_state);
                }

                dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
                dev_priv->min_voltage_level[crtc->pipe] =
                        crtc_state->min_voltage_level;

                intel_pipe_config_sanity_check(dev_priv, crtc_state);
        }
}
16286
16287 static void
16288 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16289 {
16290         struct intel_encoder *encoder;
16291
16292         for_each_intel_encoder(&dev_priv->drm, encoder) {
16293                 struct intel_crtc_state *crtc_state;
16294
16295                 if (!encoder->get_power_domains)
16296                         continue;
16297
16298                 /*
16299                  * MST-primary and inactive encoders don't have a crtc state
16300                  * and neither of these require any power domain references.
16301                  */
16302                 if (!encoder->base.crtc)
16303                         continue;
16304
16305                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16306                 encoder->get_power_domains(encoder, crtc_state);
16307         }
16308 }
16309
/*
 * Apply display workarounds that must be in place before any other
 * display hardware is touched during init/readout.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
        /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
        if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
                I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                           DARBF_GATING_DIS);

        if (IS_HASWELL(dev_priv)) {
                /*
                 * WaRsPkgCStateDisplayPMReq:hsw
                 * System hang if this isn't done before disabling all planes!
                 */
                I915_WRITE(CHICKEN_PAR1_1,
                           I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
        }
}
16326
16327 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16328                                        enum port port, i915_reg_t hdmi_reg)
16329 {
16330         u32 val = I915_READ(hdmi_reg);
16331
16332         if (val & SDVO_ENABLE ||
16333             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16334                 return;
16335
16336         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16337                       port_name(port));
16338
16339         val &= ~SDVO_PIPE_SEL_MASK;
16340         val |= SDVO_PIPE_SEL(PIPE_A);
16341
16342         I915_WRITE(hdmi_reg, val);
16343 }
16344
16345 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16346                                      enum port port, i915_reg_t dp_reg)
16347 {
16348         u32 val = I915_READ(dp_reg);
16349
16350         if (val & DP_PORT_EN ||
16351             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16352                 return;
16353
16354         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16355                       port_name(port));
16356
16357         val &= ~DP_PIPE_SEL_MASK;
16358         val |= DP_PIPE_SEL(PIPE_A);
16359
16360         I915_WRITE(dp_reg, val);
16361 }
16362
/* Sanitize the transcoder select bits on all PCH DP and HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
        /*
         * The BIOS may select transcoder B on some of the PCH
         * ports even it doesn't enable the port. This would trip
         * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
         * Sanitize the transcoder select bits to prevent that. We
         * assume that the BIOS never actually enabled the port,
         * because if it did we'd actually have to toggle the port
         * on and back off to make the transcoder A select stick
         * (see. intel_dp_link_down(), intel_disable_hdmi(),
         * intel_disable_sdvo()).
         */
        ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
        ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
        ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

        /* PCH SDVOB multiplex with HDMIB */
        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
16385
/*
 * Scan out the current hw modeset state, and sanitize it to the current
 * state: read out crtc/plane/encoder/connector/DPLL state, then disable
 * or fix up anything inconsistent (wrong plane/pipe mapping, encoders
 * without active pipes, unused but enabled PLLs, stray power domain
 * references) so the atomic state matches the hardware.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
                             struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state;
        struct intel_encoder *encoder;
        struct intel_crtc *crtc;
        intel_wakeref_t wakeref;
        int i;

        /* Hold POWER_DOMAIN_INIT for the duration of the readout/sanitize. */
        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        intel_early_display_was(dev_priv);
        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);

        if (HAS_PCH_IBX(dev_priv))
                ibx_sanitize_pch_ports(dev_priv);

        /*
         * intel_sanitize_plane_mapping() may need to do vblank
         * waits, so we need vblank interrupts restored beforehand.
         */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                crtc_state = to_intel_crtc_state(crtc->base.state);

                drm_crtc_vblank_reset(&crtc->base);

                if (crtc_state->base.active)
                        intel_crtc_vblank_on(crtc_state);
        }

        intel_sanitize_plane_mapping(dev_priv);

        for_each_intel_encoder(dev, encoder)
                intel_sanitize_encoder(encoder);

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                crtc_state = to_intel_crtc_state(crtc->base.state);
                intel_sanitize_crtc(crtc, ctx);
                intel_dump_pipe_config(crtc, crtc_state,
                                       "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        /* Turn off any shared DPLL that is on but has no user. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active_mask)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
                              pll->info->name);

                pll->info->funcs->disable(dev_priv, pll);
                pll->on = false;
        }

        /* Read out (and, where supported, sanitize) the watermark state. */
        if (IS_G4X(dev_priv)) {
                g4x_wm_get_hw_state(dev_priv);
                g4x_wm_sanitize(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_wm_get_hw_state(dev_priv);
                vlv_wm_sanitize(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 9) {
                skl_wm_get_hw_state(dev_priv);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_wm_get_hw_state(dev_priv);
        }

        /* Drop any stray power domain references left from readout. */
        for_each_intel_crtc(dev, crtc) {
                u64 put_domains;

                crtc_state = to_intel_crtc_state(crtc->base.state);
                put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

        intel_fbc_init_pipe_state(dev_priv);
}
16476
/*
 * Restore the display state saved across suspend (modeset_restore_state),
 * taking all modeset locks with deadlock backoff. Consumes and frees the
 * saved state.
 */
void intel_display_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = dev_priv->modeset_restore_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        dev_priv->modeset_restore_state = NULL;
        /* Only the pointer is recorded here; ctx is initialized just below. */
        if (state)
                state->acquire_ctx = &ctx;

        drm_modeset_acquire_init(&ctx, 0);

        /* Retry lock acquisition until we either succeed or hard-fail. */
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, &ctx);
                if (ret != -EDEADLK)
                        break;

                drm_modeset_backoff(&ctx);
        }

        if (!ret)
                ret = __intel_display_resume(dev, state, &ctx);

        intel_enable_ipc(dev_priv);
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
        /* Drop our reference to the saved state whether restore worked or not. */
        if (state)
                drm_atomic_state_put(state);
}
16510
16511 static void intel_hpd_poll_fini(struct drm_device *dev)
16512 {
16513         struct intel_connector *connector;
16514         struct drm_connector_list_iter conn_iter;
16515
16516         /* Kill all the work that may have been queued by hpd. */
16517         drm_connector_list_iter_begin(dev, &conn_iter);
16518         for_each_intel_connector_iter(connector, &conn_iter) {
16519                 if (connector->modeset_retry_work.func)
16520                         cancel_work_sync(&connector->modeset_retry_work);
16521                 if (connector->hdcp.shim) {
16522                         cancel_delayed_work_sync(&connector->hdcp.check_work);
16523                         cancel_work_sync(&connector->hdcp.prop_work);
16524                 }
16525         }
16526         drm_connector_list_iter_end(&conn_iter);
16527 }
16528
/*
 * Tear down modeset state on driver unload.  The call order below is
 * load-bearing: interrupts and polling go first, fbdev before anything
 * the poll work might call into, and the gmbus/workqueue/FBC cleanup
 * only after drm_mode_config_cleanup() has destroyed their users.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Drain any modeset work still queued on our private workqueue. */
	flush_workqueue(dev_priv->modeset_wq);

	/* Finish deferred atomic-state freeing; the list must then be empty. */
	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(dev_priv);

	/* Destroys all remaining CRTCs/connectors/encoders/planes. */
	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_teardown_gmbus(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	/* Release the compressed framebuffer stolen-memory allocation. */
	intel_fbc_cleanup_cfb(dev_priv);
}
16573
16574 /*
16575  * set vga decode state - true == enable VGA decode
16576  */
16577 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16578 {
16579         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16580         u16 gmch_ctrl;
16581
16582         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16583                 DRM_ERROR("failed to read control word\n");
16584                 return -EIO;
16585         }
16586
16587         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16588                 return 0;
16589
16590         if (state)
16591                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16592         else
16593                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16594
16595         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16596                 DRM_ERROR("failed to write control word\n");
16597                 return -EIO;
16598         }
16599
16600         return 0;
16601 }
16602
16603 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16604
/*
 * Snapshot of display registers taken at GPU-error capture time.
 * Filled by intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;			/* HSW_PWR_WELL_CTL2 (HSW/BDW only) */

	struct intel_cursor_error_state {
		u32 control;			/* CURCNTR */
		u32 position;			/* CURPOS */
		u32 base;			/* CURBASE */
		u32 size;			/* NOTE(review): never written by the capture code in this file */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;		/* pipe power domain was up; regs below only valid if true */
		u32 source;			/* PIPESRC */
		u32 stat;			/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;			/* DSPCNTR */
		u32 stride;			/* DSPSTRIDE */
		u32 size;			/* DSPSIZE (gen <= 3 only) */
		u32 pos;			/* DSPPOS (gen <= 3 only) */
		u32 addr;			/* DSPADDR (gen <= 7, not HSW) */
		u32 surface;			/* DSPSURF (gen >= 4) */
		u32 tile_offset;		/* DSPTILEOFF (gen >= 4) */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;			/* transcoder exists on this platform */
		bool power_domain_on;		/* regs below only valid if true */
		enum transcoder cpu_transcoder;

		u32 conf;			/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];			/* sized to match the transcoders[] table in the capture code */
};
16647
16648 struct intel_display_error_state *
16649 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16650 {
16651         struct intel_display_error_state *error;
16652         int transcoders[] = {
16653                 TRANSCODER_A,
16654                 TRANSCODER_B,
16655                 TRANSCODER_C,
16656                 TRANSCODER_EDP,
16657         };
16658         int i;
16659
16660         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
16661
16662         if (!HAS_DISPLAY(dev_priv))
16663                 return NULL;
16664
16665         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16666         if (error == NULL)
16667                 return NULL;
16668
16669         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16670                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16671
16672         for_each_pipe(dev_priv, i) {
16673                 error->pipe[i].power_domain_on =
16674                         __intel_display_power_is_enabled(dev_priv,
16675                                                          POWER_DOMAIN_PIPE(i));
16676                 if (!error->pipe[i].power_domain_on)
16677                         continue;
16678
16679                 error->cursor[i].control = I915_READ(CURCNTR(i));
16680                 error->cursor[i].position = I915_READ(CURPOS(i));
16681                 error->cursor[i].base = I915_READ(CURBASE(i));
16682
16683                 error->plane[i].control = I915_READ(DSPCNTR(i));
16684                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16685                 if (INTEL_GEN(dev_priv) <= 3) {
16686                         error->plane[i].size = I915_READ(DSPSIZE(i));
16687                         error->plane[i].pos = I915_READ(DSPPOS(i));
16688                 }
16689                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16690                         error->plane[i].addr = I915_READ(DSPADDR(i));
16691                 if (INTEL_GEN(dev_priv) >= 4) {
16692                         error->plane[i].surface = I915_READ(DSPSURF(i));
16693                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16694                 }
16695
16696                 error->pipe[i].source = I915_READ(PIPESRC(i));
16697
16698                 if (HAS_GMCH(dev_priv))
16699                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16700         }
16701
16702         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16703                 enum transcoder cpu_transcoder = transcoders[i];
16704
16705                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
16706                         continue;
16707
16708                 error->transcoder[i].available = true;
16709                 error->transcoder[i].power_domain_on =
16710                         __intel_display_power_is_enabled(dev_priv,
16711                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16712                 if (!error->transcoder[i].power_domain_on)
16713                         continue;
16714
16715                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16716
16717                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16718                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16719                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16720                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16721                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16722                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16723                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16724         }
16725
16726         return error;
16727 }
16728
16729 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16730
/*
 * Dump a display error state previously captured by
 * intel_display_capture_error_state() into the error-state buffer @m.
 * Output order mirrors the capture: power well, then per-pipe
 * pipe/plane/cursor registers, then the captured CPU transcoders.
 * A NULL @error (nothing captured, e.g. no display) prints nothing.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* The per-gen guards match what the capture side read. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* Skip transcoders that don't exist on this platform. */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
16789
16790 #endif