/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_color.h"
#include "intel_cdclk.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lvds.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tv.h"
#include "intel_vdsc.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                                    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

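/*
 * Descriptive note (not from the original file): DPLL divisor limits for a
 * given platform/output combination. The dot and vco ranges are in kHz; p2
 * switches between p2_slow and p2_fast depending on whether the target dot
 * clock is below or above dot_limit (see i9xx_select_p2_div()).
 */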
struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        struct {
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

        /* Obtain SKU information */
        hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
                CCK_FUSE_HPLL_FREQ_MASK;

        return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq)
{
        u32 val;
        int divider;

        val = vlv_cck_read(dev_priv, reg);
        divider = val & CCK_FREQUENCY_VALUES;

        WARN((val & CCK_FREQUENCY_STATUS) !=
             (divider << CCK_FREQUENCY_STATUS_SHIFT),
             "%s change in progress\n", name);

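        /*
         * Explanatory note (an inference from the formula below, not from the
         * original comments): the divider field appears to encode
         * (2 * divide_ratio - 1), hence ref_freq * 2 / (field + 1).
         */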
        return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
                           const char *name, u32 reg)
{
        int hpll;

        vlv_cck_get(dev_priv);

        if (dev_priv->hpll_freq == 0)
                dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

        hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

        vlv_cck_put(dev_priv);

        return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
        if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
                return;

        dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
                                                      CCK_CZ_CLOCK_CONTROL);

        DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
                    const struct intel_crtc_state *pipe_config)
{
        if (HAS_DDI(dev_priv))
                return pipe_config->port_clock; /* SPLL */
        else
                return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};
/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have. The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
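/*
 * Descriptive note (not from the original file): on VLV/CHV the "fast clock"
 * used in these limits and in the PLL search below is 5x the pixel/port
 * clock; vlv_calc_dpll_params() and chv_calc_dpll_params() divide the
 * computed dot clock back down by 5 accordingly.
 */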

static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
        if (enable)
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) |
                           DUPS1_GATING_DIS | DUPS2_GATING_DIS);
        else
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) &
                           ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
                       bool enable)
{
        if (enable)
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
        else
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
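/*
 * Worked example for the i9xx formula above (illustrative divisor values,
 * not taken from any platform table): with refclk = 96000 kHz, m1 = 11,
 * m2 = 3, n = 2, p1 = 2, p2 = 5 we get m = 5 * 13 + 5 = 70, p = 10,
 * vco = 96000 * 70 / 4 = 1680000 kHz and dot = 1680000 / 10 = 168000 kHz.
 */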

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}

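/*
 * Descriptive note (not from the original file): on CHV m2 is a fixed-point
 * value with 22 fractional bits (see the intel_limits_chv table and the
 * << 22 in chv_find_best_dpll()), hence the clock->n << 22 scaling in the
 * VCO calculation below.
 */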
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
                                           clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
                   const struct intel_crtc_state *crtc_state,
                   int target)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                /*
                 * For LVDS just rely on its current settings for dual-channel.
                 * We haven't figured out how to reliably set up different
                 * single/dual channel state, if we even can.
                 */
                if (intel_is_dual_link_lvds(dev_priv))
                        return limit->p2.p2_fast;
                else
                        return limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        return limit->p2.p2_slow;
                else
                        return limit->p2.p2_fast;
        }
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}

/*
 * Check whether the calculated PLL configuration is better than the best
 * configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
                               const struct dpll *calculated_clock,
                               const struct dpll *best_clock,
                               unsigned int best_error_ppm,
                               unsigned int *error_ppm)
{
        /*
         * For CHV ignore the error and consider only the P value.
         * Prefer a bigger P value based on HW requirements.
         */
        if (IS_CHERRYVIEW(to_i915(dev))) {
                *error_ppm = 0;

                return calculated_clock->p > best_clock->p;
        }

        if (WARN_ON_ONCE(!target_freq))
                return false;

        *error_ppm = div_u64(1000000ULL *
                                abs(target_freq - calculated_clock->dot),
                             target_freq);
        /*
         * Prefer a better P value over a better (smaller) error if the error
         * is small. Ensure this preference for future configurations too by
         * setting the error to 0.
         */
        if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
                *error_ppm = 0;

                return true;
        }

        return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        unsigned int best_error_ppm;
        struct dpll clock;
        u64 m2;
        bool found = false;

        memset(best_clock, 0, sizeof(*best_clock));
        best_error_ppm = 1000000;

        /*
         * Based on the hardware doc, n is always set to 1, and m1 is always
         * set to 2.  If we need to support a 200MHz refclk, we will have to
         * revisit this because n may not be 1 anymore.
         */
        clock.n = 1, clock.m1 = 2;
        target *= 5;    /* fast clock */

        for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                for (clock.p2 = limit->p2.p2_fast;
                                clock.p2 >= limit->p2.p2_slow;
                                clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                        unsigned int error_ppm;

                        clock.p = clock.p1 * clock.p2;

                        m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
                                                   refclk * clock.m1);

                        if (m2 > INT_MAX/clock.m1)
                                continue;

                        clock.m2 = m2;

                        chv_calc_dpll_params(refclk, &clock);

                        if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
                                continue;

                        if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
                                                best_error_ppm, &error_ppm))
                                continue;

                        *best_clock = clock;
                        best_error_ppm = error_ppm;
                        found = true;
                }
        }

        return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
                        struct dpll *best_clock)
{
        int refclk = 100000;
        const struct intel_limit *limit = &intel_limits_bxt;

        return chv_find_best_dpll(limit, crtc_state,
                                  crtc_state->port_clock, refclk,
                                  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
        /* Be paranoid as we can arrive here with only partial
         * state retrieved from the hardware during setup.
         *
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
         * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
         * crtc->state->active once we have proper CRTC states wired up
         * for atomic.
         */
        return crtc->active && crtc->base.primary->state->fb &&
                crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
        u32 line_mask;

        if (IS_GEN(dev_priv, 2))
                line_mask = DSL_LINEMASK_GEN2;
        else
                line_mask = DSL_LINEMASK_GEN3;

        line1 = I915_READ(reg) & line_mask;
        msleep(5);
        line2 = I915_READ(reg) & line_mask;

        return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_wait_for_register(&dev_priv->uncore,
                                            reg, I965_PIPECONF_ACTIVE, 0,
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(DPLL(pipe));
        cur_state = !!(val & DPLL_VCO_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        vlv_cck_get(dev_priv);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        vlv_cck_put(dev_priv);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN(dev_priv, 5))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "FDI RX PLL assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
        enum pipe pipe;
        bool cur_state;

        cur_state = plane->get_hw_state(plane, &pipe);

        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
                        plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}
1294
1295 static void assert_vblank_disabled(struct drm_crtc *crtc)
1296 {
1297         if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1298                 drm_crtc_vblank_put(crtc);
1299 }
1300
1301 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1302                                     enum pipe pipe)
1303 {
1304         u32 val;
1305         bool enabled;
1306
1307         val = I915_READ(PCH_TRANSCONF(pipe));
1308         enabled = !!(val & TRANS_ENABLE);
1309         I915_STATE_WARN(enabled,
1310              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1311              pipe_name(pipe));
1312 }
1313
1314 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1315                                    enum pipe pipe, enum port port,
1316                                    i915_reg_t dp_reg)
1317 {
1318         enum pipe port_pipe;
1319         bool state;
1320
1321         state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1322
1323         I915_STATE_WARN(state && port_pipe == pipe,
1324                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
1325                         port_name(port), pipe_name(pipe));
1326
1327         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1328                         "IBX PCH DP %c still using transcoder B\n",
1329                         port_name(port));
1330 }
1331
1332 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1333                                      enum pipe pipe, enum port port,
1334                                      i915_reg_t hdmi_reg)
1335 {
1336         enum pipe port_pipe;
1337         bool state;
1338
1339         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1340
1341         I915_STATE_WARN(state && port_pipe == pipe,
1342                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1343                         port_name(port), pipe_name(pipe));
1344
1345         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1346                         "IBX PCH HDMI %c still using transcoder B\n",
1347                         port_name(port));
1348 }
1349
1350 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1351                                       enum pipe pipe)
1352 {
1353         enum pipe port_pipe;
1354
1355         assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1356         assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1357         assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1358
1359         I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1360                         port_pipe == pipe,
1361                         "PCH VGA enabled on transcoder %c, should be disabled\n",
1362                         pipe_name(pipe));
1363
1364         I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1365                         port_pipe == pipe,
1366                         "PCH LVDS enabled on transcoder %c, should be disabled\n",
1367                         pipe_name(pipe));
1368
1369         /* PCH SDVOB is multiplexed with HDMIB */
1370         assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1371         assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1372         assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
1373 }
1374
1375 static void _vlv_enable_pll(struct intel_crtc *crtc,
1376                             const struct intel_crtc_state *pipe_config)
1377 {
1378         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1379         enum pipe pipe = crtc->pipe;
1380
1381         I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1382         POSTING_READ(DPLL(pipe));
1383         udelay(150);
1384
1385         if (intel_wait_for_register(&dev_priv->uncore,
1386                                     DPLL(pipe),
1387                                     DPLL_LOCK_VLV,
1388                                     DPLL_LOCK_VLV,
1389                                     1))
1390                 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1391 }
1392
1393 static void vlv_enable_pll(struct intel_crtc *crtc,
1394                            const struct intel_crtc_state *pipe_config)
1395 {
1396         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1397         enum pipe pipe = crtc->pipe;
1398
1399         assert_pipe_disabled(dev_priv, pipe);
1400
1401         /* PLL is protected by panel, make sure we can write it */
1402         assert_panel_unlocked(dev_priv, pipe);
1403
1404         if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1405                 _vlv_enable_pll(crtc, pipe_config);
1406
1407         I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1408         POSTING_READ(DPLL_MD(pipe));
1409 }
1410
1412 static void _chv_enable_pll(struct intel_crtc *crtc,
1413                             const struct intel_crtc_state *pipe_config)
1414 {
1415         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1416         enum pipe pipe = crtc->pipe;
1417         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1418         u32 tmp;
1419
1420         vlv_dpio_get(dev_priv);
1421
1422         /* Re-enable the 10bit clock to the display controller */
1423         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1424         tmp |= DPIO_DCLKP_EN;
1425         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1426
1427         vlv_dpio_put(dev_priv);
1428
1429         /*
1430          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1431          */
1432         udelay(1);
1433
1434         /* Enable PLL */
1435         I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1436
1437         /* Check PLL is locked */
1438         if (intel_wait_for_register(&dev_priv->uncore,
1439                                     DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1440                                     1))
1441                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1442 }
1443
1444 static void chv_enable_pll(struct intel_crtc *crtc,
1445                            const struct intel_crtc_state *pipe_config)
1446 {
1447         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1448         enum pipe pipe = crtc->pipe;
1449
1450         assert_pipe_disabled(dev_priv, pipe);
1451
1452         /* PLL is protected by panel, make sure we can write it */
1453         assert_panel_unlocked(dev_priv, pipe);
1454
1455         if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1456                 _chv_enable_pll(crtc, pipe_config);
1457
1458         if (pipe != PIPE_A) {
1459                 /*
1460                  * WaPixelRepeatModeFixForC0:chv
1461                  *
1462                  * DPLLCMD is AWOL. Use chicken bits to propagate
1463                  * the value from DPLLBMD to either pipe B or C.
1464                  */
1465                 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1466                 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1467                 I915_WRITE(CBR4_VLV, 0);
1468                 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1469
1470                 /*
1471                  * DPLLB VGA mode also seems to cause problems.
1472                  * We should always have it disabled.
1473                  */
1474                 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1475         } else {
1476                 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1477                 POSTING_READ(DPLL_MD(pipe));
1478         }
1479 }
1480
1481 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1482 {
1483         if (IS_I830(dev_priv))
1484                 return false;
1485
1486         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1487 }
1488
1489 static void i9xx_enable_pll(struct intel_crtc *crtc,
1490                             const struct intel_crtc_state *crtc_state)
1491 {
1492         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1493         i915_reg_t reg = DPLL(crtc->pipe);
1494         u32 dpll = crtc_state->dpll_hw_state.dpll;
1495         int i;
1496
1497         assert_pipe_disabled(dev_priv, crtc->pipe);
1498
1499         /* PLL is protected by panel, make sure we can write it */
1500         if (i9xx_has_pps(dev_priv))
1501                 assert_panel_unlocked(dev_priv, crtc->pipe);
1502
1503         /*
1504          * Apparently we need to have VGA mode enabled prior to changing
1505          * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1506          * dividers, even though the register value does change.
1507          */
1508         I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
1509         I915_WRITE(reg, dpll);
1510
1511         /* Wait for the clocks to stabilize. */
1512         POSTING_READ(reg);
1513         udelay(150);
1514
1515         if (INTEL_GEN(dev_priv) >= 4) {
1516                 I915_WRITE(DPLL_MD(crtc->pipe),
1517                            crtc_state->dpll_hw_state.dpll_md);
1518         } else {
1519                 /* The pixel multiplier can only be updated once the
1520                  * DPLL is enabled and the clocks are stable.
1521                  *
1522                  * So write it again.
1523                  */
1524                 I915_WRITE(reg, dpll);
1525         }
1526
1527         /* We do this three times for luck */
1528         for (i = 0; i < 3; i++) {
1529                 I915_WRITE(reg, dpll);
1530                 POSTING_READ(reg);
1531                 udelay(150); /* wait for warmup */
1532         }
1533 }
1534
1535 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1536 {
1537         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1538         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1539         enum pipe pipe = crtc->pipe;
1540
1541         /* Don't disable the pipe or pipe PLLs if still needed (i830) */
1542         if (IS_I830(dev_priv))
1543                 return;
1544
1545         /* Make sure the pipe isn't still relying on us */
1546         assert_pipe_disabled(dev_priv, pipe);
1547
1548         I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1549         POSTING_READ(DPLL(pipe));
1550 }
1551
1552 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1553 {
1554         u32 val;
1555
1556         /* Make sure the pipe isn't still relying on us */
1557         assert_pipe_disabled(dev_priv, pipe);
1558
1559         val = DPLL_INTEGRATED_REF_CLK_VLV |
1560                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1561         if (pipe != PIPE_A)
1562                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1563
1564         I915_WRITE(DPLL(pipe), val);
1565         POSTING_READ(DPLL(pipe));
1566 }
1567
1568 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1569 {
1570         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1571         u32 val;
1572
1573         /* Make sure the pipe isn't still relying on us */
1574         assert_pipe_disabled(dev_priv, pipe);
1575
1576         val = DPLL_SSC_REF_CLK_CHV |
1577                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1578         if (pipe != PIPE_A)
1579                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1580
1581         I915_WRITE(DPLL(pipe), val);
1582         POSTING_READ(DPLL(pipe));
1583
1584         vlv_dpio_get(dev_priv);
1585
1586         /* Disable 10bit clock to display controller */
1587         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1588         val &= ~DPIO_DCLKP_EN;
1589         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1590
1591         vlv_dpio_put(dev_priv);
1592 }
1593
1594 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1595                          struct intel_digital_port *dport,
1596                          unsigned int expected_mask)
1597 {
1598         u32 port_mask;
1599         i915_reg_t dpll_reg;
1600
1601         switch (dport->base.port) {
1602         case PORT_B:
1603                 port_mask = DPLL_PORTB_READY_MASK;
1604                 dpll_reg = DPLL(0);
1605                 break;
1606         case PORT_C:
1607                 port_mask = DPLL_PORTC_READY_MASK;
1608                 dpll_reg = DPLL(0);
1609                 expected_mask <<= 4;
1610                 break;
1611         case PORT_D:
1612                 port_mask = DPLL_PORTD_READY_MASK;
1613                 dpll_reg = DPIO_PHY_STATUS;
1614                 break;
1615         default:
1616                 BUG();
1617         }
1618
1619         if (intel_wait_for_register(&dev_priv->uncore,
1620                                     dpll_reg, port_mask, expected_mask,
1621                                     1000))
1622                 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1623                      port_name(dport->base.port),
1624                      I915_READ(dpll_reg) & port_mask, expected_mask);
1625 }
1626
1627 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1628 {
1629         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1630         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1631         enum pipe pipe = crtc->pipe;
1632         i915_reg_t reg;
1633         u32 val, pipeconf_val;
1634
1635         /* Make sure PCH DPLL is enabled */
1636         assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1637
1638         /* FDI must be feeding us bits for PCH ports */
1639         assert_fdi_tx_enabled(dev_priv, pipe);
1640         assert_fdi_rx_enabled(dev_priv, pipe);
1641
1642         if (HAS_PCH_CPT(dev_priv)) {
1643                 /* Workaround: Set the timing override bit before enabling the
1644                  * pch transcoder. */
1645                 reg = TRANS_CHICKEN2(pipe);
1646                 val = I915_READ(reg);
1647                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1648                 I915_WRITE(reg, val);
1649         }
1650
1651         reg = PCH_TRANSCONF(pipe);
1652         val = I915_READ(reg);
1653         pipeconf_val = I915_READ(PIPECONF(pipe));
1654
1655         if (HAS_PCH_IBX(dev_priv)) {
1656                 /*
1657                  * Make the BPC in the transcoder consistent with
1658                  * that in the pipeconf register. For HDMI we must use 8bpc
1659                  * here for both 8bpc and 12bpc.
1660                  */
1661                 val &= ~PIPECONF_BPC_MASK;
1662                 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1663                         val |= PIPECONF_8BPC;
1664                 else
1665                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1666         }
1667
1668         val &= ~TRANS_INTERLACE_MASK;
1669         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1670                 if (HAS_PCH_IBX(dev_priv) &&
1671                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1672                         val |= TRANS_LEGACY_INTERLACED_ILK;
1673                 else
1674                         val |= TRANS_INTERLACED;
1675         } else {
1676                 val |= TRANS_PROGRESSIVE;
1677         }
1678
1679         I915_WRITE(reg, val | TRANS_ENABLE);
1680         if (intel_wait_for_register(&dev_priv->uncore,
1681                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1682                                     100))
1683                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1684 }
1685
1686 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1687                                       enum transcoder cpu_transcoder)
1688 {
1689         u32 val, pipeconf_val;
1690
1691         /* FDI must be feeding us bits for PCH ports */
1692         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1693         assert_fdi_rx_enabled(dev_priv, PIPE_A);
1694
1695         /* Workaround: set timing override bit. */
1696         val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1697         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1698         I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1699
1700         val = TRANS_ENABLE;
1701         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1702
1703         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1704             PIPECONF_INTERLACED_ILK)
1705                 val |= TRANS_INTERLACED;
1706         else
1707                 val |= TRANS_PROGRESSIVE;
1708
1709         I915_WRITE(LPT_TRANSCONF, val);
1710         if (intel_wait_for_register(&dev_priv->uncore,
1711                                     LPT_TRANSCONF,
1712                                     TRANS_STATE_ENABLE,
1713                                     TRANS_STATE_ENABLE,
1714                                     100))
1715                 DRM_ERROR("Failed to enable PCH transcoder\n");
1716 }
1717
1718 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1719                                             enum pipe pipe)
1720 {
1721         i915_reg_t reg;
1722         u32 val;
1723
1724         /* FDI relies on the transcoder */
1725         assert_fdi_tx_disabled(dev_priv, pipe);
1726         assert_fdi_rx_disabled(dev_priv, pipe);
1727
1728         /* Ports must be off as well */
1729         assert_pch_ports_disabled(dev_priv, pipe);
1730
1731         reg = PCH_TRANSCONF(pipe);
1732         val = I915_READ(reg);
1733         val &= ~TRANS_ENABLE;
1734         I915_WRITE(reg, val);
1735         /* wait for the PCH transcoder state bit to clear (transcoder off) */
1736         if (intel_wait_for_register(&dev_priv->uncore,
1737                                     reg, TRANS_STATE_ENABLE, 0,
1738                                     50))
1739                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1740
1741         if (HAS_PCH_CPT(dev_priv)) {
1742                 /* Workaround: Clear the timing override chicken bit again. */
1743                 reg = TRANS_CHICKEN2(pipe);
1744                 val = I915_READ(reg);
1745                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1746                 I915_WRITE(reg, val);
1747         }
1748 }
1749
1750 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1751 {
1752         u32 val;
1753
1754         val = I915_READ(LPT_TRANSCONF);
1755         val &= ~TRANS_ENABLE;
1756         I915_WRITE(LPT_TRANSCONF, val);
1757         /* wait for the PCH transcoder state bit to clear (transcoder off) */
1758         if (intel_wait_for_register(&dev_priv->uncore,
1759                                     LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1760                                     50))
1761                 DRM_ERROR("Failed to disable PCH transcoder\n");
1762
1763         /* Workaround: clear timing override bit. */
1764         val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1765         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1766         I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1767 }
1768
1769 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1770 {
1771         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1772
1773         if (HAS_PCH_LPT(dev_priv))
1774                 return PIPE_A;
1775         else
1776                 return crtc->pipe;
1777 }
1778
1779 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1780 {
1781         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1782
1783         /*
1784          * On i965gm the hardware frame counter reads
1785          * zero when the TV encoder is enabled :(
1786          */
1787         if (IS_I965GM(dev_priv) &&
1788             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1789                 return 0;
1790
1791         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1792                 return 0xffffffff; /* full 32 bit counter */
1793         else if (INTEL_GEN(dev_priv) >= 3)
1794                 return 0xffffff; /* only 24 bits of frame count */
1795         else
1796                 return 0; /* Gen2 doesn't have a hardware frame counter */
1797 }
1798
1799 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1800 {
1801         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1802
1803         drm_crtc_set_max_vblank_count(&crtc->base,
1804                                       intel_crtc_max_vblank_count(crtc_state));
1805         drm_crtc_vblank_on(&crtc->base);
1806 }
1807
1808 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1809 {
1810         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1811         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1812         enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1813         enum pipe pipe = crtc->pipe;
1814         i915_reg_t reg;
1815         u32 val;
1816
1817         DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1818
1819         assert_planes_disabled(crtc);
1820
1821         /*
1822          * A pipe without a PLL won't actually be able to drive bits from
1823          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1824          * need the check.
1825          */
1826         if (HAS_GMCH(dev_priv)) {
1827                 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1828                         assert_dsi_pll_enabled(dev_priv);
1829                 else
1830                         assert_pll_enabled(dev_priv, pipe);
1831         } else {
1832                 if (new_crtc_state->has_pch_encoder) {
1833                         /* if driving the PCH, we need FDI enabled */
1834                         assert_fdi_rx_pll_enabled(dev_priv,
1835                                                   intel_crtc_pch_transcoder(crtc));
1836                         assert_fdi_tx_pll_enabled(dev_priv,
1837                                                   (enum pipe) cpu_transcoder);
1838                 }
1839                 /* FIXME: assert CPU port conditions for SNB+ */
1840         }
1841
1842         trace_intel_pipe_enable(dev_priv, pipe);
1843
1844         reg = PIPECONF(cpu_transcoder);
1845         val = I915_READ(reg);
1846         if (val & PIPECONF_ENABLE) {
1847                 /* we keep both pipes enabled on 830 */
1848                 WARN_ON(!IS_I830(dev_priv));
1849                 return;
1850         }
1851
1852         I915_WRITE(reg, val | PIPECONF_ENABLE);
1853         POSTING_READ(reg);
1854
1855         /*
1856          * Until the pipe starts, PIPEDSL reads will return a stale value,
1857          * which causes an apparent vblank timestamp jump when PIPEDSL
1858          * resets to its proper value. That also messes up the frame count
1859          * when it's derived from the timestamps. So let's wait for the
1860          * pipe to start properly before we call drm_crtc_vblank_on()
1861          */
1862         if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1863                 intel_wait_for_pipe_scanline_moving(crtc);
1864 }
1865
1866 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1867 {
1868         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1869         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1870         enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1871         enum pipe pipe = crtc->pipe;
1872         i915_reg_t reg;
1873         u32 val;
1874
1875         DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1876
1877         /*
1878          * Make sure planes won't keep trying to pump pixels to us,
1879          * or we might hang the display.
1880          */
1881         assert_planes_disabled(crtc);
1882
1883         trace_intel_pipe_disable(dev_priv, pipe);
1884
1885         reg = PIPECONF(cpu_transcoder);
1886         val = I915_READ(reg);
1887         if ((val & PIPECONF_ENABLE) == 0)
1888                 return;
1889
1890         /*
1891          * Double wide has implications for planes
1892          * so best keep it disabled when not needed.
1893          */
1894         if (old_crtc_state->double_wide)
1895                 val &= ~PIPECONF_DOUBLE_WIDE;
1896
1897         /* Don't disable the pipe or pipe PLLs if still needed (i830) */
1898         if (!IS_I830(dev_priv))
1899                 val &= ~PIPECONF_ENABLE;
1900
1901         I915_WRITE(reg, val);
1902         if ((val & PIPECONF_ENABLE) == 0)
1903                 intel_wait_for_pipe_off(old_crtc_state);
1904 }
1905
1906 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1907 {
1908         return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1909 }
1910
1911 static unsigned int
1912 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1913 {
1914         struct drm_i915_private *dev_priv = to_i915(fb->dev);
1915         unsigned int cpp = fb->format->cpp[color_plane];
1916
1917         switch (fb->modifier) {
1918         case DRM_FORMAT_MOD_LINEAR:
1919                 return intel_tile_size(dev_priv);
1920         case I915_FORMAT_MOD_X_TILED:
1921                 if (IS_GEN(dev_priv, 2))
1922                         return 128;
1923                 else
1924                         return 512;
1925         case I915_FORMAT_MOD_Y_TILED_CCS:
1926                 if (color_plane == 1)
1927                         return 128;
1928                 /* fall through */
1929         case I915_FORMAT_MOD_Y_TILED:
1930                 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1931                         return 128;
1932                 else
1933                         return 512;
1934         case I915_FORMAT_MOD_Yf_TILED_CCS:
1935                 if (color_plane == 1)
1936                         return 128;
1937                 /* fall through */
1938         case I915_FORMAT_MOD_Yf_TILED:
1939                 switch (cpp) {
1940                 case 1:
1941                         return 64;
1942                 case 2:
1943                 case 4:
1944                         return 128;
1945                 case 8:
1946                 case 16:
1947                         return 256;
1948                 default:
1949                         MISSING_CASE(cpp);
1950                         return cpp;
1951                 }
1952                 break;
1953         default:
1954                 MISSING_CASE(fb->modifier);
1955                 return cpp;
1956         }
1957 }
1958
1959 static unsigned int
1960 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1961 {
1962         return intel_tile_size(to_i915(fb->dev)) /
1963                 intel_tile_width_bytes(fb, color_plane);
1964 }
1965
1966 /* Return the tile dimensions in pixel units */
1967 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1968                             unsigned int *tile_width,
1969                             unsigned int *tile_height)
1970 {
1971         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1972         unsigned int cpp = fb->format->cpp[color_plane];
1973
1974         *tile_width = tile_width_bytes / cpp;
1975         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1976 }
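
/*
 * Informal worked example of the tile helpers above, using assumed values
 * rather than any particular platform state: for a gen4+ X-tiled XRGB8888
 * fb, intel_tile_width_bytes() is 512 and intel_tile_size() is 4096, so
 * with cpp == 4 intel_tile_dims() yields *tile_width = 512 / 4 = 128
 * pixels and *tile_height = 4096 / 512 = 8 rows, i.e. a 128x8 pixel X tile.
 */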
1977
1978 unsigned int
1979 intel_fb_align_height(const struct drm_framebuffer *fb,
1980                       int color_plane, unsigned int height)
1981 {
1982         unsigned int tile_height = intel_tile_height(fb, color_plane);
1983
1984         return ALIGN(height, tile_height);
1985 }
1986
1987 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1988 {
1989         unsigned int size = 0;
1990         int i;
1991
1992         for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
1993                 size += rot_info->plane[i].width * rot_info->plane[i].height;
1994
1995         return size;
1996 }
1997
1998 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1999 {
2000         unsigned int size = 0;
2001         int i;
2002
2003         for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
2004                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2005
2006         return size;
2007 }
2008
2009 static void
2010 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2011                         const struct drm_framebuffer *fb,
2012                         unsigned int rotation)
2013 {
2014         view->type = I915_GGTT_VIEW_NORMAL;
2015         if (drm_rotation_90_or_270(rotation)) {
2016                 view->type = I915_GGTT_VIEW_ROTATED;
2017                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2018         }
2019 }
2020
2021 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2022 {
2023         if (IS_I830(dev_priv))
2024                 return 16 * 1024;
2025         else if (IS_I85X(dev_priv))
2026                 return 256;
2027         else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2028                 return 32;
2029         else
2030                 return 4 * 1024;
2031 }
2032
2033 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2034 {
2035         if (INTEL_GEN(dev_priv) >= 9)
2036                 return 256 * 1024;
2037         else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2038                  IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2039                 return 128 * 1024;
2040         else if (INTEL_GEN(dev_priv) >= 4)
2041                 return 4 * 1024;
2042         else
2043                 return 0;
2044 }
2045
2046 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2047                                          int color_plane)
2048 {
2049         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2050
2051         /* AUX_DIST needs only 4K alignment */
2052         if (color_plane == 1)
2053                 return 4096;
2054
2055         switch (fb->modifier) {
2056         case DRM_FORMAT_MOD_LINEAR:
2057                 return intel_linear_alignment(dev_priv);
2058         case I915_FORMAT_MOD_X_TILED:
2059                 if (INTEL_GEN(dev_priv) >= 9)
2060                         return 256 * 1024;
2061                 return 0;
2062         case I915_FORMAT_MOD_Y_TILED_CCS:
2063         case I915_FORMAT_MOD_Yf_TILED_CCS:
2064         case I915_FORMAT_MOD_Y_TILED:
2065         case I915_FORMAT_MOD_Yf_TILED:
2066                 return 1 * 1024 * 1024;
2067         default:
2068                 MISSING_CASE(fb->modifier);
2069                 return 0;
2070         }
2071 }
2072
2073 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2074 {
2075         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2076         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2077
2078         return INTEL_GEN(dev_priv) < 4 ||
2079                 (plane->has_fbc &&
2080                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2081 }
2082
2083 struct i915_vma *
2084 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2085                            const struct i915_ggtt_view *view,
2086                            bool uses_fence,
2087                            unsigned long *out_flags)
2088 {
2089         struct drm_device *dev = fb->dev;
2090         struct drm_i915_private *dev_priv = to_i915(dev);
2091         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2092         intel_wakeref_t wakeref;
2093         struct i915_vma *vma;
2094         unsigned int pinctl;
2095         u32 alignment;
2096
2097         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2098
2099         alignment = intel_surf_alignment(fb, 0);
2100
2101         /* Note that the w/a also requires 64 PTE of padding following the
2102          * bo. We currently fill all unused PTE with the shadow page and so
2103          * we should always have valid PTE following the scanout, preventing
2104          * the VT-d warning.
2105          */
2106         if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2107                 alignment = 256 * 1024;
2108
2109         /*
2110          * Global gtt pte registers are special registers which actually forward
2111          * writes to a chunk of system memory. Which means that there is no risk
2112          * that the register values disappear as soon as we call
2113          * intel_runtime_pm_put(), so it is correct to wrap only the
2114          * pin/unpin/fence and not more.
2115          */
2116         wakeref = intel_runtime_pm_get(dev_priv);
2117
2118         atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2119
2120         pinctl = 0;
2121
2122         /* Valleyview is definitely limited to scanning out the first
2123          * 512MiB. Let's presume this behaviour was inherited from the
2124          * g4x display engine and that all earlier gen are similarly
2125          * limited. Testing suggests that it is a little more
2126          * complicated than this. For example, Cherryview appears quite
2127          * happy to scanout from anywhere within its global aperture.
2128          */
2129         if (HAS_GMCH(dev_priv))
2130                 pinctl |= PIN_MAPPABLE;
2131
2132         vma = i915_gem_object_pin_to_display_plane(obj,
2133                                                    alignment, view, pinctl);
2134         if (IS_ERR(vma))
2135                 goto err;
2136
2137         if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2138                 int ret;
2139
2140                 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2141                  * fence, whereas 965+ only requires a fence if using
2142                  * framebuffer compression.  For simplicity we always install a
2143                  * fence when possible, as the cost is not that onerous.
2144                  *
2145                  * If we fail to fence the tiled scanout, then either the
2146                  * modeset will reject the change (which is highly unlikely as
2147                  * the affected systems, all but one, do not have unmappable
2148                  * space) or we will not be able to enable full powersaving
2149                  * techniques (also likely not to apply due to various limits
2150                  * FBC and the like impose on the size of the buffer, which
2151                  * presumably we violated anyway with this unmappable buffer).
2152                  * Anyway, it is presumably better to stumble onwards with
2153                  * something and try to run the system in a "less than optimal"
2154                  * mode that matches the user configuration.
2155                  */
2156                 ret = i915_vma_pin_fence(vma);
2157                 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2158                         i915_gem_object_unpin_from_display_plane(vma);
2159                         vma = ERR_PTR(ret);
2160                         goto err;
2161                 }
2162
2163                 if (ret == 0 && vma->fence)
2164                         *out_flags |= PLANE_HAS_FENCE;
2165         }
2166
2167         i915_vma_get(vma);
2168 err:
2169         atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2170
2171         intel_runtime_pm_put(dev_priv, wakeref);
2172         return vma;
2173 }
2174
2175 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2176 {
2177         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2178
2179         if (flags & PLANE_HAS_FENCE)
2180                 i915_vma_unpin_fence(vma);
2181         i915_gem_object_unpin_from_display_plane(vma);
2182         i915_vma_put(vma);
2183 }
2184
2185 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2186                           unsigned int rotation)
2187 {
2188         if (drm_rotation_90_or_270(rotation))
2189                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2190         else
2191                 return fb->pitches[color_plane];
2192 }
2193
2194 /*
2195  * Convert the x/y offsets into a linear offset.
2196  * Only valid with 0/180 degree rotation, which is fine since linear
2197  * offset is only used with linear buffers on pre-hsw and tiled buffers
2198  * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
2199  */
2200 u32 intel_fb_xy_to_linear(int x, int y,
2201                           const struct intel_plane_state *state,
2202                           int color_plane)
2203 {
2204         const struct drm_framebuffer *fb = state->base.fb;
2205         unsigned int cpp = fb->format->cpp[color_plane];
2206         unsigned int pitch = state->color_plane[color_plane].stride;
2207
2208         return y * pitch + x * cpp;
2209 }
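
/*
 * Sketch of the arithmetic above with assumed numbers (not taken from real
 * plane state): a 1280 pixel wide XRGB8888 fb has cpp == 4 and a stride of
 * 5120 bytes, so (x, y) == (16, 2) maps to the linear offset
 * 2 * 5120 + 16 * 4 == 10304 bytes.
 */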
2210
2211 /*
2212  * Add the x/y offsets derived from fb->offsets[] to the user
2213  * specified plane src x/y offsets. The resulting x/y offsets
2214  * specify the start of scanout from the beginning of the gtt mapping.
2215  */
2216 void intel_add_fb_offsets(int *x, int *y,
2217                           const struct intel_plane_state *state,
2218                           int color_plane)
2220 {
2221         *x += state->color_plane[color_plane].x;
2222         *y += state->color_plane[color_plane].y;
2223 }
2224
2225 static u32 intel_adjust_tile_offset(int *x, int *y,
2226                                     unsigned int tile_width,
2227                                     unsigned int tile_height,
2228                                     unsigned int tile_size,
2229                                     unsigned int pitch_tiles,
2230                                     u32 old_offset,
2231                                     u32 new_offset)
2232 {
2233         unsigned int pitch_pixels = pitch_tiles * tile_width;
2234         unsigned int tiles;
2235
2236         WARN_ON(old_offset & (tile_size - 1));
2237         WARN_ON(new_offset & (tile_size - 1));
2238         WARN_ON(new_offset > old_offset);
2239
2240         tiles = (old_offset - new_offset) / tile_size;
2241
2242         *y += tiles / pitch_tiles * tile_height;
2243         *x += tiles % pitch_tiles * tile_width;
2244
2245         /* minimize x in case it got needlessly big */
2246         *y += *x / pitch_pixels * tile_height;
2247         *x %= pitch_pixels;
2248
2249         return new_offset;
2250 }
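
/*
 * Rough example of the adjustment above, using assumed numbers only: with
 * 4KiB tiles of 128x8 pixels and pitch_tiles == 10, moving from
 * old_offset == 23 * 4096 to new_offset == 0 spans 23 tiles, so y grows by
 * 23 / 10 * 8 == 16 rows and x by 23 % 10 * 128 == 384 pixels; the final
 * step then keeps x below the 1280 pixel pitch.
 */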
2251
2252 static bool is_surface_linear(u64 modifier, int color_plane)
2253 {
2254         return modifier == DRM_FORMAT_MOD_LINEAR;
2255 }
2256
2257 static u32 intel_adjust_aligned_offset(int *x, int *y,
2258                                        const struct drm_framebuffer *fb,
2259                                        int color_plane,
2260                                        unsigned int rotation,
2261                                        unsigned int pitch,
2262                                        u32 old_offset, u32 new_offset)
2263 {
2264         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2265         unsigned int cpp = fb->format->cpp[color_plane];
2266
2267         WARN_ON(new_offset > old_offset);
2268
2269         if (!is_surface_linear(fb->modifier, color_plane)) {
2270                 unsigned int tile_size, tile_width, tile_height;
2271                 unsigned int pitch_tiles;
2272
2273                 tile_size = intel_tile_size(dev_priv);
2274                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2275
2276                 if (drm_rotation_90_or_270(rotation)) {
2277                         pitch_tiles = pitch / tile_height;
2278                         swap(tile_width, tile_height);
2279                 } else {
2280                         pitch_tiles = pitch / (tile_width * cpp);
2281                 }
2282
2283                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2284                                          tile_size, pitch_tiles,
2285                                          old_offset, new_offset);
2286         } else {
2287                 old_offset += *y * pitch + *x * cpp;
2288
2289                 *y = (old_offset - new_offset) / pitch;
2290                 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2291         }
2292
2293         return new_offset;
2294 }
2295
2296 /*
2297  * Adjust the tile offset by moving the difference into
2298  * the x/y offsets.
2299  */
2300 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2301                                              const struct intel_plane_state *state,
2302                                              int color_plane,
2303                                              u32 old_offset, u32 new_offset)
2304 {
2305         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2306                                            state->base.rotation,
2307                                            state->color_plane[color_plane].stride,
2308                                            old_offset, new_offset);
2309 }
2310
2311 /*
2312  * Computes the aligned offset to the base tile and adjusts
2313  * x, y. bytes per pixel is assumed to be a power-of-two.
2314  *
2315  * In the 90/270 rotated case, x and y are assumed
2316  * to be already rotated to match the rotated GTT view, and
2317  * pitch is the tile_height aligned framebuffer height.
2318  *
2319  * This function is used when computing the derived information
2320  * under intel_framebuffer, so using any of that information
2321  * here is not allowed. Anything under drm_framebuffer can be
2322  * used. This is why the user has to pass in the pitch since it
2323  * is specified in the rotated orientation.
2324  */
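/*
 * Illustration of the linear branch below with assumed values: for
 * pitch == 5120, cpp == 4, an alignment of 4096 (decremented to a 4095
 * mask) and (x, y) == (16, 2), the byte offset is 10304, offset_aligned
 * becomes 8192, and the remaining 2112 bytes are folded back into the
 * offsets as (x, y) == (528, 0).
 */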
2325 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2326                                         int *x, int *y,
2327                                         const struct drm_framebuffer *fb,
2328                                         int color_plane,
2329                                         unsigned int pitch,
2330                                         unsigned int rotation,
2331                                         u32 alignment)
2332 {
2333         unsigned int cpp = fb->format->cpp[color_plane];
2334         u32 offset, offset_aligned;
2335
2336         if (alignment)
2337                 alignment--;
2338
2339         if (!is_surface_linear(fb->modifier, color_plane)) {
2340                 unsigned int tile_size, tile_width, tile_height;
2341                 unsigned int tile_rows, tiles, pitch_tiles;
2342
2343                 tile_size = intel_tile_size(dev_priv);
2344                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2345
2346                 if (drm_rotation_90_or_270(rotation)) {
2347                         pitch_tiles = pitch / tile_height;
2348                         swap(tile_width, tile_height);
2349                 } else {
2350                         pitch_tiles = pitch / (tile_width * cpp);
2351                 }
2352
2353                 tile_rows = *y / tile_height;
2354                 *y %= tile_height;
2355
2356                 tiles = *x / tile_width;
2357                 *x %= tile_width;
2358
2359                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2360                 offset_aligned = offset & ~alignment;
2361
2362                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2363                                          tile_size, pitch_tiles,
2364                                          offset, offset_aligned);
2365         } else {
2366                 offset = *y * pitch + *x * cpp;
2367                 offset_aligned = offset & ~alignment;
2368
2369                 *y = (offset & alignment) / pitch;
2370                 *x = ((offset & alignment) - *y * pitch) / cpp;
2371         }
2372
2373         return offset_aligned;
2374 }
2375
2376 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2377                                               const struct intel_plane_state *state,
2378                                               int color_plane)
2379 {
2380         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2381         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2382         const struct drm_framebuffer *fb = state->base.fb;
2383         unsigned int rotation = state->base.rotation;
2384         int pitch = state->color_plane[color_plane].stride;
2385         u32 alignment;
2386
2387         if (intel_plane->id == PLANE_CURSOR)
2388                 alignment = intel_cursor_alignment(dev_priv);
2389         else
2390                 alignment = intel_surf_alignment(fb, color_plane);
2391
2392         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2393                                             pitch, rotation, alignment);
2394 }
2395
2396 /* Convert the fb->offset[] into x/y offsets */
2397 static int intel_fb_offset_to_xy(int *x, int *y,
2398                                  const struct drm_framebuffer *fb,
2399                                  int color_plane)
2400 {
2401         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2402         unsigned int height;
2403
2404         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2405             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2406                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2407                               fb->offsets[color_plane], color_plane);
2408                 return -EINVAL;
2409         }
2410
2411         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2412         height = ALIGN(height, intel_tile_height(fb, color_plane));
2413
2414         /* Catch potential overflows early */
2415         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2416                             fb->offsets[color_plane])) {
2417                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2418                               fb->offsets[color_plane], fb->pitches[color_plane],
2419                               color_plane);
2420                 return -ERANGE;
2421         }
2422
2423         *x = 0;
2424         *y = 0;
2425
2426         intel_adjust_aligned_offset(x, y,
2427                                     fb, color_plane, DRM_MODE_ROTATE_0,
2428                                     fb->pitches[color_plane],
2429                                     fb->offsets[color_plane], 0);
2430
2431         return 0;
2432 }
2433
2434 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2435 {
2436         switch (fb_modifier) {
2437         case I915_FORMAT_MOD_X_TILED:
2438                 return I915_TILING_X;
2439         case I915_FORMAT_MOD_Y_TILED:
2440         case I915_FORMAT_MOD_Y_TILED_CCS:
2441                 return I915_TILING_Y;
2442         default:
2443                 return I915_TILING_NONE;
2444         }
2445 }
2446
2447 /*
2448  * From the Sky Lake PRM:
2449  * "The Color Control Surface (CCS) contains the compression status of
2450  *  the cache-line pairs. The compression state of the cache-line pair
2451  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2452  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2453  *  cache-line-pairs. CCS is always Y tiled."
2454  *
2455  * Since a cache-line pair refers to two horizontally adjacent cache lines,
2456  * each cache line in the CCS corresponds to an area of 32x16 cache
2457  * lines on the main surface. Since each pixel is 4 bytes, this gives
2458  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2459  * main surface.
2460  */
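/*
 * Informal sanity check of that ratio (assuming 4 bytes per pixel and
 * 64 byte cache lines covering 4x4 pixels in Y tiling): each 2-bit CCS
 * entry tracks a 128 byte cache-line pair, i.e. 8x4 pixels, so the four
 * entries in one CCS byte cover 8x16 pixels, matching the .hsub = 8,
 * .vsub = 16 used for the CCS color planes below.
 */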
2461 static const struct drm_format_info ccs_formats[] = {
2462         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2463         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2464         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2465         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2466 };
2467
2468 static const struct drm_format_info *
2469 lookup_format_info(const struct drm_format_info formats[],
2470                    int num_formats, u32 format)
2471 {
2472         int i;
2473
2474         for (i = 0; i < num_formats; i++) {
2475                 if (formats[i].format == format)
2476                         return &formats[i];
2477         }
2478
2479         return NULL;
2480 }
2481
2482 static const struct drm_format_info *
2483 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2484 {
2485         switch (cmd->modifier[0]) {
2486         case I915_FORMAT_MOD_Y_TILED_CCS:
2487         case I915_FORMAT_MOD_Yf_TILED_CCS:
2488                 return lookup_format_info(ccs_formats,
2489                                           ARRAY_SIZE(ccs_formats),
2490                                           cmd->pixel_format);
2491         default:
2492                 return NULL;
2493         }
2494 }
2495
2496 bool is_ccs_modifier(u64 modifier)
2497 {
2498         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2499                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2500 }
2501
2502 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2503                               u32 pixel_format, u64 modifier)
2504 {
2505         struct intel_crtc *crtc;
2506         struct intel_plane *plane;
2507
2508         /*
2509          * We assume the primary plane for pipe A has
2510          * the highest stride limits of them all.
2511          */
2512         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2513         plane = to_intel_plane(crtc->base.primary);
2514
2515         return plane->max_stride(plane, pixel_format, modifier,
2516                                  DRM_MODE_ROTATE_0);
2517 }
2518
2519 static
2520 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2521                         u32 pixel_format, u64 modifier)
2522 {
2523         /*
2524          * Arbitrary limit for gen4+ chosen to match the
2525          * render engine max stride.
2526          *
2527          * The new CCS hash mode makes remapping impossible
2528          */
2529         if (!is_ccs_modifier(modifier)) {
2530                 if (INTEL_GEN(dev_priv) >= 7)
2531                         return 256*1024;
2532                 else if (INTEL_GEN(dev_priv) >= 4)
2533                         return 128*1024;
2534         }
2535
2536         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2537 }
2538
2539 static u32
2540 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2541 {
2542         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2543
2544         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2545                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2546                                                            fb->format->format,
2547                                                            fb->modifier);
2548
2549                 /*
2550                  * To make remapping with linear generally feasible
2551                  * we need the stride to be page aligned.
2552                  */
2553                 if (fb->pitches[color_plane] > max_stride)
2554                         return intel_tile_size(dev_priv);
2555                 else
2556                         return 64;
2557         } else {
2558                 return intel_tile_width_bytes(fb, color_plane);
2559         }
2560 }
2561
2562 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2563 {
2564         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2565         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2566         const struct drm_framebuffer *fb = plane_state->base.fb;
2567         int i;
2568
2569         /* We don't want to deal with remapping with cursors */
2570         if (plane->id == PLANE_CURSOR)
2571                 return false;
2572
2573         /*
2574          * The display engine limits already match/exceed the
2575          * render engine limits, so not much point in remapping.
2576          * Would also need to deal with the fence POT alignment
2577          * and gen2 2KiB GTT tile size.
2578          */
2579         if (INTEL_GEN(dev_priv) < 4)
2580                 return false;
2581
2582         /*
2583          * The new CCS hash mode isn't compatible with remapping as
2584          * the virtual address of the pages affects the compressed data.
2585          */
2586         if (is_ccs_modifier(fb->modifier))
2587                 return false;
2588
2589         /* Linear needs a page aligned stride for remapping */
2590         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2591                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2592
2593                 for (i = 0; i < fb->format->num_planes; i++) {
2594                         if (fb->pitches[i] & alignment)
2595                                 return false;
2596                 }
2597         }
2598
2599         return true;
2600 }
2601
2602 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2603 {
2604         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2605         const struct drm_framebuffer *fb = plane_state->base.fb;
2606         unsigned int rotation = plane_state->base.rotation;
2607         u32 stride, max_stride;
2608
2609         /*
2610          * No remapping for invisible planes since we don't have
2611          * an actual source viewport to remap.
2612          */
2613         if (!plane_state->base.visible)
2614                 return false;
2615
2616         if (!intel_plane_can_remap(plane_state))
2617                 return false;
2618
2619         /*
2620          * FIXME: aux plane limits on gen9+ are
2621          * unclear in Bspec, for now no checking.
2622          */
2623         stride = intel_fb_pitch(fb, 0, rotation);
2624         max_stride = plane->max_stride(plane, fb->format->format,
2625                                        fb->modifier, rotation);
2626
2627         return stride > max_stride;
2628 }
2629
2630 static int
2631 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2632                    struct drm_framebuffer *fb)
2633 {
2634         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2635         struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2636         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2637         u32 gtt_offset_rotated = 0;
2638         unsigned int max_size = 0;
2639         int i, num_planes = fb->format->num_planes;
2640         unsigned int tile_size = intel_tile_size(dev_priv);
2641
2642         for (i = 0; i < num_planes; i++) {
2643                 unsigned int width, height;
2644                 unsigned int cpp, size;
2645                 u32 offset;
2646                 int x, y;
2647                 int ret;
2648
2649                 cpp = fb->format->cpp[i];
2650                 width = drm_framebuffer_plane_width(fb->width, fb, i);
2651                 height = drm_framebuffer_plane_height(fb->height, fb, i);
2652
2653                 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2654                 if (ret) {
2655                         DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2656                                       i, fb->offsets[i]);
2657                         return ret;
2658                 }
2659
2660                 if (is_ccs_modifier(fb->modifier) && i == 1) {
2661                         int hsub = fb->format->hsub;
2662                         int vsub = fb->format->vsub;
2663                         int tile_width, tile_height;
2664                         int main_x, main_y;
2665                         int ccs_x, ccs_y;
2666
2667                         intel_tile_dims(fb, i, &tile_width, &tile_height);
2668                         tile_width *= hsub;
2669                         tile_height *= vsub;
2670
2671                         ccs_x = (x * hsub) % tile_width;
2672                         ccs_y = (y * vsub) % tile_height;
2673                         main_x = intel_fb->normal[0].x % tile_width;
2674                         main_y = intel_fb->normal[0].y % tile_height;
2675
2676                         /*
2677                          * CCS doesn't have its own x/y offset register, so the intra CCS tile
2678                          * x/y offsets must match between CCS and the main surface.
2679                          */
2680                         if (main_x != ccs_x || main_y != ccs_y) {
2681                                 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2682                                               main_x, main_y,
2683                                               ccs_x, ccs_y,
2684                                               intel_fb->normal[0].x,
2685                                               intel_fb->normal[0].y,
2686                                               x, y);
2687                                 return -EINVAL;
2688                         }
2689                 }
2690
2691                 /*
2692                  * The fence (if used) is aligned to the start of the object
2693                  * so having the framebuffer wrap around across the edge of the
2694                  * fenced region doesn't really work. We have no API to configure
2695                  * the fence start offset within the object (nor could we probably
2696                  * on gen2/3). So it's just easier if we just require that the
2697                  * fb layout agrees with the fence layout. We already check that the
2698                  * fb stride matches the fence stride elsewhere.
2699                  */
2700                 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2701                     (x + width) * cpp > fb->pitches[i]) {
2702                         DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2703                                       i, fb->offsets[i]);
2704                         return -EINVAL;
2705                 }
2706
2707                 /*
2708                  * First pixel of the framebuffer from
2709                  * the start of the normal gtt mapping.
2710                  */
2711                 intel_fb->normal[i].x = x;
2712                 intel_fb->normal[i].y = y;
2713
2714                 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2715                                                       fb->pitches[i],
2716                                                       DRM_MODE_ROTATE_0,
2717                                                       tile_size);
2718                 offset /= tile_size;
2719
2720                 if (!is_surface_linear(fb->modifier, i)) {
2721                         unsigned int tile_width, tile_height;
2722                         unsigned int pitch_tiles;
2723                         struct drm_rect r;
2724
2725                         intel_tile_dims(fb, i, &tile_width, &tile_height);
2726
2727                         rot_info->plane[i].offset = offset;
2728                         rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2729                         rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2730                         rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2731
2732                         intel_fb->rotated[i].pitch =
2733                                 rot_info->plane[i].height * tile_height;
2734
2735                         /* how many tiles does this plane need */
2736                         size = rot_info->plane[i].stride * rot_info->plane[i].height;
2737                         /*
2738                          * If the plane isn't horizontally tile aligned,
2739                          * we need one more tile.
2740                          */
2741                         if (x != 0)
2742                                 size++;
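
                        /*
                         * Worked example with illustrative numbers: for a
                         * 128 byte x 32 row tile and cpp = 4 (tile_width =
                         * 32 pixels), a 5120 byte pitch gives a stride of
                         * DIV_ROUND_UP(5120, 128) = 40 tiles; with
                         * y + height = 700 lines the plane spans
                         * DIV_ROUND_UP(700, 32) = 22 tile rows, so
                         * size = 40 * 22 = 880 tiles, plus one extra tile
                         * when x isn't tile aligned.
                         */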
2743
2744                         /* rotate the x/y offsets to match the GTT view */
2745                         r.x1 = x;
2746                         r.y1 = y;
2747                         r.x2 = x + width;
2748                         r.y2 = y + height;
2749                         drm_rect_rotate(&r,
2750                                         rot_info->plane[i].width * tile_width,
2751                                         rot_info->plane[i].height * tile_height,
2752                                         DRM_MODE_ROTATE_270);
2753                         x = r.x1;
2754                         y = r.y1;
2755
2756                         /* rotate the tile dimensions to match the GTT view */
2757                         pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2758                         swap(tile_width, tile_height);
2759
2760                         /*
2761                          * We only keep the x/y offsets, so push all of the
2762                          * gtt offset into the x/y offsets.
2763                          */
2764                         intel_adjust_tile_offset(&x, &y,
2765                                                  tile_width, tile_height,
2766                                                  tile_size, pitch_tiles,
2767                                                  gtt_offset_rotated * tile_size, 0);
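
                        /*
                         * Rough sketch of the adjustment (assuming
                         * intel_adjust_tile_offset() spreads a tile-aligned
                         * byte offset over whole tile rows and columns): if
                         * gtt_offset_rotated were 880 tiles with a rotated
                         * pitch of 22 tiles, about 880 / 22 = 40 tile rows
                         * worth of y (plus any remainder as x) would be
                         * folded into this plane's x/y offsets.
                         */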
2768
2769                         gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2770
2771                         /*
2772                          * First pixel of the framebuffer from
2773                          * the start of the rotated gtt mapping.
2774                          */
2775                         intel_fb->rotated[i].x = x;
2776                         intel_fb->rotated[i].y = y;
2777                 } else {
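                        /*
                         * Worked example for the linear case (illustrative
                         * numbers, assuming a 4096 byte tile/page size): with
                         * a 4096 byte pitch, cpp = 4, x = 16 and
                         * y + height = 300 lines, the plane needs
                         * DIV_ROUND_UP(300 * 4096 + 64, 4096) = 301 pages.
                         */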
2778                         size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2779                                             x * cpp, tile_size);
2780                 }
2781
2782                 /* how many tiles in total are needed in the bo */
2783                 max_size = max(max_size, offset + size);
2784         }
2785
2786         if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2787                 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2788                               mul_u32_u32(max_size, tile_size), obj->base.size);
2789                 return -EINVAL;
2790         }
2791
2792         return 0;
2793 }
2794
2795 static void
2796 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2797 {
2798         struct drm_i915_private *dev_priv =
2799                 to_i915(plane_state->base.plane->dev);
2800         struct drm_framebuffer *fb = plane_state->base.fb;
2801         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2802         struct intel_rotation_info *info = &plane_state->view.rotated;
2803         unsigned int rotation = plane_state->base.rotation;
2804         int i, num_planes = fb->format->num_planes;
2805         unsigned int tile_size = intel_tile_size(dev_priv);
2806         unsigned int src_x, src_y;
2807         unsigned int src_w, src_h;
2808         u32 gtt_offset = 0;
2809
2810         memset(&plane_state->view, 0, sizeof(plane_state->view));
2811         plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2812                 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
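
        /*
         * Loosely speaking: a ROTATED view lays the tiles out 90/270
         * degree rotated in the GGTT, while a REMAPPED view keeps the
         * orientation but repacks the planes with a stride that fits the
         * hardware limits; either way the x/y and stride of each color
         * plane have to be recomputed below for the new mapping.
         */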
2813
2814         src_x = plane_state->base.src.x1 >> 16;
2815         src_y = plane_state->base.src.y1 >> 16;
2816         src_w = drm_rect_width(&plane_state->base.src) >> 16;
2817         src_h = drm_rect_height(&plane_state->base.src) >> 16;
2818
2819         WARN_ON(is_ccs_modifier(fb->modifier));
2820
2821         /* Make src coordinates relative to the viewport */
2822         drm_rect_translate(&plane_state->base.src,
2823                            -(src_x << 16), -(src_y << 16));
2824
2825         /* Rotate src coordinates to match rotated GTT view */
2826         if (drm_rotation_90_or_270(rotation))
2827                 drm_rect_rotate(&plane_state->base.src,
2828                                 src_w << 16, src_h << 16,
2829                                 DRM_MODE_ROTATE_270);
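
        /*
         * Note: the rotated GGTT view is built with the same fixed
         * ROTATE_270 transform (see the rotated path above), so rotating
         * the src rectangle here keeps the src coordinates and the view
         * consistent; the per-plane x/y computed below are in that
         * rotated space.
         */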
2830
2831         for (i = 0; i < num_planes; i++) {
2832                 unsigned int hsub = i ? fb->format->hsub : 1;
2833                 unsigned int vsub = i ? fb->format->vsub : 1;
2834                 unsigned int cpp = fb->format->cpp[i];
2835                 unsigned int tile_width, tile_height;
2836                 unsigned int width, height;
2837                 unsigned int pitch_tiles;
2838                 unsigned int x, y;
2839                 u32 offset;
2840
2841                 intel_tile_dims(fb, i, &tile_width, &tile_height);
2842
2843                 x = src_x / hsub;
2844                 y = src_y / vsub;
2845                 width = src_w / hsub;
2846                 height = src_h / vsub;
2847
2848                 /*
2849                  * First pixel of the src viewport from the
2850                  * start of the normal gtt mapping.
2851                  */
2852                 x += intel_fb->normal[i].x;
2853                 y += intel_fb->normal[i].y;
2854
2855                 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2856                                                       fb, i, fb->pitches[i],
2857                                                       DRM_MODE_ROTATE_0, tile_size);
2858                 offset /= tile_size;
2859
2860                 info->plane[i].offset = offset;
2861                 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2862                                                      tile_width * cpp);
2863                 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2864                 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2865
2866                 if (drm_rotation_90_or_270(rotation)) {
2867                         struct drm_rect r;
2868
2869                         /* rotate the x/y offsets to match the GTT view */
2870                         r.x1 = x;
2871                         r.y1 = y;
2872                         r.x2 = x + width;
2873                         r.y2 = y + height;
2874                         drm_rect_rotate(&r,
2875                                         info->plane[i].width * tile_width,
2876                                         info->plane[i].height * tile_height,
2877                                         DRM_MODE_ROTATE_270);
2878                         x = r.x1;
2879                         y = r.y1;
2880
2881                         pitch_tiles = info->plane[i].height;
2882                         plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2883
2884                         /* rotate the tile dimensions to match the GTT view */
2885                         swap(tile_width, tile_height);
2886                 } else {
2887                         pitch_tiles = info->plane[i].width;
2888                         plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2889                 }
2890
2891                 /*
2892                  * We only keep the x/y offsets, so push all of the
2893                  * gtt offset into the x/y offsets.
2894                  */
2895                 intel_adjust_tile_offset(&x, &y,
2896                                          tile_width, tile_height,
2897                                          tile_size, pitch_tiles,
2898                                          gtt_offset * tile_size, 0);
2899
2900                 gtt_offset += info->plane[i].width * info->plane[i].height;
2901
2902                 plane_state->color_plane[i].offset = 0;
2903                 plane_state->color_plane[i].x = x;
2904                 plane_state->color_plane[i].y = y;
2905         }
2906 }
2907
2908 static int
2909 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2910 {
2911         const struct intel_framebuffer *fb =
2912                 to_intel_framebuffer(plane_state->base.fb);
2913         unsigned int rotation = plane_state->base.rotation;
2914         int i, num_planes;
2915
2916         if (!fb)
2917                 return 0;
2918
2919         num_planes = fb->base.format->num_planes;
2920
2921         if (intel_plane_needs_remap(plane_state)) {
2922                 intel_plane_remap_gtt(plane_state);
2923
2924                 /*
2925                  * Sometimes even remapping can't overcome
2926                  * the stride limitations :( This can happen with
2927                  * big plane sizes and suitably misaligned
2928                  * offsets.
2929                  */
2930                 return intel_plane_check_stride(plane_state);
2931         }
2932
2933         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2934
2935         for (i = 0; i < num_planes; i++) {
2936                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2937                 plane_state->color_plane[i].offset = 0;
2938
2939                 if (drm_rotation_90_or_270(rotation)) {
2940                         plane_state->color_plane[i].x = fb->rotated[i].x;
2941                         plane_state->color_plane[i].y = fb->rotated[i].y;
2942                 } else {
2943                         plane_state->color_plane[i].x = fb->normal[i].x;
2944                         plane_state->color_plane[i].y = fb->normal[i].y;
2945                 }
2946         }
2947
2948         /* Rotate src coordinates to match rotated GTT view */
2949         if (drm_rotation_90_or_270(rotation))
2950                 drm_rect_rotate(&plane_state->base.src,
2951                                 fb->base.width << 16, fb->base.height << 16,
2952                                 DRM_MODE_ROTATE_270);
2953
2954         return intel_plane_check_stride(plane_state);
2955 }
2956
2957 static int i9xx_format_to_fourcc(int format)
2958 {
2959         switch (format) {
2960         case DISPPLANE_8BPP:
2961                 return DRM_FORMAT_C8;
2962         case DISPPLANE_BGRX555:
2963                 return DRM_FORMAT_XRGB1555;
2964         case DISPPLANE_BGRX565:
2965                 return DRM_FORMAT_RGB565;
2966         default:
2967         case DISPPLANE_BGRX888:
2968                 return DRM_FORMAT_XRGB8888;
2969         case DISPPLANE_RGBX888:
2970                 return DRM_FORMAT_XBGR8888;
2971         case DISPPLANE_BGRX101010:
2972                 return DRM_FORMAT_XRGB2101010;
2973         case DISPPLANE_RGBX101010:
2974                 return DRM_FORMAT_XBGR2101010;
2975         }
2976 }
2977
2978 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2979 {
2980         switch (format) {
2981         case PLANE_CTL_FORMAT_RGB_565:
2982                 return DRM_FORMAT_RGB565;
2983         case PLANE_CTL_FORMAT_NV12:
2984                 return DRM_FORMAT_NV12;
2985         case PLANE_CTL_FORMAT_P010:
2986                 return DRM_FORMAT_P010;
2987         case PLANE_CTL_FORMAT_P012:
2988                 return DRM_FORMAT_P012;
2989         case PLANE_CTL_FORMAT_P016:
2990                 return DRM_FORMAT_P016;
2991         case PLANE_CTL_FORMAT_Y210:
2992                 return DRM_FORMAT_Y210;
2993         case PLANE_CTL_FORMAT_Y212:
2994                 return DRM_FORMAT_Y212;
2995         case PLANE_CTL_FORMAT_Y216:
2996                 return DRM_FORMAT_Y216;
2997         case PLANE_CTL_FORMAT_Y410:
2998                 return DRM_FORMAT_XVYU2101010;
2999         case PLANE_CTL_FORMAT_Y412:
3000                 return DRM_FORMAT_XVYU12_16161616;
3001         case PLANE_CTL_FORMAT_Y416:
3002                 return DRM_FORMAT_XVYU16161616;
3003         default:
3004         case PLANE_CTL_FORMAT_XRGB_8888:
3005                 if (rgb_order) {
3006                         if (alpha)
3007                                 return DRM_FORMAT_ABGR8888;
3008                         else
3009                                 return DRM_FORMAT_XBGR8888;
3010                 } else {
3011                         if (alpha)
3012                                 return DRM_FORMAT_ARGB8888;
3013                         else
3014                                 return DRM_FORMAT_XRGB8888;
3015                 }
3016         case PLANE_CTL_FORMAT_XRGB_2101010:
3017                 if (rgb_order)