/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
27 #include <linux/module.h>
28 #include <linux/input.h>
29 #include <linux/i2c.h>
30 #include <linux/kernel.h>
31 #include <linux/slab.h>
32 #include <linux/vgaarb.h>
33 #include <drm/drm_edid.h>
34 #include <drm/i915_drm.h>
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_dp_helper.h>
38 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_plane_helper.h>
40 #include <drm/drm_rect.h>
41 #include <drm/drm_atomic_uapi.h>
42 #include <linux/intel-iommu.h>
43 #include <linux/reservation.h>
45 #include "intel_drv.h"
46 #include "intel_dsi.h"
47 #include "intel_frontbuffer.h"
50 #include "i915_gem_clflush.h"
51 #include "i915_reset.h"
52 #include "i915_trace.h"
54 /* Primary plane formats for gen <= 3 */
55 static const u32 i8xx_primary_formats[] = {
62 /* Primary plane formats for gen >= 4 */
63 static const u32 i965_primary_formats[] = {
68 DRM_FORMAT_XRGB2101010,
69 DRM_FORMAT_XBGR2101010,
72 static const u64 i9xx_format_modifiers[] = {
73 I915_FORMAT_MOD_X_TILED,
74 DRM_FORMAT_MOD_LINEAR,
75 DRM_FORMAT_MOD_INVALID
79 static const u32 intel_cursor_formats[] = {
83 static const u64 cursor_format_modifiers[] = {
84 DRM_FORMAT_MOD_LINEAR,
85 DRM_FORMAT_MOD_INVALID
88 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
89 struct intel_crtc_state *pipe_config);
90 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
91 struct intel_crtc_state *pipe_config);
93 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
94 struct drm_i915_gem_object *obj,
95 struct drm_mode_fb_cmd2 *mode_cmd);
96 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
97 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
98 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
99 const struct intel_link_m_n *m_n,
100 const struct intel_link_m_n *m2_n2);
101 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
102 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
103 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
104 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
105 static void vlv_prepare_pll(struct intel_crtc *crtc,
106 const struct intel_crtc_state *pipe_config);
107 static void chv_prepare_pll(struct intel_crtc *crtc,
108 const struct intel_crtc_state *pipe_config);
109 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
110 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
111 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
112 struct intel_crtc_state *crtc_state);
113 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
114 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
115 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
116 static void intel_modeset_setup_hw_state(struct drm_device *dev,
117 struct drm_modeset_acquire_ctx *ctx);
118 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
/*
 * DPLL divider limits for a given platform/output combination.  Each
 * member gives the legal [min, max] range for one divider; p2 carries
 * the dot-clock threshold that selects between its slow and fast values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
131 /* returns HPLL frequency in kHz */
132 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
134 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
136 /* Obtain SKU information */
137 mutex_lock(&dev_priv->sb_lock);
138 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
139 CCK_FUSE_HPLL_FREQ_MASK;
140 mutex_unlock(&dev_priv->sb_lock);
142 return vco_freq[hpll_freq] * 1000;
145 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
146 const char *name, u32 reg, int ref_freq)
151 mutex_lock(&dev_priv->sb_lock);
152 val = vlv_cck_read(dev_priv, reg);
153 mutex_unlock(&dev_priv->sb_lock);
155 divider = val & CCK_FREQUENCY_VALUES;
157 WARN((val & CCK_FREQUENCY_STATUS) !=
158 (divider << CCK_FREQUENCY_STATUS_SHIFT),
159 "%s change in progress\n", name);
161 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
164 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
165 const char *name, u32 reg)
167 if (dev_priv->hpll_freq == 0)
168 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
170 return vlv_get_cck_clock(dev_priv, name, reg,
171 dev_priv->hpll_freq);
174 static void intel_update_czclk(struct drm_i915_private *dev_priv)
176 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
179 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
180 CCK_CZ_CLOCK_CONTROL);
182 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
185 static inline u32 /* units of 100MHz */
186 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
187 const struct intel_crtc_state *pipe_config)
189 if (HAS_DDI(dev_priv))
190 return pipe_config->port_clock; /* SPLL */
192 return dev_priv->fdi_pll_freq;
195 static const struct intel_limit intel_limits_i8xx_dac = {
196 .dot = { .min = 25000, .max = 350000 },
197 .vco = { .min = 908000, .max = 1512000 },
198 .n = { .min = 2, .max = 16 },
199 .m = { .min = 96, .max = 140 },
200 .m1 = { .min = 18, .max = 26 },
201 .m2 = { .min = 6, .max = 16 },
202 .p = { .min = 4, .max = 128 },
203 .p1 = { .min = 2, .max = 33 },
204 .p2 = { .dot_limit = 165000,
205 .p2_slow = 4, .p2_fast = 2 },
208 static const struct intel_limit intel_limits_i8xx_dvo = {
209 .dot = { .min = 25000, .max = 350000 },
210 .vco = { .min = 908000, .max = 1512000 },
211 .n = { .min = 2, .max = 16 },
212 .m = { .min = 96, .max = 140 },
213 .m1 = { .min = 18, .max = 26 },
214 .m2 = { .min = 6, .max = 16 },
215 .p = { .min = 4, .max = 128 },
216 .p1 = { .min = 2, .max = 33 },
217 .p2 = { .dot_limit = 165000,
218 .p2_slow = 4, .p2_fast = 4 },
221 static const struct intel_limit intel_limits_i8xx_lvds = {
222 .dot = { .min = 25000, .max = 350000 },
223 .vco = { .min = 908000, .max = 1512000 },
224 .n = { .min = 2, .max = 16 },
225 .m = { .min = 96, .max = 140 },
226 .m1 = { .min = 18, .max = 26 },
227 .m2 = { .min = 6, .max = 16 },
228 .p = { .min = 4, .max = 128 },
229 .p1 = { .min = 1, .max = 6 },
230 .p2 = { .dot_limit = 165000,
231 .p2_slow = 14, .p2_fast = 7 },
234 static const struct intel_limit intel_limits_i9xx_sdvo = {
235 .dot = { .min = 20000, .max = 400000 },
236 .vco = { .min = 1400000, .max = 2800000 },
237 .n = { .min = 1, .max = 6 },
238 .m = { .min = 70, .max = 120 },
239 .m1 = { .min = 8, .max = 18 },
240 .m2 = { .min = 3, .max = 7 },
241 .p = { .min = 5, .max = 80 },
242 .p1 = { .min = 1, .max = 8 },
243 .p2 = { .dot_limit = 200000,
244 .p2_slow = 10, .p2_fast = 5 },
247 static const struct intel_limit intel_limits_i9xx_lvds = {
248 .dot = { .min = 20000, .max = 400000 },
249 .vco = { .min = 1400000, .max = 2800000 },
250 .n = { .min = 1, .max = 6 },
251 .m = { .min = 70, .max = 120 },
252 .m1 = { .min = 8, .max = 18 },
253 .m2 = { .min = 3, .max = 7 },
254 .p = { .min = 7, .max = 98 },
255 .p1 = { .min = 1, .max = 8 },
256 .p2 = { .dot_limit = 112000,
257 .p2_slow = 14, .p2_fast = 7 },
261 static const struct intel_limit intel_limits_g4x_sdvo = {
262 .dot = { .min = 25000, .max = 270000 },
263 .vco = { .min = 1750000, .max = 3500000},
264 .n = { .min = 1, .max = 4 },
265 .m = { .min = 104, .max = 138 },
266 .m1 = { .min = 17, .max = 23 },
267 .m2 = { .min = 5, .max = 11 },
268 .p = { .min = 10, .max = 30 },
269 .p1 = { .min = 1, .max = 3},
270 .p2 = { .dot_limit = 270000,
276 static const struct intel_limit intel_limits_g4x_hdmi = {
277 .dot = { .min = 22000, .max = 400000 },
278 .vco = { .min = 1750000, .max = 3500000},
279 .n = { .min = 1, .max = 4 },
280 .m = { .min = 104, .max = 138 },
281 .m1 = { .min = 16, .max = 23 },
282 .m2 = { .min = 5, .max = 11 },
283 .p = { .min = 5, .max = 80 },
284 .p1 = { .min = 1, .max = 8},
285 .p2 = { .dot_limit = 165000,
286 .p2_slow = 10, .p2_fast = 5 },
289 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
290 .dot = { .min = 20000, .max = 115000 },
291 .vco = { .min = 1750000, .max = 3500000 },
292 .n = { .min = 1, .max = 3 },
293 .m = { .min = 104, .max = 138 },
294 .m1 = { .min = 17, .max = 23 },
295 .m2 = { .min = 5, .max = 11 },
296 .p = { .min = 28, .max = 112 },
297 .p1 = { .min = 2, .max = 8 },
298 .p2 = { .dot_limit = 0,
299 .p2_slow = 14, .p2_fast = 14
303 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
304 .dot = { .min = 80000, .max = 224000 },
305 .vco = { .min = 1750000, .max = 3500000 },
306 .n = { .min = 1, .max = 3 },
307 .m = { .min = 104, .max = 138 },
308 .m1 = { .min = 17, .max = 23 },
309 .m2 = { .min = 5, .max = 11 },
310 .p = { .min = 14, .max = 42 },
311 .p1 = { .min = 2, .max = 6 },
312 .p2 = { .dot_limit = 0,
313 .p2_slow = 7, .p2_fast = 7
317 static const struct intel_limit intel_limits_pineview_sdvo = {
318 .dot = { .min = 20000, .max = 400000},
319 .vco = { .min = 1700000, .max = 3500000 },
320 /* Pineview's Ncounter is a ring counter */
321 .n = { .min = 3, .max = 6 },
322 .m = { .min = 2, .max = 256 },
323 /* Pineview only has one combined m divider, which we treat as m2. */
324 .m1 = { .min = 0, .max = 0 },
325 .m2 = { .min = 0, .max = 254 },
326 .p = { .min = 5, .max = 80 },
327 .p1 = { .min = 1, .max = 8 },
328 .p2 = { .dot_limit = 200000,
329 .p2_slow = 10, .p2_fast = 5 },
332 static const struct intel_limit intel_limits_pineview_lvds = {
333 .dot = { .min = 20000, .max = 400000 },
334 .vco = { .min = 1700000, .max = 3500000 },
335 .n = { .min = 3, .max = 6 },
336 .m = { .min = 2, .max = 256 },
337 .m1 = { .min = 0, .max = 0 },
338 .m2 = { .min = 0, .max = 254 },
339 .p = { .min = 7, .max = 112 },
340 .p1 = { .min = 1, .max = 8 },
341 .p2 = { .dot_limit = 112000,
342 .p2_slow = 14, .p2_fast = 14 },
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
350 static const struct intel_limit intel_limits_ironlake_dac = {
351 .dot = { .min = 25000, .max = 350000 },
352 .vco = { .min = 1760000, .max = 3510000 },
353 .n = { .min = 1, .max = 5 },
354 .m = { .min = 79, .max = 127 },
355 .m1 = { .min = 12, .max = 22 },
356 .m2 = { .min = 5, .max = 9 },
357 .p = { .min = 5, .max = 80 },
358 .p1 = { .min = 1, .max = 8 },
359 .p2 = { .dot_limit = 225000,
360 .p2_slow = 10, .p2_fast = 5 },
363 static const struct intel_limit intel_limits_ironlake_single_lvds = {
364 .dot = { .min = 25000, .max = 350000 },
365 .vco = { .min = 1760000, .max = 3510000 },
366 .n = { .min = 1, .max = 3 },
367 .m = { .min = 79, .max = 118 },
368 .m1 = { .min = 12, .max = 22 },
369 .m2 = { .min = 5, .max = 9 },
370 .p = { .min = 28, .max = 112 },
371 .p1 = { .min = 2, .max = 8 },
372 .p2 = { .dot_limit = 225000,
373 .p2_slow = 14, .p2_fast = 14 },
376 static const struct intel_limit intel_limits_ironlake_dual_lvds = {
377 .dot = { .min = 25000, .max = 350000 },
378 .vco = { .min = 1760000, .max = 3510000 },
379 .n = { .min = 1, .max = 3 },
380 .m = { .min = 79, .max = 127 },
381 .m1 = { .min = 12, .max = 22 },
382 .m2 = { .min = 5, .max = 9 },
383 .p = { .min = 14, .max = 56 },
384 .p1 = { .min = 2, .max = 8 },
385 .p2 = { .dot_limit = 225000,
386 .p2_slow = 7, .p2_fast = 7 },
389 /* LVDS 100mhz refclk limits. */
390 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
391 .dot = { .min = 25000, .max = 350000 },
392 .vco = { .min = 1760000, .max = 3510000 },
393 .n = { .min = 1, .max = 2 },
394 .m = { .min = 79, .max = 126 },
395 .m1 = { .min = 12, .max = 22 },
396 .m2 = { .min = 5, .max = 9 },
397 .p = { .min = 28, .max = 112 },
398 .p1 = { .min = 2, .max = 8 },
399 .p2 = { .dot_limit = 225000,
400 .p2_slow = 14, .p2_fast = 14 },
403 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
404 .dot = { .min = 25000, .max = 350000 },
405 .vco = { .min = 1760000, .max = 3510000 },
406 .n = { .min = 1, .max = 3 },
407 .m = { .min = 79, .max = 126 },
408 .m1 = { .min = 12, .max = 22 },
409 .m2 = { .min = 5, .max = 9 },
410 .p = { .min = 14, .max = 42 },
411 .p1 = { .min = 2, .max = 6 },
412 .p2 = { .dot_limit = 225000,
413 .p2_slow = 7, .p2_fast = 7 },
416 static const struct intel_limit intel_limits_vlv = {
418 * These are the data rate limits (measured in fast clocks)
419 * since those are the strictest limits we have. The fast
420 * clock and actual rate limits are more relaxed, so checking
421 * them would make no difference.
423 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
424 .vco = { .min = 4000000, .max = 6000000 },
425 .n = { .min = 1, .max = 7 },
426 .m1 = { .min = 2, .max = 3 },
427 .m2 = { .min = 11, .max = 156 },
428 .p1 = { .min = 2, .max = 3 },
429 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
432 static const struct intel_limit intel_limits_chv = {
434 * These are the data rate limits (measured in fast clocks)
435 * since those are the strictest limits we have. The fast
436 * clock and actual rate limits are more relaxed, so checking
437 * them would make no difference.
439 .dot = { .min = 25000 * 5, .max = 540000 * 5},
440 .vco = { .min = 4800000, .max = 6480000 },
441 .n = { .min = 1, .max = 1 },
442 .m1 = { .min = 2, .max = 2 },
443 .m2 = { .min = 24 << 22, .max = 175 << 22 },
444 .p1 = { .min = 2, .max = 4 },
445 .p2 = { .p2_slow = 1, .p2_fast = 14 },
448 static const struct intel_limit intel_limits_bxt = {
449 /* FIXME: find real dot limits */
450 .dot = { .min = 0, .max = INT_MAX },
451 .vco = { .min = 4800000, .max = 6700000 },
452 .n = { .min = 1, .max = 1 },
453 .m1 = { .min = 2, .max = 2 },
454 /* FIXME: find real m2 limits */
455 .m2 = { .min = 2 << 22, .max = 255 << 22 },
456 .p1 = { .min = 2, .max = 4 },
457 .p2 = { .p2_slow = 1, .p2_fast = 20 },
461 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
464 I915_WRITE(CLKGATE_DIS_PSL(pipe),
465 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
467 I915_WRITE(CLKGATE_DIS_PSL(pipe),
468 I915_READ(CLKGATE_DIS_PSL(pipe)) &
469 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
473 needs_modeset(const struct drm_crtc_state *state)
475 return drm_atomic_crtc_needs_modeset(state);
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
486 /* m1 is reserved as 0 in Pineview, n is a ring counter */
487 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
489 clock->m = clock->m2 + 2;
490 clock->p = clock->p1 * clock->p2;
491 if (WARN_ON(clock->n == 0 || clock->p == 0))
493 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
494 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
499 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
501 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
504 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
506 clock->m = i9xx_dpll_compute_m(clock);
507 clock->p = clock->p1 * clock->p2;
508 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
510 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
511 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
516 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
518 clock->m = clock->m1 * clock->m2;
519 clock->p = clock->p1 * clock->p2;
520 if (WARN_ON(clock->n == 0 || clock->p == 0))
522 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
523 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
525 return clock->dot / 5;
528 int chv_calc_dpll_params(int refclk, struct dpll *clock)
530 clock->m = clock->m1 * clock->m2;
531 clock->p = clock->p1 * clock->p2;
532 if (WARN_ON(clock->n == 0 || clock->p == 0))
534 clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
536 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
538 return clock->dot / 5;
541 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
547 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
548 const struct intel_limit *limit,
549 const struct dpll *clock)
551 if (clock->n < limit->n.min || limit->n.max < clock->n)
552 INTELPllInvalid("n out of range\n");
553 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
554 INTELPllInvalid("p1 out of range\n");
555 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
556 INTELPllInvalid("m2 out of range\n");
557 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
558 INTELPllInvalid("m1 out of range\n");
560 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
561 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
562 if (clock->m1 <= clock->m2)
563 INTELPllInvalid("m1 <= m2\n");
565 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
566 !IS_GEN9_LP(dev_priv)) {
567 if (clock->p < limit->p.min || limit->p.max < clock->p)
568 INTELPllInvalid("p out of range\n");
569 if (clock->m < limit->m.min || limit->m.max < clock->m)
570 INTELPllInvalid("m out of range\n");
573 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
574 INTELPllInvalid("vco out of range\n");
575 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
576 * connector, etc., rather than just a single range.
578 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
579 INTELPllInvalid("dot out of range\n");
585 i9xx_select_p2_div(const struct intel_limit *limit,
586 const struct intel_crtc_state *crtc_state,
589 struct drm_device *dev = crtc_state->base.crtc->dev;
591 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
593 * For LVDS just rely on its current settings for dual-channel.
594 * We haven't figured out how to reliably set up different
595 * single/dual channel state, if we even can.
597 if (intel_is_dual_link_lvds(dev))
598 return limit->p2.p2_fast;
600 return limit->p2.p2_slow;
602 if (target < limit->p2.dot_limit)
603 return limit->p2.p2_slow;
605 return limit->p2.p2_fast;
610 * Returns a set of divisors for the desired target clock with the given
611 * refclk, or FALSE. The returned values represent the clock equation:
612 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
614 * Target and reference clocks are specified in kHz.
616 * If match_clock is provided, then best_clock P divider must match the P
617 * divider from @match_clock used for LVDS downclocking.
620 i9xx_find_best_dpll(const struct intel_limit *limit,
621 struct intel_crtc_state *crtc_state,
622 int target, int refclk, struct dpll *match_clock,
623 struct dpll *best_clock)
625 struct drm_device *dev = crtc_state->base.crtc->dev;
629 memset(best_clock, 0, sizeof(*best_clock));
631 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
633 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
635 for (clock.m2 = limit->m2.min;
636 clock.m2 <= limit->m2.max; clock.m2++) {
637 if (clock.m2 >= clock.m1)
639 for (clock.n = limit->n.min;
640 clock.n <= limit->n.max; clock.n++) {
641 for (clock.p1 = limit->p1.min;
642 clock.p1 <= limit->p1.max; clock.p1++) {
645 i9xx_calc_dpll_params(refclk, &clock);
646 if (!intel_PLL_is_valid(to_i915(dev),
651 clock.p != match_clock->p)
654 this_err = abs(clock.dot - target);
655 if (this_err < err) {
664 return (err != target);
668 * Returns a set of divisors for the desired target clock with the given
669 * refclk, or FALSE. The returned values represent the clock equation:
670 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
672 * Target and reference clocks are specified in kHz.
674 * If match_clock is provided, then best_clock P divider must match the P
675 * divider from @match_clock used for LVDS downclocking.
678 pnv_find_best_dpll(const struct intel_limit *limit,
679 struct intel_crtc_state *crtc_state,
680 int target, int refclk, struct dpll *match_clock,
681 struct dpll *best_clock)
683 struct drm_device *dev = crtc_state->base.crtc->dev;
687 memset(best_clock, 0, sizeof(*best_clock));
689 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
691 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
693 for (clock.m2 = limit->m2.min;
694 clock.m2 <= limit->m2.max; clock.m2++) {
695 for (clock.n = limit->n.min;
696 clock.n <= limit->n.max; clock.n++) {
697 for (clock.p1 = limit->p1.min;
698 clock.p1 <= limit->p1.max; clock.p1++) {
701 pnv_calc_dpll_params(refclk, &clock);
702 if (!intel_PLL_is_valid(to_i915(dev),
707 clock.p != match_clock->p)
710 this_err = abs(clock.dot - target);
711 if (this_err < err) {
720 return (err != target);
724 * Returns a set of divisors for the desired target clock with the given
725 * refclk, or FALSE. The returned values represent the clock equation:
726 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
728 * Target and reference clocks are specified in kHz.
730 * If match_clock is provided, then best_clock P divider must match the P
731 * divider from @match_clock used for LVDS downclocking.
734 g4x_find_best_dpll(const struct intel_limit *limit,
735 struct intel_crtc_state *crtc_state,
736 int target, int refclk, struct dpll *match_clock,
737 struct dpll *best_clock)
739 struct drm_device *dev = crtc_state->base.crtc->dev;
743 /* approximately equals target * 0.00585 */
744 int err_most = (target >> 8) + (target >> 9);
746 memset(best_clock, 0, sizeof(*best_clock));
748 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
750 max_n = limit->n.max;
751 /* based on hardware requirement, prefer smaller n to precision */
752 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
753 /* based on hardware requirement, prefere larger m1,m2 */
754 for (clock.m1 = limit->m1.max;
755 clock.m1 >= limit->m1.min; clock.m1--) {
756 for (clock.m2 = limit->m2.max;
757 clock.m2 >= limit->m2.min; clock.m2--) {
758 for (clock.p1 = limit->p1.max;
759 clock.p1 >= limit->p1.min; clock.p1--) {
762 i9xx_calc_dpll_params(refclk, &clock);
763 if (!intel_PLL_is_valid(to_i915(dev),
768 this_err = abs(clock.dot - target);
769 if (this_err < err_most) {
783 * Check if the calculated PLL configuration is more optimal compared to the
784 * best configuration and error found so far. Return the calculated error.
786 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
787 const struct dpll *calculated_clock,
788 const struct dpll *best_clock,
789 unsigned int best_error_ppm,
790 unsigned int *error_ppm)
793 * For CHV ignore the error and consider only the P value.
794 * Prefer a bigger P value based on HW requirements.
796 if (IS_CHERRYVIEW(to_i915(dev))) {
799 return calculated_clock->p > best_clock->p;
802 if (WARN_ON_ONCE(!target_freq))
805 *error_ppm = div_u64(1000000ULL *
806 abs(target_freq - calculated_clock->dot),
809 * Prefer a better P value over a better (smaller) error if the error
810 * is small. Ensure this preference for future configurations too by
811 * setting the error to 0.
813 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
819 return *error_ppm + 10 < best_error_ppm;
823 * Returns a set of divisors for the desired target clock with the given
824 * refclk, or FALSE. The returned values represent the clock equation:
825 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
828 vlv_find_best_dpll(const struct intel_limit *limit,
829 struct intel_crtc_state *crtc_state,
830 int target, int refclk, struct dpll *match_clock,
831 struct dpll *best_clock)
833 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
834 struct drm_device *dev = crtc->base.dev;
836 unsigned int bestppm = 1000000;
837 /* min update 19.2 MHz */
838 int max_n = min(limit->n.max, refclk / 19200);
841 target *= 5; /* fast clock */
843 memset(best_clock, 0, sizeof(*best_clock));
845 /* based on hardware requirement, prefer smaller n to precision */
846 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
847 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
848 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
849 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
850 clock.p = clock.p1 * clock.p2;
851 /* based on hardware requirement, prefer bigger m1,m2 values */
852 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
855 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
858 vlv_calc_dpll_params(refclk, &clock);
860 if (!intel_PLL_is_valid(to_i915(dev),
865 if (!vlv_PLL_is_optimal(dev, target,
883 * Returns a set of divisors for the desired target clock with the given
884 * refclk, or FALSE. The returned values represent the clock equation:
885 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
888 chv_find_best_dpll(const struct intel_limit *limit,
889 struct intel_crtc_state *crtc_state,
890 int target, int refclk, struct dpll *match_clock,
891 struct dpll *best_clock)
893 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
894 struct drm_device *dev = crtc->base.dev;
895 unsigned int best_error_ppm;
900 memset(best_clock, 0, sizeof(*best_clock));
901 best_error_ppm = 1000000;
904 * Based on hardware doc, the n always set to 1, and m1 always
905 * set to 2. If requires to support 200Mhz refclk, we need to
906 * revisit this because n may not 1 anymore.
908 clock.n = 1, clock.m1 = 2;
909 target *= 5; /* fast clock */
911 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
912 for (clock.p2 = limit->p2.p2_fast;
913 clock.p2 >= limit->p2.p2_slow;
914 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
915 unsigned int error_ppm;
917 clock.p = clock.p1 * clock.p2;
919 m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
920 clock.n) << 22, refclk * clock.m1);
922 if (m2 > INT_MAX/clock.m1)
927 chv_calc_dpll_params(refclk, &clock);
929 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
932 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
933 best_error_ppm, &error_ppm))
937 best_error_ppm = error_ppm;
945 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
946 struct dpll *best_clock)
949 const struct intel_limit *limit = &intel_limits_bxt;
951 return chv_find_best_dpll(limit, crtc_state,
952 target_clock, refclk, NULL, best_clock);
955 bool intel_crtc_active(struct intel_crtc *crtc)
957 /* Be paranoid as we can arrive here with only partial
958 * state retrieved from the hardware during setup.
960 * We can ditch the adjusted_mode.crtc_clock check as soon
961 * as Haswell has gained clock readout/fastboot support.
963 * We can ditch the crtc->primary->state->fb check as soon as we can
964 * properly reconstruct framebuffers.
966 * FIXME: The intel_crtc->active here should be switched to
967 * crtc->state->active once we have proper CRTC states wired up
970 return crtc->active && crtc->base.primary->state->fb &&
971 crtc->config->base.adjusted_mode.crtc_clock;
974 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
977 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
979 return crtc->config->cpu_transcoder;
982 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
985 i915_reg_t reg = PIPEDSL(pipe);
989 if (IS_GEN(dev_priv, 2))
990 line_mask = DSL_LINEMASK_GEN2;
992 line_mask = DSL_LINEMASK_GEN3;
994 line1 = I915_READ(reg) & line_mask;
996 line2 = I915_READ(reg) & line_mask;
998 return line1 != line2;
1001 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1003 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1004 enum pipe pipe = crtc->pipe;
1006 /* Wait for the display line to settle/start moving */
1007 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1008 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1009 pipe_name(pipe), onoff(state));
1012 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1014 wait_for_pipe_scanline_moving(crtc, false);
1017 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1019 wait_for_pipe_scanline_moving(crtc, true);
1023 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1025 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1026 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1028 if (INTEL_GEN(dev_priv) >= 4) {
1029 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1030 i915_reg_t reg = PIPECONF(cpu_transcoder);
1032 /* Wait for the Pipe State to go off */
1033 if (intel_wait_for_register(dev_priv,
1034 reg, I965_PIPECONF_ACTIVE, 0,
1036 WARN(1, "pipe_off wait timed out\n");
1038 intel_wait_for_pipe_scanline_stopped(crtc);
1042 /* Only for pre-ILK configs */
1043 void assert_pll(struct drm_i915_private *dev_priv,
1044 enum pipe pipe, bool state)
1049 val = I915_READ(DPLL(pipe));
1050 cur_state = !!(val & DPLL_VCO_ENABLE);
1051 I915_STATE_WARN(cur_state != state,
1052 "PLL state assertion failure (expected %s, current %s)\n",
1053 onoff(state), onoff(cur_state));
1056 /* XXX: the dsi pll is shared between MIPI DSI ports */
1057 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1062 mutex_lock(&dev_priv->sb_lock);
1063 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1064 mutex_unlock(&dev_priv->sb_lock);
1066 cur_state = val & DSI_PLL_VCO_EN;
1067 I915_STATE_WARN(cur_state != state,
1068 "DSI PLL state assertion failure (expected %s, current %s)\n",
1069 onoff(state), onoff(cur_state));
1072 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1073 enum pipe pipe, bool state)
1076 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1079 if (HAS_DDI(dev_priv)) {
1080 /* DDI does not have a specific FDI_TX register */
1081 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1082 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1084 u32 val = I915_READ(FDI_TX_CTL(pipe));
1085 cur_state = !!(val & FDI_TX_ENABLE);
1087 I915_STATE_WARN(cur_state != state,
1088 "FDI TX state assertion failure (expected %s, current %s)\n",
1089 onoff(state), onoff(cur_state));
1091 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1092 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1094 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1095 enum pipe pipe, bool state)
1100 val = I915_READ(FDI_RX_CTL(pipe));
1101 cur_state = !!(val & FDI_RX_ENABLE);
1102 I915_STATE_WARN(cur_state != state,
1103 "FDI RX state assertion failure (expected %s, current %s)\n",
1104 onoff(state), onoff(cur_state));
1106 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1107 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
/*
 * Warn if the FDI TX PLL for @pipe is not enabled. Skipped on gen5
 * (ILK FDI PLL always on) and on DDI platforms (PLL handled by the
 * DDI port, not this register).
 */
1109 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1114 /* ILK FDI PLL is always enabled */
1115 if (IS_GEN(dev_priv, 5))
1118 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1119 if (HAS_DDI(dev_priv))
1122 val = I915_READ(FDI_TX_CTL(pipe));
1123 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
/* Assert FDI RX PLL enable state for @pipe via FDI_RX_CTL. */
1126 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1127 enum pipe pipe, bool state)
1132 val = I915_READ(FDI_RX_CTL(pipe));
1133 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1134 I915_STATE_WARN(cur_state != state,
1135 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1136 onoff(state), onoff(cur_state));
/*
 * Warn if the panel power sequencer registers that drive @pipe are
 * write-locked. Determines which pipe the panel is attached to from
 * the PP port-select bits (PCH) or uses a per-pipe PP instance
 * (VLV/CHV); not applicable on DDI platforms.
 */
1139 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1143 enum pipe panel_pipe = INVALID_PIPE;
1146 if (WARN_ON(HAS_DDI(dev_priv)))
1149 if (HAS_PCH_SPLIT(dev_priv)) {
1152 pp_reg = PP_CONTROL(0);
1153 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
/* Map the PP port select to the pipe actually driving the panel */
1156 case PANEL_PORT_SELECT_LVDS:
1157 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1159 case PANEL_PORT_SELECT_DPA:
1160 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1162 case PANEL_PORT_SELECT_DPC:
1163 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1165 case PANEL_PORT_SELECT_DPD:
1166 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1169 MISSING_CASE(port_sel);
1172 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1173 /* presumably write lock depends on pipe, not port select */
1174 pp_reg = PP_CONTROL(pipe);
/* Pre-PCH-split: single PP instance, LVDS is the only panel port */
1179 pp_reg = PP_CONTROL(0);
1180 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1182 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
1183 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
/* Registers count as unlocked if panel power is off or unlock magic set */
1186 val = I915_READ(pp_reg);
1187 if (!(val & PANEL_POWER_ON) ||
1188 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1191 I915_STATE_WARN(panel_pipe == pipe && locked,
1192 "panel assertion failure, pipe %c regs locked\n",
/*
 * Assert the enable state of @pipe by reading PIPECONF of its CPU
 * transcoder. Skipped on 830 (both pipes always kept enabled). Takes
 * the transcoder power domain only if already enabled, so a powered
 * down transcoder does not fault the read.
 */
1196 void assert_pipe(struct drm_i915_private *dev_priv,
1197 enum pipe pipe, bool state)
1200 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1202 enum intel_display_power_domain power_domain;
1203 intel_wakeref_t wakeref;
1205 /* we keep both pipes enabled on 830 */
1206 if (IS_I830(dev_priv))
1209 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1210 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1212 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1213 cur_state = !!(val & PIPECONF_ENABLE);
1215 intel_display_power_put(dev_priv, power_domain, wakeref);
1220 I915_STATE_WARN(cur_state != state,
1221 "pipe %c assertion failure (expected %s, current %s)\n",
1222 pipe_name(pipe), onoff(state), onoff(cur_state));
/* Assert a plane's hw enable state via its get_hw_state() vfunc. */
1225 static void assert_plane(struct intel_plane *plane, bool state)
1230 cur_state = plane->get_hw_state(plane, &pipe);
1232 I915_STATE_WARN(cur_state != state,
1233 "%s assertion failure (expected %s, current %s)\n",
1234 plane->base.name, onoff(state), onoff(cur_state));
/* Convenience wrappers for the common expected states */
1237 #define assert_plane_enabled(p) assert_plane(p, true)
1238 #define assert_plane_disabled(p) assert_plane(p, false)
/* Assert that every plane attached to @crtc is disabled in hardware. */
1240 static void assert_planes_disabled(struct intel_crtc *crtc)
1242 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1243 struct intel_plane *plane;
1245 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1246 assert_plane_disabled(plane);
/*
 * Warn if vblank interrupts are still enabled on @crtc. A successful
 * drm_crtc_vblank_get() (returns 0) means vblanks were on, which is
 * the failure case; the reference is dropped again immediately.
 */
1249 static void assert_vblank_disabled(struct drm_crtc *crtc)
1251 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1252 drm_crtc_vblank_put(crtc);
/* Warn if the PCH transcoder for @pipe is still enabled (PCH_TRANSCONF). */
1255 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1261 val = I915_READ(PCH_TRANSCONF(pipe));
1262 enabled = !!(val & TRANS_ENABLE);
1263 I915_STATE_WARN(enabled,
1264 "transcoder assertion failed, should be off on pipe %c but is still active\n",
/*
 * Warn if the PCH DP port @port is enabled on transcoder @pipe, and
 * additionally (IBX only) if a disabled port is still left pointing at
 * transcoder B.
 */
1268 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1269 enum pipe pipe, enum port port,
1272 enum pipe port_pipe;
1275 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1277 I915_STATE_WARN(state && port_pipe == pipe,
1278 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1279 port_name(port), pipe_name(pipe));
1281 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1282 "IBX PCH DP %c still using transcoder B\n",
/*
 * Warn if the PCH HDMI/SDVO port @port is enabled on transcoder @pipe,
 * and additionally (IBX only) if a disabled port is still left pointing
 * at transcoder B.
 */
1286 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1287 enum pipe pipe, enum port port,
1288 i915_reg_t hdmi_reg)
1290 enum pipe port_pipe;
1293 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1295 I915_STATE_WARN(state && port_pipe == pipe,
1296 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1297 port_name(port), pipe_name(pipe));
1299 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1300 "IBX PCH HDMI %c still using transcoder B\n",
/*
 * Assert that no PCH port (DP B/C/D, VGA, LVDS, HDMI B/C/D) is still
 * driving transcoder @pipe before the transcoder is shut down.
 */
1304 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1307 enum pipe port_pipe;
1309 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1310 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1311 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1313 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1315 "PCH VGA enabled on transcoder %c, should be disabled\n",
1318 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1320 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1323 /* PCH SDVOB multiplex with HDMIB */
1324 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1325 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1326 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
/*
 * Write the precomputed DPLL value for @crtc's pipe and wait for the
 * PLL to report lock, logging an error on timeout.
 */
1329 static void _vlv_enable_pll(struct intel_crtc *crtc,
1330 const struct intel_crtc_state *pipe_config)
1332 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1333 enum pipe pipe = crtc->pipe;
1335 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1336 POSTING_READ(DPLL(pipe));
1339 if (intel_wait_for_register(dev_priv,
1344 DRM_ERROR("DPLL %d failed to lock\n", pipe);
/*
 * VLV PLL enable: verify the pipe is off and the panel power
 * sequencer registers are unlocked (the PLL is panel-protected), lock
 * the PLL only when VCO enable is requested, then program DPLL_MD.
 */
1347 static void vlv_enable_pll(struct intel_crtc *crtc,
1348 const struct intel_crtc_state *pipe_config)
1350 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1351 enum pipe pipe = crtc->pipe;
1353 assert_pipe_disabled(dev_priv, pipe);
1355 /* PLL is protected by panel, make sure we can write it */
1356 assert_panel_unlocked(dev_priv, pipe);
1358 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1359 _vlv_enable_pll(crtc, pipe_config);
1361 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1362 POSTING_READ(DPLL_MD(pipe));
/*
 * CHV PLL enable: first re-enable the 10bit display clock via the
 * DPIO sideband (CHV_CMN_DW14), then write DPLL and wait for lock.
 */
1366 static void _chv_enable_pll(struct intel_crtc *crtc,
1367 const struct intel_crtc_state *pipe_config)
1369 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1370 enum pipe pipe = crtc->pipe;
1371 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1374 mutex_lock(&dev_priv->sb_lock);
1376 /* Enable back the 10bit clock to display controller */
1377 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1378 tmp |= DPIO_DCLKP_EN;
1379 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1381 mutex_unlock(&dev_priv->sb_lock);
1384 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1389 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1391 /* Check PLL is locked */
1392 if (intel_wait_for_register(dev_priv,
1393 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1395 DRM_ERROR("PLL %d failed to lock\n", pipe);
/*
 * CHV PLL enable wrapper: assert preconditions (pipe off, panel
 * sequencer unlocked), lock the PLL if VCO enable is requested, then
 * program DPLL_MD — working around the missing per-pipe DPLLCMD on
 * pipes B/C via the CBR4 chicken bits.
 */
1398 static void chv_enable_pll(struct intel_crtc *crtc,
1399 const struct intel_crtc_state *pipe_config)
1401 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1402 enum pipe pipe = crtc->pipe;
1404 assert_pipe_disabled(dev_priv, pipe);
1406 /* PLL is protected by panel, make sure we can write it */
1407 assert_panel_unlocked(dev_priv, pipe);
1409 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1410 _chv_enable_pll(crtc, pipe_config);
1412 if (pipe != PIPE_A) {
1414 * WaPixelRepeatModeFixForC0:chv
1416 * DPLLCMD is AWOL. Use chicken bits to propagate
1417 * the value from DPLLBMD to either pipe B or C.
1419 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1420 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1421 I915_WRITE(CBR4_VLV, 0);
1422 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1425 * DPLLB VGA mode also seems to cause problems.
1426 * We should always have it disabled.
1428 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
/* Pipe A: DPLL_MD can be written directly */
1430 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1431 POSTING_READ(DPLL_MD(pipe));
/* Count active crtcs currently driving a DVO output. */
1435 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1437 struct intel_crtc *crtc;
1440 for_each_intel_crtc(&dev_priv->drm, crtc) {
1441 count += crtc->base.state->active &&
1442 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
/*
 * gen2-4 PLL enable: asserts the pipe is off and (on mobile, non-830)
 * the panel sequencer is unlocked, mirrors the DVO 2x clock to the
 * other pipe's PLL on 830 when DVO is in use, writes the DPLL value,
 * reprograms the pixel multiplier (gen4+ via DPLL_MD, older gens by
 * rewriting DPLL once stable), and finishes with the traditional
 * triple rewrite + warmup delay.
 */
1448 static void i9xx_enable_pll(struct intel_crtc *crtc,
1449 const struct intel_crtc_state *crtc_state)
1451 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1452 i915_reg_t reg = DPLL(crtc->pipe);
1453 u32 dpll = crtc_state->dpll_hw_state.dpll;
1456 assert_pipe_disabled(dev_priv, crtc->pipe);
1458 /* PLL is protected by panel, make sure we can write it */
1459 if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
1460 assert_panel_unlocked(dev_priv, crtc->pipe);
1462 /* Enable DVO 2x clock on both PLLs if necessary */
1463 if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
1465 * It appears to be important that we don't enable this
1466 * for the current pipe before otherwise configuring the
1467 * PLL. No idea how this should be handled if multiple
1468 * DVO outputs are enabled simultaneosly.
1470 dpll |= DPLL_DVO_2X_MODE;
1471 I915_WRITE(DPLL(!crtc->pipe),
1472 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1476 * Apparently we need to have VGA mode enabled prior to changing
1477 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1478 * dividers, even though the register value does change.
1482 I915_WRITE(reg, dpll);
1484 /* Wait for the clocks to stabilize. */
1488 if (INTEL_GEN(dev_priv) >= 4) {
1489 I915_WRITE(DPLL_MD(crtc->pipe),
1490 crtc_state->dpll_hw_state.dpll_md);
1492 /* The pixel multiplier can only be updated once the
1493 * DPLL is enabled and the clocks are stable.
1495 * So write it again.
1497 I915_WRITE(reg, dpll);
1500 /* We do this three times for luck */
1501 for (i = 0; i < 3; i++) {
1502 I915_WRITE(reg, dpll);
1504 udelay(150); /* wait for warmup */
/*
 * gen2-4 PLL disable: drops the DVO 2x clock from both PLLs on 830
 * when the last DVO pipe goes away, keeps the PLL running on 830
 * (pipes stay enabled there), otherwise asserts the pipe is off and
 * leaves only DPLL_VGA_MODE_DIS programmed.
 */
1508 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1510 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1511 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1512 enum pipe pipe = crtc->pipe;
1514 /* Disable DVO 2x clock on both PLLs if necessary */
1515 if (IS_I830(dev_priv) &&
1516 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
1517 !intel_num_dvo_pipes(dev_priv)) {
1518 I915_WRITE(DPLL(PIPE_B),
1519 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1520 I915_WRITE(DPLL(PIPE_A),
1521 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1524 /* Don't disable pipe or pipe PLLs if needed */
1525 if (IS_I830(dev_priv))
1528 /* Make sure the pipe isn't still relying on us */
1529 assert_pipe_disabled(dev_priv, pipe);
1531 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1532 POSTING_READ(DPLL(pipe));
/*
 * VLV PLL disable: pipe must be off; leaves the integrated reference
 * clock, reference clock enable and VGA-mode-disable bits set (plus
 * the CRI clock, conditionally — condition line elided in this view).
 */
1535 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1539 /* Make sure the pipe isn't still relying on us */
1540 assert_pipe_disabled(dev_priv, pipe)
1542 val = DPLL_INTEGRATED_REF_CLK_VLV |
1543 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1545 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1547 I915_WRITE(DPLL(pipe), val);
1548 POSTING_READ(DPLL(pipe));
/*
 * CHV PLL disable: pipe must be off; programs the SSC reference clock
 * configuration into DPLL, then drops the 10bit display clock via the
 * DPIO sideband (CHV_CMN_DW14) under sb_lock.
 */
1551 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1553 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1556 /* Make sure the pipe isn't still relying on us */
1557 assert_pipe_disabled(dev_priv, pipe);
1559 val = DPLL_SSC_REF_CLK_CHV |
1560 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1562 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1564 I915_WRITE(DPLL(pipe), val);
1565 POSTING_READ(DPLL(pipe));
1567 mutex_lock(&dev_priv->sb_lock);
1569 /* Disable 10bit clock to display controller */
1570 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1571 val &= ~DPIO_DCLKP_EN;
1572 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1574 mutex_unlock(&dev_priv->sb_lock);
/*
 * Wait for a VLV/CHV digital port's PHY ready bits to match
 * @expected_mask, selecting the per-port ready mask (and register for
 * port D) from the port id; port C's expected bits sit 4 bits higher.
 * Warns with the observed vs expected bits on timeout.
 */
1577 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1578 struct intel_digital_port *dport,
1579 unsigned int expected_mask)
1582 i915_reg_t dpll_reg;
1584 switch (dport->base.port) {
1586 port_mask = DPLL_PORTB_READY_MASK;
1590 port_mask = DPLL_PORTC_READY_MASK;
1592 expected_mask <<= 4;
1595 port_mask = DPLL_PORTD_READY_MASK;
1596 dpll_reg = DPIO_PHY_STATUS;
1602 if (intel_wait_for_register(dev_priv,
1603 dpll_reg, port_mask, expected_mask,
1605 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1606 port_name(dport->base.port),
1607 I915_READ(dpll_reg) & port_mask, expected_mask);
/*
 * Enable the ILK-style PCH transcoder for @crtc: requires the shared
 * DPLL and both FDI directions already enabled. Applies the CPT
 * timing-override workaround, copies BPC and interlace configuration
 * from the CPU PIPECONF (IBX forces 8bpc for HDMI and may need the
 * legacy interlace mode for SDVO), then sets TRANS_ENABLE and waits
 * for the transcoder to report enabled.
 */
1610 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1612 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1613 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1614 enum pipe pipe = crtc->pipe;
1616 u32 val, pipeconf_val;
1618 /* Make sure PCH DPLL is enabled */
1619 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1621 /* FDI must be feeding us bits for PCH ports */
1622 assert_fdi_tx_enabled(dev_priv, pipe);
1623 assert_fdi_rx_enabled(dev_priv, pipe);
1625 if (HAS_PCH_CPT(dev_priv)) {
1626 /* Workaround: Set the timing override bit before enabling the
1627 * pch transcoder. */
1628 reg = TRANS_CHICKEN2(pipe);
1629 val = I915_READ(reg);
1630 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1631 I915_WRITE(reg, val);
1634 reg = PCH_TRANSCONF(pipe);
1635 val = I915_READ(reg);
1636 pipeconf_val = I915_READ(PIPECONF(pipe));
1638 if (HAS_PCH_IBX(dev_priv)) {
1640 * Make the BPC in transcoder be consistent with
1641 * that in pipeconf reg. For HDMI we must use 8bpc
1642 * here for both 8bpc and 12bpc.
1644 val &= ~PIPECONF_BPC_MASK;
1645 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1646 val |= PIPECONF_8BPC;
1648 val |= pipeconf_val & PIPECONF_BPC_MASK;
/* Propagate the CPU pipe's interlace mode to the PCH transcoder */
1651 val &= ~TRANS_INTERLACE_MASK;
1652 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1653 if (HAS_PCH_IBX(dev_priv) &&
1654 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1655 val |= TRANS_LEGACY_INTERLACED_ILK;
1657 val |= TRANS_INTERLACED;
1659 val |= TRANS_PROGRESSIVE;
1661 I915_WRITE(reg, val | TRANS_ENABLE);
1662 if (intel_wait_for_register(dev_priv,
1663 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1665 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
/*
 * Enable the single LPT PCH transcoder: FDI must be up (RX is always
 * on pipe A on LPT), the timing-override workaround bit is set, the
 * interlace mode is copied from the CPU transcoder's PIPECONF, and
 * LPT_TRANSCONF enable is then polled for completion.
 */
1668 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1669 enum transcoder cpu_transcoder)
1671 u32 val, pipeconf_val;
1673 /* FDI must be feeding us bits for PCH ports */
1674 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1675 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1677 /* Workaround: set timing override bit. */
1678 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1679 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1680 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1683 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1685 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1686 PIPECONF_INTERLACED_ILK)
1687 val |= TRANS_INTERLACED;
1689 val |= TRANS_PROGRESSIVE;
1691 I915_WRITE(LPT_TRANSCONF, val);
1692 if (intel_wait_for_register(dev_priv,
1697 DRM_ERROR("Failed to enable PCH transcoder\n");
/*
 * Disable the ILK-style PCH transcoder for @pipe: FDI and all PCH
 * ports must already be off; clears TRANS_ENABLE, polls for the
 * transcoder to report disabled, then clears the CPT timing-override
 * workaround bit again.
 */
1700 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1706 /* FDI relies on the transcoder */
1707 assert_fdi_tx_disabled(dev_priv, pipe);
1708 assert_fdi_rx_disabled(dev_priv, pipe);
1710 /* Ports must be off as well */
1711 assert_pch_ports_disabled(dev_priv, pipe);
1713 reg = PCH_TRANSCONF(pipe);
1714 val = I915_READ(reg);
1715 val &= ~TRANS_ENABLE;
1716 I915_WRITE(reg, val);
1717 /* wait for PCH transcoder off, transcoder state */
1718 if (intel_wait_for_register(dev_priv,
1719 reg, TRANS_STATE_ENABLE, 0,
1721 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1723 if (HAS_PCH_CPT(dev_priv)) {
1724 /* Workaround: Clear the timing override chicken bit again. */
1725 reg = TRANS_CHICKEN2(pipe);
1726 val = I915_READ(reg);
1727 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1728 I915_WRITE(reg, val);
/*
 * Disable the single LPT PCH transcoder: clear TRANS_ENABLE, poll for
 * the off state, then clear the timing-override workaround bit.
 */
1732 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1736 val = I915_READ(LPT_TRANSCONF);
1737 val &= ~TRANS_ENABLE;
1738 I915_WRITE(LPT_TRANSCONF, val);
1739 /* wait for PCH transcoder off, transcoder state */
1740 if (intel_wait_for_register(dev_priv,
1741 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1743 DRM_ERROR("Failed to disable PCH transcoder\n");
1745 /* Workaround: clear timing override bit. */
1746 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1747 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1748 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
/*
 * Return the PCH transcoder (as a pipe enum) used by @crtc; LPT has a
 * single PCH transcoder. (Non-LPT return path elided in this extract.)
 */
1751 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1755 if (HAS_PCH_LPT(dev_priv))
/*
 * Return the usable width of the hardware frame counter for this crtc
 * state: 0 on gen2 (no hw counter) and on i965gm with the TV encoder
 * active (counter reads zero), 24 bits on gen3/4, full 32 bits on
 * gen5+/g4x.
 */
1761 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1763 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1766 * On i965gm the hardware frame counter reads
1767 * zero when the TV encoder is enabled :(
1769 if (IS_I965GM(dev_priv) &&
1770 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1773 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1774 return 0xffffffff; /* full 32 bit counter */
1775 else if (INTEL_GEN(dev_priv) >= 3)
1776 return 0xffffff; /* only 24 bits of frame count */
1778 return 0; /* Gen2 doesn't have a hardware frame counter */
/*
 * Inform the DRM vblank core of this crtc's hw frame counter width
 * and (re)enable vblank processing for it.
 */
1781 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1783 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1785 drm_crtc_set_max_vblank_count(&crtc->base,
1786 intel_crtc_max_vblank_count(crtc_state));
1787 drm_crtc_vblank_on(&crtc->base);
/*
 * Enable the CPU pipe for @new_crtc_state. Planes must be off; on GMCH
 * platforms the driving PLL (DSI or DPLL) is asserted enabled, and for
 * PCH outputs both FDI PLLs must be up. Sets PIPECONF_ENABLE (warning
 * if it was already set on anything but 830) and, when no hw frame
 * counter is available, waits for the scanline to start moving so
 * vblank timestamps don't jump.
 */
1790 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1792 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1793 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1794 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1795 enum pipe pipe = crtc->pipe;
1799 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1801 assert_planes_disabled(crtc);
1804 * A pipe without a PLL won't actually be able to drive bits from
1805 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1808 if (HAS_GMCH_DISPLAY(dev_priv)) {
1809 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1810 assert_dsi_pll_enabled(dev_priv);
1812 assert_pll_enabled(dev_priv, pipe);
1814 if (new_crtc_state->has_pch_encoder) {
1815 /* if driving the PCH, we need FDI enabled */
1816 assert_fdi_rx_pll_enabled(dev_priv,
1817 intel_crtc_pch_transcoder(crtc));
1818 assert_fdi_tx_pll_enabled(dev_priv,
1819 (enum pipe) cpu_transcoder);
1821 /* FIXME: assert CPU port conditions for SNB+ */
1824 reg = PIPECONF(cpu_transcoder);
1825 val = I915_READ(reg);
1826 if (val & PIPECONF_ENABLE) {
1827 /* we keep both pipes enabled on 830 */
1828 WARN_ON(!IS_I830(dev_priv));
1832 I915_WRITE(reg, val | PIPECONF_ENABLE);
1836 * Until the pipe starts PIPEDSL reads will return a stale value,
1837 * which causes an apparent vblank timestamp jump when PIPEDSL
1838 * resets to its proper value. That also messes up the frame count
1839 * when it's derived from the timestamps. So let's wait for the
1840 * pipe to start properly before we call drm_crtc_vblank_on()
1842 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1843 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * Disable the CPU pipe for @old_crtc_state. Planes must already be
 * off. Clears double-wide mode when it was set, clears PIPECONF_ENABLE
 * except on 830 (pipes stay on there), and waits for the pipe to
 * actually stop when the enable bit was cleared.
 */
1846 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1848 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1849 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1850 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1851 enum pipe pipe = crtc->pipe;
1855 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1858 * Make sure planes won't keep trying to pump pixels to us,
1859 * or we might hang the display.
1861 assert_planes_disabled(crtc);
1863 reg = PIPECONF(cpu_transcoder);
1864 val = I915_READ(reg);
1865 if ((val & PIPECONF_ENABLE) == 0)
1869 * Double wide has implications for planes
1870 * so best keep it disabled when not needed.
1872 if (old_crtc_state->double_wide)
1873 val &= ~PIPECONF_DOUBLE_WIDE;
1875 /* Don't disable pipe or pipe PLLs if needed */
1876 if (!IS_I830(dev_priv))
1877 val &= ~PIPECONF_ENABLE;
1879 I915_WRITE(reg, val);
1880 if ((val & PIPECONF_ENABLE) == 0)
1881 intel_wait_for_pipe_off(old_crtc_state);
/* GTT tile size in bytes: 2KiB on gen2, 4KiB everywhere else. */
1884 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1886 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
/*
 * Return the tile row width in bytes for @color_plane of @fb,
 * depending on the tiling modifier, gen, and (for CCS modifiers)
 * whether this is the AUX plane (color_plane == 1). Return values for
 * the individual cases are elided in this extract.
 */
1890 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1892 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1893 unsigned int cpp = fb->format->cpp[color_plane];
1895 switch (fb->modifier) {
1896 case DRM_FORMAT_MOD_LINEAR:
1898 case I915_FORMAT_MOD_X_TILED:
1899 if (IS_GEN(dev_priv, 2))
1903 case I915_FORMAT_MOD_Y_TILED_CCS:
1904 if (color_plane == 1)
1907 case I915_FORMAT_MOD_Y_TILED:
1908 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1912 case I915_FORMAT_MOD_Yf_TILED_CCS:
1913 if (color_plane == 1)
1916 case I915_FORMAT_MOD_Yf_TILED:
1932 MISSING_CASE(fb->modifier);
/*
 * Tile height in rows for @color_plane: tile size divided by the tile
 * row width in bytes (linear handled separately).
 */
1938 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1940 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1943 return intel_tile_size(to_i915(fb->dev)) /
1944 intel_tile_width_bytes(fb, color_plane);
1947 /* Return the tile dimensions in pixel units */
1948 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1949 unsigned int *tile_width,
1950 unsigned int *tile_height)
1952 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1953 unsigned int cpp = fb->format->cpp[color_plane];
/* width in pixels = bytes per tile row / bytes per pixel */
1955 *tile_width = tile_width_bytes / cpp;
1956 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
/* Round @height up to a whole number of tile rows for @color_plane. */
1960 intel_fb_align_height(const struct drm_framebuffer *fb,
1961 int color_plane, unsigned int height)
1963 unsigned int tile_height = intel_tile_height(fb, color_plane);
1965 return ALIGN(height, tile_height);
/* Total size (in tiles/pages) of a rotated view: sum of width*height
 * over all planes of the rotation info. */
1968 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1970 unsigned int size = 0;
1973 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1974 size += rot_info->plane[i].width * rot_info->plane[i].height;
/*
 * Initialize @view for @fb: normal GGTT view by default, switched to
 * the rotated view (with the fb's precomputed rotation info) for
 * 90/270 degree rotation.
 */
1980 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1981 const struct drm_framebuffer *fb,
1982 unsigned int rotation)
1984 view->type = I915_GGTT_VIEW_NORMAL;
1985 if (drm_rotation_90_or_270(rotation)) {
1986 view->type = I915_GGTT_VIEW_ROTATED;
1987 view->rotated = to_intel_framebuffer(fb)->rot_info;
/*
 * Required GGTT alignment for cursor surfaces, by platform (the
 * per-platform return values are elided in this extract).
 */
1991 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
1993 if (IS_I830(dev_priv))
1995 else if (IS_I85X(dev_priv))
1997 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
/*
 * Required GGTT alignment for linear scanout surfaces, by platform
 * (the per-platform return values are elided in this extract).
 */
2003 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2005 if (INTEL_GEN(dev_priv) >= 9)
2007 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2008 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2010 else if (INTEL_GEN(dev_priv) >= 4)
/*
 * Required GGTT alignment for a scanout surface plane: AUX planes need
 * only 4K; otherwise the tiling modifier decides (linear delegates to
 * intel_linear_alignment(), Y/Yf variants use 1MiB).
 */
2016 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2019 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2021 /* AUX_DIST needs only 4K alignment */
2022 if (color_plane == 1)
2025 switch (fb->modifier) {
2026 case DRM_FORMAT_MOD_LINEAR:
2027 return intel_linear_alignment(dev_priv);
2028 case I915_FORMAT_MOD_X_TILED:
2029 if (INTEL_GEN(dev_priv) >= 9)
2032 case I915_FORMAT_MOD_Y_TILED_CCS:
2033 case I915_FORMAT_MOD_Yf_TILED_CCS:
2034 case I915_FORMAT_MOD_Y_TILED:
2035 case I915_FORMAT_MOD_Yf_TILED:
2036 return 1 * 1024 * 1024;
2038 MISSING_CASE(fb->modifier);
/*
 * Whether this plane needs a GGTT fence: pre-gen4 always, later gens
 * only when the plane supports FBC.
 */
2043 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2045 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2046 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2048 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
/*
 * Pin @fb's backing object into the GGTT for scanout (caller holds
 * struct_mutex) and, when requested and possible, install a fence for
 * tiled scan-out, setting PLANE_HAS_FENCE in *out_flags. A runtime PM
 * wakeref is held around the pin/fence sequence only. Bumps
 * pending_fb_pin for error capture while pinning. Returns the pinned
 * vma (error-path returns elided in this extract).
 */
2052 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2053 const struct i915_ggtt_view *view,
2055 unsigned long *out_flags)
2057 struct drm_device *dev = fb->dev;
2058 struct drm_i915_private *dev_priv = to_i915(dev);
2059 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2060 intel_wakeref_t wakeref;
2061 struct i915_vma *vma;
2062 unsigned int pinctl;
2065 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2067 alignment = intel_surf_alignment(fb, 0);
2069 /* Note that the w/a also requires 64 PTE of padding following the
2070 * bo. We currently fill all unused PTE with the shadow page and so
2071 * we should always have valid PTE following the scanout preventing
2074 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2075 alignment = 256 * 1024;
2078 * Global gtt pte registers are special registers which actually forward
2079 * writes to a chunk of system memory. Which means that there is no risk
2080 * that the register values disappear as soon as we call
2081 * intel_runtime_pm_put(), so it is correct to wrap only the
2082 * pin/unpin/fence and not more.
2084 wakeref = intel_runtime_pm_get(dev_priv);
2086 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2090 /* Valleyview is definitely limited to scanning out the first
2091 * 512MiB. Lets presume this behaviour was inherited from the
2092 * g4x display engine and that all earlier gen are similarly
2093 * limited. Testing suggests that it is a little more
2094 * complicated than this. For example, Cherryview appears quite
2095 * happy to scanout from anywhere within its global aperture.
2097 if (HAS_GMCH_DISPLAY(dev_priv))
2098 pinctl |= PIN_MAPPABLE;
2100 vma = i915_gem_object_pin_to_display_plane(obj,
2101 alignment, view, pinctl);
2105 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2108 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2109 * fence, whereas 965+ only requires a fence if using
2110 * framebuffer compression. For simplicity, we always, when
2111 * possible, install a fence as the cost is not that onerous.
2113 * If we fail to fence the tiled scanout, then either the
2114 * modeset will reject the change (which is highly unlikely as
2115 * the affected systems, all but one, do not have unmappable
2116 * space) or we will not be able to enable full powersaving
2117 * techniques (also likely not to apply due to various limits
2118 * FBC and the like impose on the size of the buffer, which
2119 * presumably we violated anyway with this unmappable buffer).
2120 * Anyway, it is presumably better to stumble onwards with
2121 * something and try to run the system in a "less than optimal"
2122 * mode that matches the user configuration.
2124 ret = i915_vma_pin_fence(vma);
2125 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2126 i915_gem_object_unpin_from_display_plane(vma);
2131 if (ret == 0 && vma->fence)
2132 *out_flags |= PLANE_HAS_FENCE;
2137 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2139 intel_runtime_pm_put(dev_priv, wakeref);
/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence if one was
 * installed (PLANE_HAS_FENCE) and unpin the display-plane vma.
 * Caller must hold struct_mutex.
 */
2143 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2145 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2147 if (flags & PLANE_HAS_FENCE)
2148 i915_vma_unpin_fence(vma);
2149 i915_gem_object_unpin_from_display_plane(vma);
/*
 * Effective fb pitch for @color_plane: the precomputed rotated pitch
 * for 90/270 rotation, the normal fb pitch otherwise.
 */
2153 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2154 unsigned int rotation)
2156 if (drm_rotation_90_or_270(rotation))
2157 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2159 return fb->pitches[color_plane];
2163 * Convert the x/y offsets into a linear offset.
2164 * Only valid with 0/180 degree rotation, which is fine since linear
2165 * offset is only used with linear buffers on pre-hsw and tiled buffers
2166 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2168 u32 intel_fb_xy_to_linear(int x, int y,
2169 const struct intel_plane_state *state,
2172 const struct drm_framebuffer *fb = state->base.fb;
2173 unsigned int cpp = fb->format->cpp[color_plane];
2174 unsigned int pitch = state->color_plane[color_plane].stride;
/* byte offset = rows * stride + pixels * bytes-per-pixel */
2176 return y * pitch + x * cpp;
2180 * Add the x/y offsets derived from fb->offsets[] to the user
2181 * specified plane src x/y offsets. The resulting x/y offsets
2182 * specify the start of scanout from the beginning of the gtt mapping.
2184 void intel_add_fb_offsets(int *x, int *y,
2185 const struct intel_plane_state *state,
2189 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2190 unsigned int rotation = state->base.rotation;
/* Pick the precomputed offsets matching the active GGTT view */
2192 if (drm_rotation_90_or_270(rotation)) {
2193 *x += intel_fb->rotated[color_plane].x;
2194 *y += intel_fb->rotated[color_plane].y;
2196 *x += intel_fb->normal[color_plane].x;
2197 *y += intel_fb->normal[color_plane].y;
/*
 * Re-express the tile-aligned offset delta (old_offset - new_offset,
 * both tile aligned, new <= old) as x/y adjustments in pixels, then
 * minimize x by folding whole rows back into y.
 */
2201 static u32 intel_adjust_tile_offset(int *x, int *y,
2202 unsigned int tile_width,
2203 unsigned int tile_height,
2204 unsigned int tile_size,
2205 unsigned int pitch_tiles,
2209 unsigned int pitch_pixels = pitch_tiles * tile_width;
2212 WARN_ON(old_offset & (tile_size - 1));
2213 WARN_ON(new_offset & (tile_size - 1));
2214 WARN_ON(new_offset > old_offset);
2216 tiles = (old_offset - new_offset) / tile_size;
2218 *y += tiles / pitch_tiles * tile_height;
2219 *x += tiles % pitch_tiles * tile_width;
2221 /* minimize x in case it got needlessly big */
2222 *y += *x / pitch_pixels * tile_height;
/* True if this fb plane has no tiling (linear modifier). */
2228 static bool is_surface_linear(u64 modifier, int color_plane)
2230 return modifier == DRM_FORMAT_MOD_LINEAR;
/*
 * Move the difference between two aligned surface offsets into the
 * x/y offsets instead: tiled surfaces convert the delta via tile
 * geometry (with width/height swapped for 90/270 rotation), linear
 * surfaces split the byte delta into rows (y) and pixels (x).
 */
2233 static u32 intel_adjust_aligned_offset(int *x, int *y,
2234 const struct drm_framebuffer *fb,
2236 unsigned int rotation,
2238 u32 old_offset, u32 new_offset)
2240 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2241 unsigned int cpp = fb->format->cpp[color_plane];
2243 WARN_ON(new_offset > old_offset);
2245 if (!is_surface_linear(fb->modifier, color_plane)) {
2246 unsigned int tile_size, tile_width, tile_height;
2247 unsigned int pitch_tiles;
2249 tile_size = intel_tile_size(dev_priv);
2250 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2252 if (drm_rotation_90_or_270(rotation)) {
2253 pitch_tiles = pitch / tile_height;
2254 swap(tile_width, tile_height);
2256 pitch_tiles = pitch / (tile_width * cpp);
2259 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2260 tile_size, pitch_tiles,
2261 old_offset, new_offset);
/* Linear: convert the byte delta directly into rows and pixels */
2263 old_offset += *y * pitch + *x * cpp;
2265 *y = (old_offset - new_offset) / pitch;
2266 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2273 * Adjust the tile offset by moving the difference into
/*
 * Plane-state convenience wrapper around intel_adjust_aligned_offset(),
 * pulling fb, rotation and stride from @state.
 */
2276 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2277 const struct intel_plane_state *state,
2279 u32 old_offset, u32 new_offset)
2281 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2282 state->base.rotation,
2283 state->color_plane[color_plane].stride,
2284 old_offset, new_offset);
2288 * Computes the aligned offset to the base tile and adjusts
2289 * x, y. bytes per pixel is assumed to be a power-of-two.
2291 * In the 90/270 rotated case, x and y are assumed
2292 * to be already rotated to match the rotated GTT view, and
2293 * pitch is the tile_height aligned framebuffer height.
2295 * This function is used when computing the derived information
2296 * under intel_framebuffer, so using any of that information
2297 * here is not allowed. Anything under drm_framebuffer can be
2298 * used. This is why the user has to pass in the pitch since it
2299 * is specified in the rotated orientation.
2301 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2303 const struct drm_framebuffer *fb,
2306 unsigned int rotation,
2309 unsigned int cpp = fb->format->cpp[color_plane];
2310 u32 offset, offset_aligned;
2315 if (!is_surface_linear(fb->modifier, color_plane)) {
2316 unsigned int tile_size, tile_width, tile_height;
2317 unsigned int tile_rows, tiles, pitch_tiles;
2319 tile_size = intel_tile_size(dev_priv);
2320 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2322 if (drm_rotation_90_or_270(rotation)) {
2323 pitch_tiles = pitch / tile_height;
2324 swap(tile_width, tile_height);
2326 pitch_tiles = pitch / (tile_width * cpp);
2329 tile_rows = *y / tile_height;
2332 tiles = *x / tile_width;
/* Offset of the containing tile, then mask down to the alignment */
2335 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2336 offset_aligned = offset & ~alignment;
2338 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2339 tile_size, pitch_tiles,
2340 offset, offset_aligned);
/* Linear: mask the byte offset and push the remainder into x/y */
2342 offset = *y * pitch + *x * cpp;
2343 offset_aligned = offset & ~alignment;
2345 *y = (offset & alignment) / pitch;
2346 *x = ((offset & alignment) - *y * pitch) / cpp;
2349 return offset_aligned;
/*
 * Plane-state wrapper around intel_compute_aligned_offset(): derives fb,
 * rotation, stride and the required surface alignment from @state. Cursor
 * planes use their own alignment rule (intel_cursor_alignment()), all other
 * planes use the per-fb/per-plane surface alignment.
 */
2352 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2353 const struct intel_plane_state *state,
2356 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2357 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2358 const struct drm_framebuffer *fb = state->base.fb;
2359 unsigned int rotation = state->base.rotation;
2360 int pitch = state->color_plane[color_plane].stride;
2363 if (intel_plane->id == PLANE_CURSOR)
2364 alignment = intel_cursor_alignment(dev_priv);
2366 alignment = intel_surf_alignment(fb, color_plane);
2368 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2369 pitch, rotation, alignment);
2372 /* Convert the fb->offset[] into x/y offsets */
/*
 * Validates the plane's byte offset (tiled offsets must be tile-size
 * aligned; height * pitch + offset must not overflow u32) and then
 * converts fb->offsets[color_plane] into (*x, *y) pixel coordinates
 * via intel_adjust_aligned_offset() with a target offset of 0.
 * Returns 0 on success, negative error on bad offsets (error-return
 * lines are elided in this extract).
 */
2373 static int intel_fb_offset_to_xy(int *x, int *y,
2374 const struct drm_framebuffer *fb,
2377 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2378 unsigned int height;
/* Tiled surfaces must start on a tile boundary. */
2380 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2381 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2382 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2383 fb->offsets[color_plane], color_plane);
2387 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2388 height = ALIGN(height, intel_tile_height(fb, color_plane));
2390 /* Catch potential overflows early */
2391 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2392 fb->offsets[color_plane])) {
2393 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2394 fb->offsets[color_plane], fb->pitches[color_plane],
/* Fold the whole byte offset into x/y (new offset == 0). */
2402 intel_adjust_aligned_offset(x, y,
2403 fb, color_plane, DRM_MODE_ROTATE_0,
2404 fb->pitches[color_plane],
2405 fb->offsets[color_plane], 0);
/*
 * Map a framebuffer modifier onto the legacy object tiling mode
 * (I915_TILING_X/Y/NONE). Modifiers not listed here fall through to
 * I915_TILING_NONE (default label elided in this extract).
 */
2410 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2412 switch (fb_modifier) {
2413 case I915_FORMAT_MOD_X_TILED:
2414 return I915_TILING_X;
2415 case I915_FORMAT_MOD_Y_TILED:
2416 case I915_FORMAT_MOD_Y_TILED_CCS:
2417 return I915_TILING_Y;
2419 return I915_TILING_NONE;
2424 * From the Sky Lake PRM:
2425 * "The Color Control Surface (CCS) contains the compression status of
2426 * the cache-line pairs. The compression state of the cache-line pair
2427 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2428 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2429 * cache-line-pairs. CCS is always Y tiled."
2431 * Since cache line pairs refers to horizontally adjacent cache lines,
2432 * each cache line in the CCS corresponds to an area of 32x16 cache
2433 * lines on the main surface. Since each pixel is 4 bytes, this gives
2434 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface — hence hsub = 8, vsub = 16 and cpp = 1 for the aux
 * (CCS) plane in the table below.
 */
/* Per-format layout info for CCS framebuffers: plane 0 is the 4-byte/pixel
 * main surface, plane 1 the 1-byte CCS aux surface subsampled 8x16. */
2437 static const struct drm_format_info ccs_formats[] = {
2438 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2439 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2440 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2441 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
/*
 * Linear scan of @formats for the entry matching fourcc @format.
 * The match return and the NULL fall-through are elided in this extract.
 */
2444 static const struct drm_format_info *
2445 lookup_format_info(const struct drm_format_info formats[],
2446 int num_formats, u32 format)
2450 for (i = 0; i < num_formats; i++) {
2451 if (formats[i].format == format)
/*
 * Driver hook returning format info for modifiers whose layout differs
 * from the core DRM defaults: CCS modifiers get the two-plane ccs_formats
 * entries (default/non-CCS case elided in this extract).
 */
2458 static const struct drm_format_info *
2459 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2461 switch (cmd->modifier[0]) {
2462 case I915_FORMAT_MOD_Y_TILED_CCS:
2463 case I915_FORMAT_MOD_Yf_TILED_CCS:
2464 return lookup_format_info(ccs_formats,
2465 ARRAY_SIZE(ccs_formats),
2472 bool is_ccs_modifier(u64 modifier)
2474 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2475 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
/*
 * Fill in the derived per-plane layout info for @fb: the normal-view and
 * rotated-view x/y offsets, the rotation_info used for the rotated GTT
 * mapping, and a final size check against the backing object. Also
 * validates CCS aux plane alignment against the main surface.
 * Return statements and some braces are elided in this extract.
 */
2479 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2480 struct drm_framebuffer *fb)
2482 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2483 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2484 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2485 u32 gtt_offset_rotated = 0;
2486 unsigned int max_size = 0;
2487 int i, num_planes = fb->format->num_planes;
2488 unsigned int tile_size = intel_tile_size(dev_priv);
2490 for (i = 0; i < num_planes; i++) {
2491 unsigned int width, height;
2492 unsigned int cpp, size;
2497 cpp = fb->format->cpp[i];
2498 width = drm_framebuffer_plane_width(fb->width, fb, i);
2499 height = drm_framebuffer_plane_height(fb->height, fb, i);
/* Convert fb->offsets[i] into x/y coordinates for this plane. */
2501 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2503 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
/* Plane 1 of a CCS fb is the aux surface: its intra-tile x/y must
 * line up with the main surface, checked below. */
2508 if (is_ccs_modifier(fb->modifier) && i == 1) {
2509 int hsub = fb->format->hsub;
2510 int vsub = fb->format->vsub;
2511 int tile_width, tile_height;
2515 intel_tile_dims(fb, i, &tile_width, &tile_height);
2517 tile_height *= vsub;
2519 ccs_x = (x * hsub) % tile_width;
2520 ccs_y = (y * vsub) % tile_height;
2521 main_x = intel_fb->normal[0].x % tile_width;
2522 main_y = intel_fb->normal[0].y % tile_height;
2525 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2526 * x/y offsets must match between CCS and the main surface.
2528 if (main_x != ccs_x || main_y != ccs_y) {
2529 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2532 intel_fb->normal[0].x,
2533 intel_fb->normal[0].y,
2540 * The fence (if used) is aligned to the start of the object
2541 * so having the framebuffer wrap around across the edge of the
2542 * fenced region doesn't really work. We have no API to configure
2543 * the fence start offset within the object (nor could we probably
2544 * on gen2/3). So it's just easier if we just require that the
2545 * fb layout agrees with the fence layout. We already check that the
2546 * fb stride matches the fence stride elsewhere.
2548 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2549 (x + width) * cpp > fb->pitches[i]) {
2550 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2556 * First pixel of the framebuffer from
2557 * the start of the normal gtt mapping.
2559 intel_fb->normal[i].x = x;
2560 intel_fb->normal[i].y = y;
/* Tile-aligned base offset; remainder folded back into x/y. */
2562 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
/* From here on, offset is counted in tiles, not bytes. */
2566 offset /= tile_size;
2568 if (!is_surface_linear(fb->modifier, i)) {
2569 unsigned int tile_width, tile_height;
2570 unsigned int pitch_tiles;
2573 intel_tile_dims(fb, i, &tile_width, &tile_height);
/* Describe this plane for the rotated (90/270) GTT view. */
2575 rot_info->plane[i].offset = offset;
2576 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2577 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2578 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2580 intel_fb->rotated[i].pitch =
2581 rot_info->plane[i].height * tile_height;
2583 /* how many tiles does this plane need */
2584 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2586 * If the plane isn't horizontally tile aligned,
2587 * we need one more tile.
2592 /* rotate the x/y offsets to match the GTT view */
2598 rot_info->plane[i].width * tile_width,
2599 rot_info->plane[i].height * tile_height,
2600 DRM_MODE_ROTATE_270);
2604 /* rotate the tile dimensions to match the GTT view */
2605 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2606 swap(tile_width, tile_height);
2609 * We only keep the x/y offsets, so push all of the
2610 * gtt offset into the x/y offsets.
2612 intel_adjust_tile_offset(&x, &y,
2613 tile_width, tile_height,
2614 tile_size, pitch_tiles,
2615 gtt_offset_rotated * tile_size, 0);
2617 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2620 * First pixel of the framebuffer from
2621 * the start of the rotated gtt mapping.
2623 intel_fb->rotated[i].x = x;
2624 intel_fb->rotated[i].y = y;
/* Linear plane: size in tiles straight from the byte extent. */
2626 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2627 x * cpp, tile_size);
2630 /* how many tiles in total needed in the bo */
2631 max_size = max(max_size, offset + size);
/* The backing object must hold all tiles of all planes. */
2634 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2635 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2636 mul_u32_u32(max_size, tile_size), obj->base.size);
/*
 * Translate a gen2-4 display-plane control register format field
 * (DISPPLANE_*) into the matching DRM fourcc. Used when reconstructing
 * the BIOS-programmed plane configuration. Default case elided in this
 * extract.
 */
2643 static int i9xx_format_to_fourcc(int format)
2646 case DISPPLANE_8BPP:
2647 return DRM_FORMAT_C8;
2648 case DISPPLANE_BGRX555:
2649 return DRM_FORMAT_XRGB1555;
2650 case DISPPLANE_BGRX565:
2651 return DRM_FORMAT_RGB565;
2653 case DISPPLANE_BGRX888:
2654 return DRM_FORMAT_XRGB8888;
2655 case DISPPLANE_RGBX888:
2656 return DRM_FORMAT_XBGR8888;
2657 case DISPPLANE_BGRX101010:
2658 return DRM_FORMAT_XRGB2101010;
2659 case DISPPLANE_RGBX101010:
2660 return DRM_FORMAT_XBGR2101010;
/*
 * Translate a SKL+ PLANE_CTL format field into a DRM fourcc.
 * @rgb_order and @alpha select between the RGB/BGR and X/A variants of
 * the 8888 and 2101010 families (the if/else lines choosing between the
 * visible returns are elided in this extract).
 */
2664 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2667 case PLANE_CTL_FORMAT_RGB_565:
2668 return DRM_FORMAT_RGB565;
2669 case PLANE_CTL_FORMAT_NV12:
2670 return DRM_FORMAT_NV12;
2672 case PLANE_CTL_FORMAT_XRGB_8888:
2675 return DRM_FORMAT_ABGR8888;
2677 return DRM_FORMAT_XBGR8888;
2680 return DRM_FORMAT_ARGB8888;
2682 return DRM_FORMAT_XRGB8888;
2684 case PLANE_CTL_FORMAT_XRGB_2101010:
2686 return DRM_FORMAT_XBGR2101010;
2688 return DRM_FORMAT_XRGB2101010;
/*
 * Try to wrap the BIOS/GOP-programmed framebuffer (described by
 * @plane_config) in a GEM object backed by the preallocated stolen
 * memory range, and initialize an intel_framebuffer around it. Bails
 * out (return lines elided in this extract) if the range is too big
 * for stolen memory or uses an unsupported modifier.
 */
2693 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2694 struct intel_initial_plane_config *plane_config)
2696 struct drm_device *dev = crtc->base.dev;
2697 struct drm_i915_private *dev_priv = to_i915(dev);
2698 struct drm_i915_gem_object *obj = NULL;
2699 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2700 struct drm_framebuffer *fb = &plane_config->fb->base;
/* Page-align the BIOS-provided base/size before carving out stolen. */
2701 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2702 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2705 size_aligned -= base_aligned;
2707 if (plane_config->size == 0)
2710 /* If the FB is too big, just don't use it since fbdev is not very
2711 * important and we should probably use that space with FBC or other
 * features instead. */
2713 if (size_aligned * 2 > dev_priv->stolen_usable_size)
/* Only modifiers the initial-FB takeover path can describe. */
2716 switch (fb->modifier) {
2717 case DRM_FORMAT_MOD_LINEAR:
2718 case I915_FORMAT_MOD_X_TILED:
2719 case I915_FORMAT_MOD_Y_TILED:
2722 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2727 mutex_lock(&dev->struct_mutex);
2728 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2732 mutex_unlock(&dev->struct_mutex);
/* Mirror the BIOS tiling onto the new object so fencing matches. */
2736 switch (plane_config->tiling) {
2737 case I915_TILING_NONE:
2741 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2744 MISSING_CASE(plane_config->tiling);
/* Rebuild a mode_cmd from the probed fb and (re)init the intel fb. */
2748 mode_cmd.pixel_format = fb->format->format;
2749 mode_cmd.width = fb->width;
2750 mode_cmd.height = fb->height;
2751 mode_cmd.pitches[0] = fb->pitches[0];
2752 mode_cmd.modifier[0] = fb->modifier;
2753 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2755 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2756 DRM_DEBUG_KMS("intel fb init failed\n");
2761 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
/* Error path: drop our reference on the stolen-backed object. */
2765 i915_gem_object_put(obj);
/*
 * Set @plane_state's visibility and keep the crtc state's plane_mask in
 * sync: the plane's bit is set when visible, cleared when not (the
 * if/else lines between the two mask updates are elided in this extract).
 */
2770 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2771 struct intel_plane_state *plane_state,
2774 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2776 plane_state->base.visible = visible;
2779 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2781 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
/*
 * Rebuild crtc_state->active_planes from the (unique-id) plane_mask.
 * Needed during HW state takeover where active_planes may alias.
 */
2784 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
2786 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2787 struct drm_plane *plane;
2790 * Active_planes aliases if multiple "primary" or cursor planes
2791 * have been used on the same (or wrong) pipe. plane_mask uses
2792 * unique ids, hence we can use that to reconstruct active_planes.
2794 crtc_state->active_planes = 0;
2796 drm_for_each_plane_mask(plane, &dev_priv->drm,
2797 crtc_state->base.plane_mask)
2798 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
/*
 * Disable @plane on @crtc outside of an atomic commit (boot/HW-takeover
 * fixups): mark it invisible, rebuild active_planes, run the primary
 * plane's pre-disable hook when applicable, then hit the HW disable.
 */
2801 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2802 struct intel_plane *plane)
2804 struct intel_crtc_state *crtc_state =
2805 to_intel_crtc_state(crtc->base.state);
2806 struct intel_plane_state *plane_state =
2807 to_intel_plane_state(plane->base.state);
2809 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2810 plane->base.base.id, plane->base.name,
2811 crtc->base.base.id, crtc->base.name);
2813 intel_set_plane_visible(crtc_state, plane_state, false);
2814 fixup_active_planes(crtc_state);
2816 if (plane->id == PLANE_PRIMARY)
2817 intel_pre_disable_primary_noatomic(&crtc->base);
2819 trace_intel_disable_plane(&plane->base, crtc);
2820 plane->disable_plane(plane, crtc_state);
/*
 * Take over the BIOS-programmed framebuffer for @intel_crtc's primary
 * plane: allocate a stolen-backed fb for it, or share the fb another
 * active CRTC already reconstructed from the same base address. If both
 * fail, disable the primary plane so state stays consistent. On success,
 * pin the fb and populate the plane state to cover the full fb.
 */
2824 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2825 struct intel_initial_plane_config *plane_config)
2827 struct drm_device *dev = intel_crtc->base.dev;
2828 struct drm_i915_private *dev_priv = to_i915(dev);
2830 struct drm_i915_gem_object *obj;
2831 struct drm_plane *primary = intel_crtc->base.primary;
2832 struct drm_plane_state *plane_state = primary->state;
2833 struct intel_plane *intel_plane = to_intel_plane(primary);
2834 struct intel_plane_state *intel_state =
2835 to_intel_plane_state(plane_state);
2836 struct drm_framebuffer *fb;
2838 if (!plane_config->fb)
2841 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2842 fb = &plane_config->fb->base;
2846 kfree(plane_config->fb);
2849 * Failed to alloc the obj, check to see if we should share
2850 * an fb with another CRTC instead
2852 for_each_crtc(dev, c) {
2853 struct intel_plane_state *state;
/* Skip ourselves and inactive CRTCs. */
2855 if (c == &intel_crtc->base)
2858 if (!to_intel_crtc(c)->active)
2861 state = to_intel_plane_state(c->primary->state);
/* Same GGTT base => same BIOS fb; take a reference and share it. */
2865 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2866 fb = state->base.fb;
2867 drm_framebuffer_get(fb);
2873 * We've failed to reconstruct the BIOS FB. Current display state
2874 * indicates that the primary plane is visible, but has a NULL FB,
2875 * which will lead to problems later if we don't fix it up. The
2876 * simplest solution is to just disable the primary plane now and
2877 * pretend the BIOS never had it enabled.
2879 intel_plane_disable_noatomic(intel_crtc, intel_plane);
/* Success path: derive view/stride and pin the fb into the GGTT. */
2884 intel_state->base.rotation = plane_config->rotation;
2885 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2886 intel_state->base.rotation);
2887 intel_state->color_plane[0].stride =
2888 intel_fb_pitch(fb, 0, intel_state->base.rotation);
2890 mutex_lock(&dev->struct_mutex);
2892 intel_pin_and_fence_fb_obj(fb,
2894 intel_plane_uses_fence(intel_state),
2895 &intel_state->flags);
2896 mutex_unlock(&dev->struct_mutex);
2897 if (IS_ERR(intel_state->vma)) {
2898 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2899 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2901 intel_state->vma = NULL;
2902 drm_framebuffer_put(fb);
2906 obj = intel_fb_obj(fb);
2907 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
/* Full-fb src/dst rectangles (src in 16.16 fixed point). */
2909 plane_state->src_x = 0;
2910 plane_state->src_y = 0;
2911 plane_state->src_w = fb->width << 16;
2912 plane_state->src_h = fb->height << 16;
2914 plane_state->crtc_x = 0;
2915 plane_state->crtc_y = 0;
2916 plane_state->crtc_w = fb->width;
2917 plane_state->crtc_h = fb->height;
2919 intel_state->base.src = drm_plane_state_src(plane_state);
2920 intel_state->base.dst = drm_plane_state_dest(plane_state);
/* Keep the BIOS swizzle alive so the tiled boot fb scans out right. */
2922 if (i915_gem_object_is_tiled(obj))
2923 dev_priv->preserve_bios_swizzle = true;
2925 plane_state->fb = fb;
2926 plane_state->crtc = &intel_crtc->base;
2928 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2929 &obj->frontbuffer_bits);
/*
 * Maximum plane source width on SKL+ as a function of modifier, cpp and
 * rotation (the per-case return values and cpp/rotation branches are
 * elided in this extract).
 */
2932 static int skl_max_plane_width(const struct drm_framebuffer *fb,
2934 unsigned int rotation)
2936 int cpp = fb->format->cpp[color_plane];
2938 switch (fb->modifier) {
2939 case DRM_FORMAT_MOD_LINEAR:
2940 case I915_FORMAT_MOD_X_TILED:
2953 case I915_FORMAT_MOD_Y_TILED_CCS:
2954 case I915_FORMAT_MOD_Yf_TILED_CCS:
2955 /* FIXME AUX plane? */
2956 case I915_FORMAT_MOD_Y_TILED:
2957 case I915_FORMAT_MOD_Yf_TILED:
2972 MISSING_CASE(fb->modifier);
/*
 * Walk the CCS aux plane's offset backwards (one alignment step at a
 * time, folding the difference into x/y) until its subsampled x/y match
 * the main surface coordinates. Returns false when no match is reachable;
 * on success stores the final aux offset/x/y back into color_plane[1]
 * (the true/false return lines are elided in this extract).
 */
2978 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
2979 int main_x, int main_y, u32 main_offset)
2981 const struct drm_framebuffer *fb = plane_state->base.fb;
2982 int hsub = fb->format->hsub;
2983 int vsub = fb->format->vsub;
2984 int aux_x = plane_state->color_plane[1].x;
2985 int aux_y = plane_state->color_plane[1].y;
2986 u32 aux_offset = plane_state->color_plane[1].offset;
2987 u32 alignment = intel_surf_alignment(fb, 1);
2989 while (aux_offset >= main_offset && aux_y <= main_y) {
2992 if (aux_x == main_x && aux_y == main_y)
/* Can't step back past offset 0. */
2995 if (aux_offset == 0)
/* Step the aux offset down by one alignment unit and refold x/y. */
3000 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3001 aux_offset, aux_offset - alignment);
3002 aux_x = x * hsub + aux_x % hsub;
3003 aux_y = y * vsub + aux_y % vsub;
3006 if (aux_x != main_x || aux_y != main_y)
3009 plane_state->color_plane[1].offset = aux_offset;
3010 plane_state->color_plane[1].x = aux_x;
3011 plane_state->color_plane[1].y = aux_y;
3016 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3018 const struct drm_framebuffer *fb = plane_state->base.fb;
3019 unsigned int rotation = plane