2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/reservation.h>
33 #include <linux/slab.h>
34 #include <linux/vgaarb.h>
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45 #include <drm/i915_drm.h>
48 #include "i915_gem_clflush.h"
49 #include "i915_trace.h"
50 #include "intel_drv.h"
51 #include "intel_dsi.h"
52 #include "intel_frontbuffer.h"
54 #include "intel_drv.h"
55 #include "intel_dsi.h"
56 #include "intel_frontbuffer.h"
59 #include "i915_gem_clflush.h"
60 #include "i915_reset.h"
61 #include "i915_trace.h"
63 /* Primary plane formats for gen <= 3 */
64 static const u32 i8xx_primary_formats[] = {
71 /* Primary plane formats for gen >= 4 */
72 static const u32 i965_primary_formats[] = {
77 DRM_FORMAT_XRGB2101010,
78 DRM_FORMAT_XBGR2101010,
81 static const u64 i9xx_format_modifiers[] = {
82 I915_FORMAT_MOD_X_TILED,
83 DRM_FORMAT_MOD_LINEAR,
84 DRM_FORMAT_MOD_INVALID
88 static const u32 intel_cursor_formats[] = {
92 static const u64 cursor_format_modifiers[] = {
93 DRM_FORMAT_MOD_LINEAR,
94 DRM_FORMAT_MOD_INVALID
97 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
98 struct intel_crtc_state *pipe_config);
99 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
100 struct intel_crtc_state *pipe_config);
102 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
103 struct drm_i915_gem_object *obj,
104 struct drm_mode_fb_cmd2 *mode_cmd);
105 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
106 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
107 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
108 const struct intel_link_m_n *m_n,
109 const struct intel_link_m_n *m2_n2);
110 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
111 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
112 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
113 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
114 static void vlv_prepare_pll(struct intel_crtc *crtc,
115 const struct intel_crtc_state *pipe_config);
116 static void chv_prepare_pll(struct intel_crtc *crtc,
117 const struct intel_crtc_state *pipe_config);
118 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
119 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
120 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
121 struct intel_crtc_state *crtc_state);
122 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
123 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
124 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
125 static void intel_modeset_setup_hw_state(struct drm_device *dev,
126 struct drm_modeset_acquire_ctx *ctx);
127 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
/*
 * Min/max constraints for the DPLL dividers of a given platform/output.
 * NOTE(review): opening lines of this struct were lost in extraction;
 * reconstructed from upstream i915 — verify.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;		/* dot clock above which the fast p2 applies */
		int p2_slow, p2_fast;
	} p2;
};
140 /* returns HPLL frequency in kHz */
141 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
143 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
145 /* Obtain SKU information */
146 mutex_lock(&dev_priv->sb_lock);
147 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
148 CCK_FUSE_HPLL_FREQ_MASK;
149 mutex_unlock(&dev_priv->sb_lock);
151 return vco_freq[hpll_freq] * 1000;
154 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
155 const char *name, u32 reg, int ref_freq)
160 mutex_lock(&dev_priv->sb_lock);
161 val = vlv_cck_read(dev_priv, reg);
162 mutex_unlock(&dev_priv->sb_lock);
164 divider = val & CCK_FREQUENCY_VALUES;
166 WARN((val & CCK_FREQUENCY_STATUS) !=
167 (divider << CCK_FREQUENCY_STATUS_SHIFT),
168 "%s change in progress\n", name);
170 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
173 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
174 const char *name, u32 reg)
176 if (dev_priv->hpll_freq == 0)
177 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
179 return vlv_get_cck_clock(dev_priv, name, reg,
180 dev_priv->hpll_freq);
183 static void intel_update_czclk(struct drm_i915_private *dev_priv)
185 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
188 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
189 CCK_CZ_CLOCK_CONTROL);
191 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
194 static inline u32 /* units of 100MHz */
195 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
196 const struct intel_crtc_state *pipe_config)
198 if (HAS_DDI(dev_priv))
199 return pipe_config->port_clock; /* SPLL */
201 return dev_priv->fdi_pll_freq;
204 static const struct intel_limit intel_limits_i8xx_dac = {
205 .dot = { .min = 25000, .max = 350000 },
206 .vco = { .min = 908000, .max = 1512000 },
207 .n = { .min = 2, .max = 16 },
208 .m = { .min = 96, .max = 140 },
209 .m1 = { .min = 18, .max = 26 },
210 .m2 = { .min = 6, .max = 16 },
211 .p = { .min = 4, .max = 128 },
212 .p1 = { .min = 2, .max = 33 },
213 .p2 = { .dot_limit = 165000,
214 .p2_slow = 4, .p2_fast = 2 },
217 static const struct intel_limit intel_limits_i8xx_dvo = {
218 .dot = { .min = 25000, .max = 350000 },
219 .vco = { .min = 908000, .max = 1512000 },
220 .n = { .min = 2, .max = 16 },
221 .m = { .min = 96, .max = 140 },
222 .m1 = { .min = 18, .max = 26 },
223 .m2 = { .min = 6, .max = 16 },
224 .p = { .min = 4, .max = 128 },
225 .p1 = { .min = 2, .max = 33 },
226 .p2 = { .dot_limit = 165000,
227 .p2_slow = 4, .p2_fast = 4 },
230 static const struct intel_limit intel_limits_i8xx_lvds = {
231 .dot = { .min = 25000, .max = 350000 },
232 .vco = { .min = 908000, .max = 1512000 },
233 .n = { .min = 2, .max = 16 },
234 .m = { .min = 96, .max = 140 },
235 .m1 = { .min = 18, .max = 26 },
236 .m2 = { .min = 6, .max = 16 },
237 .p = { .min = 4, .max = 128 },
238 .p1 = { .min = 1, .max = 6 },
239 .p2 = { .dot_limit = 165000,
240 .p2_slow = 14, .p2_fast = 7 },
243 static const struct intel_limit intel_limits_i9xx_sdvo = {
244 .dot = { .min = 20000, .max = 400000 },
245 .vco = { .min = 1400000, .max = 2800000 },
246 .n = { .min = 1, .max = 6 },
247 .m = { .min = 70, .max = 120 },
248 .m1 = { .min = 8, .max = 18 },
249 .m2 = { .min = 3, .max = 7 },
250 .p = { .min = 5, .max = 80 },
251 .p1 = { .min = 1, .max = 8 },
252 .p2 = { .dot_limit = 200000,
253 .p2_slow = 10, .p2_fast = 5 },
256 static const struct intel_limit intel_limits_i9xx_lvds = {
257 .dot = { .min = 20000, .max = 400000 },
258 .vco = { .min = 1400000, .max = 2800000 },
259 .n = { .min = 1, .max = 6 },
260 .m = { .min = 70, .max = 120 },
261 .m1 = { .min = 8, .max = 18 },
262 .m2 = { .min = 3, .max = 7 },
263 .p = { .min = 7, .max = 98 },
264 .p1 = { .min = 1, .max = 8 },
265 .p2 = { .dot_limit = 112000,
266 .p2_slow = 14, .p2_fast = 7 },
270 static const struct intel_limit intel_limits_g4x_sdvo = {
271 .dot = { .min = 25000, .max = 270000 },
272 .vco = { .min = 1750000, .max = 3500000},
273 .n = { .min = 1, .max = 4 },
274 .m = { .min = 104, .max = 138 },
275 .m1 = { .min = 17, .max = 23 },
276 .m2 = { .min = 5, .max = 11 },
277 .p = { .min = 10, .max = 30 },
278 .p1 = { .min = 1, .max = 3},
279 .p2 = { .dot_limit = 270000,
285 static const struct intel_limit intel_limits_g4x_hdmi = {
286 .dot = { .min = 22000, .max = 400000 },
287 .vco = { .min = 1750000, .max = 3500000},
288 .n = { .min = 1, .max = 4 },
289 .m = { .min = 104, .max = 138 },
290 .m1 = { .min = 16, .max = 23 },
291 .m2 = { .min = 5, .max = 11 },
292 .p = { .min = 5, .max = 80 },
293 .p1 = { .min = 1, .max = 8},
294 .p2 = { .dot_limit = 165000,
295 .p2_slow = 10, .p2_fast = 5 },
298 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
299 .dot = { .min = 20000, .max = 115000 },
300 .vco = { .min = 1750000, .max = 3500000 },
301 .n = { .min = 1, .max = 3 },
302 .m = { .min = 104, .max = 138 },
303 .m1 = { .min = 17, .max = 23 },
304 .m2 = { .min = 5, .max = 11 },
305 .p = { .min = 28, .max = 112 },
306 .p1 = { .min = 2, .max = 8 },
307 .p2 = { .dot_limit = 0,
308 .p2_slow = 14, .p2_fast = 14
312 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
313 .dot = { .min = 80000, .max = 224000 },
314 .vco = { .min = 1750000, .max = 3500000 },
315 .n = { .min = 1, .max = 3 },
316 .m = { .min = 104, .max = 138 },
317 .m1 = { .min = 17, .max = 23 },
318 .m2 = { .min = 5, .max = 11 },
319 .p = { .min = 14, .max = 42 },
320 .p1 = { .min = 2, .max = 6 },
321 .p2 = { .dot_limit = 0,
322 .p2_slow = 7, .p2_fast = 7
326 static const struct intel_limit intel_limits_pineview_sdvo = {
327 .dot = { .min = 20000, .max = 400000},
328 .vco = { .min = 1700000, .max = 3500000 },
329 /* Pineview's Ncounter is a ring counter */
330 .n = { .min = 3, .max = 6 },
331 .m = { .min = 2, .max = 256 },
332 /* Pineview only has one combined m divider, which we treat as m2. */
333 .m1 = { .min = 0, .max = 0 },
334 .m2 = { .min = 0, .max = 254 },
335 .p = { .min = 5, .max = 80 },
336 .p1 = { .min = 1, .max = 8 },
337 .p2 = { .dot_limit = 200000,
338 .p2_slow = 10, .p2_fast = 5 },
341 static const struct intel_limit intel_limits_pineview_lvds = {
342 .dot = { .min = 20000, .max = 400000 },
343 .vco = { .min = 1700000, .max = 3500000 },
344 .n = { .min = 3, .max = 6 },
345 .m = { .min = 2, .max = 256 },
346 .m1 = { .min = 0, .max = 0 },
347 .m2 = { .min = 0, .max = 254 },
348 .p = { .min = 7, .max = 112 },
349 .p1 = { .min = 1, .max = 8 },
350 .p2 = { .dot_limit = 112000,
351 .p2_slow = 14, .p2_fast = 14 },
354 /* Ironlake / Sandybridge
356 * We calculate clock using (register_value + 2) for N/M1/M2, so here
357 * the range value for them is (actual_value - 2).
359 static const struct intel_limit intel_limits_ironlake_dac = {
360 .dot = { .min = 25000, .max = 350000 },
361 .vco = { .min = 1760000, .max = 3510000 },
362 .n = { .min = 1, .max = 5 },
363 .m = { .min = 79, .max = 127 },
364 .m1 = { .min = 12, .max = 22 },
365 .m2 = { .min = 5, .max = 9 },
366 .p = { .min = 5, .max = 80 },
367 .p1 = { .min = 1, .max = 8 },
368 .p2 = { .dot_limit = 225000,
369 .p2_slow = 10, .p2_fast = 5 },
372 static const struct intel_limit intel_limits_ironlake_single_lvds = {
373 .dot = { .min = 25000, .max = 350000 },
374 .vco = { .min = 1760000, .max = 3510000 },
375 .n = { .min = 1, .max = 3 },
376 .m = { .min = 79, .max = 118 },
377 .m1 = { .min = 12, .max = 22 },
378 .m2 = { .min = 5, .max = 9 },
379 .p = { .min = 28, .max = 112 },
380 .p1 = { .min = 2, .max = 8 },
381 .p2 = { .dot_limit = 225000,
382 .p2_slow = 14, .p2_fast = 14 },
385 static const struct intel_limit intel_limits_ironlake_dual_lvds = {
386 .dot = { .min = 25000, .max = 350000 },
387 .vco = { .min = 1760000, .max = 3510000 },
388 .n = { .min = 1, .max = 3 },
389 .m = { .min = 79, .max = 127 },
390 .m1 = { .min = 12, .max = 22 },
391 .m2 = { .min = 5, .max = 9 },
392 .p = { .min = 14, .max = 56 },
393 .p1 = { .min = 2, .max = 8 },
394 .p2 = { .dot_limit = 225000,
395 .p2_slow = 7, .p2_fast = 7 },
398 /* LVDS 100mhz refclk limits. */
399 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
400 .dot = { .min = 25000, .max = 350000 },
401 .vco = { .min = 1760000, .max = 3510000 },
402 .n = { .min = 1, .max = 2 },
403 .m = { .min = 79, .max = 126 },
404 .m1 = { .min = 12, .max = 22 },
405 .m2 = { .min = 5, .max = 9 },
406 .p = { .min = 28, .max = 112 },
407 .p1 = { .min = 2, .max = 8 },
408 .p2 = { .dot_limit = 225000,
409 .p2_slow = 14, .p2_fast = 14 },
412 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
413 .dot = { .min = 25000, .max = 350000 },
414 .vco = { .min = 1760000, .max = 3510000 },
415 .n = { .min = 1, .max = 3 },
416 .m = { .min = 79, .max = 126 },
417 .m1 = { .min = 12, .max = 22 },
418 .m2 = { .min = 5, .max = 9 },
419 .p = { .min = 14, .max = 42 },
420 .p1 = { .min = 2, .max = 6 },
421 .p2 = { .dot_limit = 225000,
422 .p2_slow = 7, .p2_fast = 7 },
425 static const struct intel_limit intel_limits_vlv = {
427 * These are the data rate limits (measured in fast clocks)
428 * since those are the strictest limits we have. The fast
429 * clock and actual rate limits are more relaxed, so checking
430 * them would make no difference.
432 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
433 .vco = { .min = 4000000, .max = 6000000 },
434 .n = { .min = 1, .max = 7 },
435 .m1 = { .min = 2, .max = 3 },
436 .m2 = { .min = 11, .max = 156 },
437 .p1 = { .min = 2, .max = 3 },
438 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
441 static const struct intel_limit intel_limits_chv = {
443 * These are the data rate limits (measured in fast clocks)
444 * since those are the strictest limits we have. The fast
445 * clock and actual rate limits are more relaxed, so checking
446 * them would make no difference.
448 .dot = { .min = 25000 * 5, .max = 540000 * 5},
449 .vco = { .min = 4800000, .max = 6480000 },
450 .n = { .min = 1, .max = 1 },
451 .m1 = { .min = 2, .max = 2 },
452 .m2 = { .min = 24 << 22, .max = 175 << 22 },
453 .p1 = { .min = 2, .max = 4 },
454 .p2 = { .p2_slow = 1, .p2_fast = 14 },
457 static const struct intel_limit intel_limits_bxt = {
458 /* FIXME: find real dot limits */
459 .dot = { .min = 0, .max = INT_MAX },
460 .vco = { .min = 4800000, .max = 6700000 },
461 .n = { .min = 1, .max = 1 },
462 .m1 = { .min = 2, .max = 2 },
463 /* FIXME: find real m2 limits */
464 .m2 = { .min = 2 << 22, .max = 255 << 22 },
465 .p1 = { .min = 2, .max = 4 },
466 .p2 = { .p2_slow = 1, .p2_fast = 20 },
470 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
473 I915_WRITE(CLKGATE_DIS_PSL(pipe),
474 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
476 I915_WRITE(CLKGATE_DIS_PSL(pipe),
477 I915_READ(CLKGATE_DIS_PSL(pipe)) &
478 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
482 needs_modeset(const struct drm_crtc_state *state)
484 return drm_atomic_crtc_needs_modeset(state);
488 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
489 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
490 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
491 * The helpers' return value is the rate of the clock that is fed to the
492 * display engine's pipe which can be the above fast dot clock rate or a
493 * divided-down version of it.
495 /* m1 is reserved as 0 in Pineview, n is a ring counter */
496 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
498 clock->m = clock->m2 + 2;
499 clock->p = clock->p1 * clock->p2;
500 if (WARN_ON(clock->n == 0 || clock->p == 0))
502 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
503 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
508 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
510 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
513 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
515 clock->m = i9xx_dpll_compute_m(clock);
516 clock->p = clock->p1 * clock->p2;
517 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
519 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
520 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
525 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
527 clock->m = clock->m1 * clock->m2;
528 clock->p = clock->p1 * clock->p2;
529 if (WARN_ON(clock->n == 0 || clock->p == 0))
531 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
532 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
534 return clock->dot / 5;
537 int chv_calc_dpll_params(int refclk, struct dpll *clock)
539 clock->m = clock->m1 * clock->m2;
540 clock->p = clock->p1 * clock->p2;
541 if (WARN_ON(clock->n == 0 || clock->p == 0))
543 clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
545 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
547 return clock->dot / 5;
550 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
553 * Returns whether the given set of divisors are valid for a given refclk with
554 * the given connectors.
556 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
557 const struct intel_limit *limit,
558 const struct dpll *clock)
560 if (clock->n < limit->n.min || limit->n.max < clock->n)
561 INTELPllInvalid("n out of range\n");
562 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
563 INTELPllInvalid("p1 out of range\n");
564 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
565 INTELPllInvalid("m2 out of range\n");
566 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
567 INTELPllInvalid("m1 out of range\n");
569 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
570 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
571 if (clock->m1 <= clock->m2)
572 INTELPllInvalid("m1 <= m2\n");
574 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
575 !IS_GEN9_LP(dev_priv)) {
576 if (clock->p < limit->p.min || limit->p.max < clock->p)
577 INTELPllInvalid("p out of range\n");
578 if (clock->m < limit->m.min || limit->m.max < clock->m)
579 INTELPllInvalid("m out of range\n");
582 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
583 INTELPllInvalid("vco out of range\n");
584 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
585 * connector, etc., rather than just a single range.
587 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
588 INTELPllInvalid("dot out of range\n");
594 i9xx_select_p2_div(const struct intel_limit *limit,
595 const struct intel_crtc_state *crtc_state,
598 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
600 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
602 * For LVDS just rely on its current settings for dual-channel.
603 * We haven't figured out how to reliably set up different
604 * single/dual channel state, if we even can.
606 if (intel_is_dual_link_lvds(dev_priv))
607 return limit->p2.p2_fast;
609 return limit->p2.p2_slow;
611 if (target < limit->p2.dot_limit)
612 return limit->p2.p2_slow;
614 return limit->p2.p2_fast;
619 * Returns a set of divisors for the desired target clock with the given
620 * refclk, or FALSE. The returned values represent the clock equation:
621 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
623 * Target and reference clocks are specified in kHz.
625 * If match_clock is provided, then best_clock P divider must match the P
626 * divider from @match_clock used for LVDS downclocking.
629 i9xx_find_best_dpll(const struct intel_limit *limit,
630 struct intel_crtc_state *crtc_state,
631 int target, int refclk, struct dpll *match_clock,
632 struct dpll *best_clock)
634 struct drm_device *dev = crtc_state->base.crtc->dev;
638 memset(best_clock, 0, sizeof(*best_clock));
640 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
642 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
644 for (clock.m2 = limit->m2.min;
645 clock.m2 <= limit->m2.max; clock.m2++) {
646 if (clock.m2 >= clock.m1)
648 for (clock.n = limit->n.min;
649 clock.n <= limit->n.max; clock.n++) {
650 for (clock.p1 = limit->p1.min;
651 clock.p1 <= limit->p1.max; clock.p1++) {
654 i9xx_calc_dpll_params(refclk, &clock);
655 if (!intel_PLL_is_valid(to_i915(dev),
660 clock.p != match_clock->p)
663 this_err = abs(clock.dot - target);
664 if (this_err < err) {
673 return (err != target);
677 * Returns a set of divisors for the desired target clock with the given
678 * refclk, or FALSE. The returned values represent the clock equation:
679 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
681 * Target and reference clocks are specified in kHz.
683 * If match_clock is provided, then best_clock P divider must match the P
684 * divider from @match_clock used for LVDS downclocking.
687 pnv_find_best_dpll(const struct intel_limit *limit,
688 struct intel_crtc_state *crtc_state,
689 int target, int refclk, struct dpll *match_clock,
690 struct dpll *best_clock)
692 struct drm_device *dev = crtc_state->base.crtc->dev;
696 memset(best_clock, 0, sizeof(*best_clock));
698 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
700 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
702 for (clock.m2 = limit->m2.min;
703 clock.m2 <= limit->m2.max; clock.m2++) {
704 for (clock.n = limit->n.min;
705 clock.n <= limit->n.max; clock.n++) {
706 for (clock.p1 = limit->p1.min;
707 clock.p1 <= limit->p1.max; clock.p1++) {
710 pnv_calc_dpll_params(refclk, &clock);
711 if (!intel_PLL_is_valid(to_i915(dev),
716 clock.p != match_clock->p)
719 this_err = abs(clock.dot - target);
720 if (this_err < err) {
729 return (err != target);
733 * Returns a set of divisors for the desired target clock with the given
734 * refclk, or FALSE. The returned values represent the clock equation:
735 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
737 * Target and reference clocks are specified in kHz.
739 * If match_clock is provided, then best_clock P divider must match the P
740 * divider from @match_clock used for LVDS downclocking.
743 g4x_find_best_dpll(const struct intel_limit *limit,
744 struct intel_crtc_state *crtc_state,
745 int target, int refclk, struct dpll *match_clock,
746 struct dpll *best_clock)
748 struct drm_device *dev = crtc_state->base.crtc->dev;
752 /* approximately equals target * 0.00585 */
753 int err_most = (target >> 8) + (target >> 9);
755 memset(best_clock, 0, sizeof(*best_clock));
757 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
759 max_n = limit->n.max;
760 /* based on hardware requirement, prefer smaller n to precision */
761 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
762 /* based on hardware requirement, prefere larger m1,m2 */
763 for (clock.m1 = limit->m1.max;
764 clock.m1 >= limit->m1.min; clock.m1--) {
765 for (clock.m2 = limit->m2.max;
766 clock.m2 >= limit->m2.min; clock.m2--) {
767 for (clock.p1 = limit->p1.max;
768 clock.p1 >= limit->p1.min; clock.p1--) {
771 i9xx_calc_dpll_params(refclk, &clock);
772 if (!intel_PLL_is_valid(to_i915(dev),
777 this_err = abs(clock.dot - target);
778 if (this_err < err_most) {
792 * Check if the calculated PLL configuration is more optimal compared to the
793 * best configuration and error found so far. Return the calculated error.
795 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
796 const struct dpll *calculated_clock,
797 const struct dpll *best_clock,
798 unsigned int best_error_ppm,
799 unsigned int *error_ppm)
802 * For CHV ignore the error and consider only the P value.
803 * Prefer a bigger P value based on HW requirements.
805 if (IS_CHERRYVIEW(to_i915(dev))) {
808 return calculated_clock->p > best_clock->p;
811 if (WARN_ON_ONCE(!target_freq))
814 *error_ppm = div_u64(1000000ULL *
815 abs(target_freq - calculated_clock->dot),
818 * Prefer a better P value over a better (smaller) error if the error
819 * is small. Ensure this preference for future configurations too by
820 * setting the error to 0.
822 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
828 return *error_ppm + 10 < best_error_ppm;
832 * Returns a set of divisors for the desired target clock with the given
833 * refclk, or FALSE. The returned values represent the clock equation:
834 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
837 vlv_find_best_dpll(const struct intel_limit *limit,
838 struct intel_crtc_state *crtc_state,
839 int target, int refclk, struct dpll *match_clock,
840 struct dpll *best_clock)
842 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
843 struct drm_device *dev = crtc->base.dev;
845 unsigned int bestppm = 1000000;
846 /* min update 19.2 MHz */
847 int max_n = min(limit->n.max, refclk / 19200);
850 target *= 5; /* fast clock */
852 memset(best_clock, 0, sizeof(*best_clock));
854 /* based on hardware requirement, prefer smaller n to precision */
855 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
856 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
857 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
858 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
859 clock.p = clock.p1 * clock.p2;
860 /* based on hardware requirement, prefer bigger m1,m2 values */
861 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
864 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
867 vlv_calc_dpll_params(refclk, &clock);
869 if (!intel_PLL_is_valid(to_i915(dev),
874 if (!vlv_PLL_is_optimal(dev, target,
892 * Returns a set of divisors for the desired target clock with the given
893 * refclk, or FALSE. The returned values represent the clock equation:
894 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
897 chv_find_best_dpll(const struct intel_limit *limit,
898 struct intel_crtc_state *crtc_state,
899 int target, int refclk, struct dpll *match_clock,
900 struct dpll *best_clock)
902 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
903 struct drm_device *dev = crtc->base.dev;
904 unsigned int best_error_ppm;
909 memset(best_clock, 0, sizeof(*best_clock));
910 best_error_ppm = 1000000;
913 * Based on hardware doc, the n always set to 1, and m1 always
914 * set to 2. If requires to support 200Mhz refclk, we need to
915 * revisit this because n may not 1 anymore.
917 clock.n = 1, clock.m1 = 2;
918 target *= 5; /* fast clock */
920 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
921 for (clock.p2 = limit->p2.p2_fast;
922 clock.p2 >= limit->p2.p2_slow;
923 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
924 unsigned int error_ppm;
926 clock.p = clock.p1 * clock.p2;
928 m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
929 clock.n) << 22, refclk * clock.m1);
931 if (m2 > INT_MAX/clock.m1)
936 chv_calc_dpll_params(refclk, &clock);
938 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
941 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
942 best_error_ppm, &error_ppm))
946 best_error_ppm = error_ppm;
954 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
955 struct dpll *best_clock)
958 const struct intel_limit *limit = &intel_limits_bxt;
960 return chv_find_best_dpll(limit, crtc_state,
961 crtc_state->port_clock, refclk,
965 bool intel_crtc_active(struct intel_crtc *crtc)
967 /* Be paranoid as we can arrive here with only partial
968 * state retrieved from the hardware during setup.
970 * We can ditch the adjusted_mode.crtc_clock check as soon
971 * as Haswell has gained clock readout/fastboot support.
973 * We can ditch the crtc->primary->state->fb check as soon as we can
974 * properly reconstruct framebuffers.
976 * FIXME: The intel_crtc->active here should be switched to
977 * crtc->state->active once we have proper CRTC states wired up
980 return crtc->active && crtc->base.primary->state->fb &&
981 crtc->config->base.adjusted_mode.crtc_clock;
984 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
987 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
989 return crtc->config->cpu_transcoder;
992 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
995 i915_reg_t reg = PIPEDSL(pipe);
999 if (IS_GEN(dev_priv, 2))
1000 line_mask = DSL_LINEMASK_GEN2;
1002 line_mask = DSL_LINEMASK_GEN3;
1004 line1 = I915_READ(reg) & line_mask;
1006 line2 = I915_READ(reg) & line_mask;
1008 return line1 != line2;
1011 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1013 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1014 enum pipe pipe = crtc->pipe;
1016 /* Wait for the display line to settle/start moving */
1017 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1018 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1019 pipe_name(pipe), onoff(state));
1022 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1024 wait_for_pipe_scanline_moving(crtc, false);
1027 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1029 wait_for_pipe_scanline_moving(crtc, true);
1033 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1035 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1036 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1038 if (INTEL_GEN(dev_priv) >= 4) {
1039 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1040 i915_reg_t reg = PIPECONF(cpu_transcoder);
1042 /* Wait for the Pipe State to go off */
1043 if (intel_wait_for_register(dev_priv,
1044 reg, I965_PIPECONF_ACTIVE, 0,
1046 WARN(1, "pipe_off wait timed out\n");
1048 intel_wait_for_pipe_scanline_stopped(crtc);
1052 /* Only for pre-ILK configs */
1053 void assert_pll(struct drm_i915_private *dev_priv,
1054 enum pipe pipe, bool state)
1059 val = I915_READ(DPLL(pipe));
1060 cur_state = !!(val & DPLL_VCO_ENABLE);
1061 I915_STATE_WARN(cur_state != state,
1062 "PLL state assertion failure (expected %s, current %s)\n",
1063 onoff(state), onoff(cur_state));
1066 /* XXX: the dsi pll is shared between MIPI DSI ports */
1067 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1072 mutex_lock(&dev_priv->sb_lock);
1073 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1074 mutex_unlock(&dev_priv->sb_lock);
1076 cur_state = val & DSI_PLL_VCO_EN;
1077 I915_STATE_WARN(cur_state != state,
1078 "DSI PLL state assertion failure (expected %s, current %s)\n",
1079 onoff(state), onoff(cur_state));
1082 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1083 enum pipe pipe, bool state)
1086 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1089 if (HAS_DDI(dev_priv)) {
1090 /* DDI does not have a specific FDI_TX register */
1091 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1092 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1094 u32 val = I915_READ(FDI_TX_CTL(pipe));
1095 cur_state = !!(val & FDI_TX_ENABLE);
1097 I915_STATE_WARN(cur_state != state,
1098 "FDI TX state assertion failure (expected %s, current %s)\n",
1099 onoff(state), onoff(cur_state));
1101 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1102 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1104 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1105 enum pipe pipe, bool state)
1110 val = I915_READ(FDI_RX_CTL(pipe));
1111 cur_state = !!(val & FDI_RX_ENABLE);
1112 I915_STATE_WARN(cur_state != state,
1113 "FDI RX state assertion failure (expected %s, current %s)\n",
1114 onoff(state), onoff(cur_state));
1116 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1117 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
/*
 * Assert the FDI TX PLL is enabled; skipped on gen5 (always on)
 * and on DDI platforms (PLL handled by the DDI port).
 */
1119 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1124 /* ILK FDI PLL is always enabled */
1125 if (IS_GEN(dev_priv, 5))
1128 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1129 if (HAS_DDI(dev_priv))
1132 val = I915_READ(FDI_TX_CTL(pipe));
1133 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
/* Assert the FDI RX PLL enable bit matches the expected @state. */
1136 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1137 enum pipe pipe, bool state)
1142 val = I915_READ(FDI_RX_CTL(pipe));
1143 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1144 I915_STATE_WARN(cur_state != state,
1145 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1146 onoff(state), onoff(cur_state));
/*
 * Warn if the panel power sequencer driving @pipe has its registers
 * write-locked. The panel port is resolved per-platform: PCH panels
 * by PP_ON_DELAYS port-select, VLV/CHV by pipe, otherwise LVDS.
 * Not applicable on DDI platforms (WARN_ON below).
 */
1149 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1153 enum pipe panel_pipe = INVALID_PIPE;
1156 if (WARN_ON(HAS_DDI(dev_priv)))
1159 if (HAS_PCH_SPLIT(dev_priv)) {
1162 pp_reg = PP_CONTROL(0);
1163 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
/* Map the PPS port-select field to the pipe feeding that port. */
1166 case PANEL_PORT_SELECT_LVDS:
1167 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1169 case PANEL_PORT_SELECT_DPA:
1170 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1172 case PANEL_PORT_SELECT_DPC:
1173 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1175 case PANEL_PORT_SELECT_DPD:
1176 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1179 MISSING_CASE(port_sel);
1182 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1183 /* presumably write lock depends on pipe, not port select */
1184 pp_reg = PP_CONTROL(pipe);
1189 pp_reg = PP_CONTROL(0);
1190 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1192 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
1193 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
/* Panel off, or unlock magic written, counts as "unlocked". */
1196 val = I915_READ(pp_reg);
1197 if (!(val & PANEL_POWER_ON) ||
1198 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1201 I915_STATE_WARN(panel_pipe == pipe && locked,
1202 "panel assertion failure, pipe %c regs locked\n",
/*
 * Assert that @pipe's PIPECONF enable bit matches @state. Only reads
 * the register when the transcoder power domain can be grabbed;
 * i830 is skipped since both pipes stay enabled there.
 */
1206 void assert_pipe(struct drm_i915_private *dev_priv,
1207 enum pipe pipe, bool state)
1210 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1212 enum intel_display_power_domain power_domain;
1213 intel_wakeref_t wakeref;
1215 /* we keep both pipes enabled on 830 */
1216 if (IS_I830(dev_priv))
1219 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1220 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1222 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1223 cur_state = !!(val & PIPECONF_ENABLE);
1225 intel_display_power_put(dev_priv, power_domain, wakeref);
1230 I915_STATE_WARN(cur_state != state,
1231 "pipe %c assertion failure (expected %s, current %s)\n",
1232 pipe_name(pipe), onoff(state), onoff(cur_state));
/*
 * Assert a plane's hardware enable state (queried through its
 * get_hw_state vfunc) matches the expected @state.
 */
1235 static void assert_plane(struct intel_plane *plane, bool state)
1240 cur_state = plane->get_hw_state(plane, &pipe);
1242 I915_STATE_WARN(cur_state != state,
1243 "%s assertion failure (expected %s, current %s)\n",
1244 plane->base.name, onoff(state), onoff(cur_state));
/* Convenience wrappers fixing the expected state. */
1247 #define assert_plane_enabled(p) assert_plane(p, true)
1248 #define assert_plane_disabled(p) assert_plane(p, false)
/* Assert every plane attached to @crtc is disabled in hardware. */
1250 static void assert_planes_disabled(struct intel_crtc *crtc)
1252 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1253 struct intel_plane *plane;
1255 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1256 assert_plane_disabled(plane);
/*
 * Warn if vblank interrupts are still enabled on @crtc: a successful
 * (== 0) vblank_get here means they were on; drop the ref again.
 */
1259 static void assert_vblank_disabled(struct drm_crtc *crtc)
1261 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1262 drm_crtc_vblank_put(crtc);
/* Warn if the PCH transcoder for @pipe is still enabled. */
1265 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1271 val = I915_READ(PCH_TRANSCONF(pipe));
1272 enabled = !!(val & TRANS_ENABLE);
1273 I915_STATE_WARN(enabled,
1274 "transcoder assertion failed, should be off on pipe %c but is still active\n",
/*
 * Warn if the PCH DP port @port is enabled on transcoder @pipe, and
 * additionally (IBX quirk) if a disabled port is left selecting
 * transcoder B.
 */
1278 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1279 enum pipe pipe, enum port port,
1282 enum pipe port_pipe;
1285 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1287 I915_STATE_WARN(state && port_pipe == pipe,
1288 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1289 port_name(port), pipe_name(pipe));
1291 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1292 "IBX PCH DP %c still using transcoder B\n",
/*
 * Warn if the PCH HDMI/SDVO port is enabled on transcoder @pipe,
 * plus the same IBX transcoder-B leftover check as the DP variant.
 */
1296 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1297 enum pipe pipe, enum port port,
1298 i915_reg_t hdmi_reg)
1300 enum pipe port_pipe;
1303 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1305 I915_STATE_WARN(state && port_pipe == pipe,
1306 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1307 port_name(port), pipe_name(pipe));
1309 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1310 "IBX PCH HDMI %c still using transcoder B\n",
/*
 * Assert all PCH ports (DP B/C/D, VGA, LVDS, HDMI B/C/D) are off on
 * transcoder @pipe before the transcoder itself is disabled.
 */
1314 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1317 enum pipe port_pipe;
1319 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1320 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1321 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1323 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1325 "PCH VGA enabled on transcoder %c, should be disabled\n",
1328 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1330 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1333 /* PCH SDVOB multiplex with HDMIB */
1334 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1335 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1336 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
/*
 * Low-level VLV DPLL enable: write the precomputed DPLL value and
 * wait for the PLL lock bit; logs an error on lock timeout.
 */
1339 static void _vlv_enable_pll(struct intel_crtc *crtc,
1340 const struct intel_crtc_state *pipe_config)
1342 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1343 enum pipe pipe = crtc->pipe;
1345 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1346 POSTING_READ(DPLL(pipe));
1349 if (intel_wait_for_register(dev_priv,
1354 DRM_ERROR("DPLL %d failed to lock\n", pipe);
/*
 * Enable the VLV DPLL for @crtc: verify preconditions (pipe off,
 * panel regs unlocked), enable the PLL only if the state asks for
 * VCO enable, then program DPLL_MD unconditionally.
 */
1357 static void vlv_enable_pll(struct intel_crtc *crtc,
1358 const struct intel_crtc_state *pipe_config)
1360 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1361 enum pipe pipe = crtc->pipe;
1363 assert_pipe_disabled(dev_priv, pipe);
1365 /* PLL is protected by panel, make sure we can write it */
1366 assert_panel_unlocked(dev_priv, pipe);
1368 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1369 _vlv_enable_pll(crtc, pipe_config);
1371 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1372 POSTING_READ(DPLL_MD(pipe));
/*
 * Low-level CHV DPLL enable: turn the 10-bit DPIO clock back on via
 * sideband, then enable the PLL and wait for lock.
 */
1376 static void _chv_enable_pll(struct intel_crtc *crtc,
1377 const struct intel_crtc_state *pipe_config)
1379 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1380 enum pipe pipe = crtc->pipe;
1381 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1384 mutex_lock(&dev_priv->sb_lock);
1386 /* Enable back the 10bit clock to display controller */
1387 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1388 tmp |= DPIO_DCLKP_EN;
1389 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1391 mutex_unlock(&dev_priv->sb_lock);
1394 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1399 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1401 /* Check PLL is locked */
1402 if (intel_wait_for_register(dev_priv,
1403 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1405 DRM_ERROR("PLL %d failed to lock\n", pipe);
/*
 * Enable the CHV DPLL for @crtc. Like the VLV path, but pipes B/C
 * need the WaPixelRepeatModeFixForC0 chicken-bit dance to program
 * DPLL_MD, since their own DPLLCMD register is non-functional.
 */
1408 static void chv_enable_pll(struct intel_crtc *crtc,
1409 const struct intel_crtc_state *pipe_config)
1411 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1412 enum pipe pipe = crtc->pipe;
1414 assert_pipe_disabled(dev_priv, pipe);
1416 /* PLL is protected by panel, make sure we can write it */
1417 assert_panel_unlocked(dev_priv, pipe);
1419 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1420 _chv_enable_pll(crtc, pipe_config);
1422 if (pipe != PIPE_A) {
1424 * WaPixelRepeatModeFixForC0:chv
1426 * DPLLCMD is AWOL. Use chicken bits to propagate
1427 * the value from DPLLBMD to either pipe B or C.
1429 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1430 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1431 I915_WRITE(CBR4_VLV, 0);
1432 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1435 * DPLLB VGA mode also seems to cause problems.
1436 * We should always have it disabled.
1438 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1440 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1441 POSTING_READ(DPLL_MD(pipe));
/*
 * Enable the gen2-4 style DPLL: program it with VGA mode first (the
 * dividers only latch with VGA mode enabled), write the real value,
 * set the pixel multiplier via DPLL_MD on gen4+, then rewrite the
 * DPLL three times with warmup delays per the programming sequence.
 */
1445 static void i9xx_enable_pll(struct intel_crtc *crtc,
1446 const struct intel_crtc_state *crtc_state)
1448 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1449 i915_reg_t reg = DPLL(crtc->pipe);
1450 u32 dpll = crtc_state->dpll_hw_state.dpll;
1453 assert_pipe_disabled(dev_priv, crtc->pipe);
1455 /* PLL is protected by panel, make sure we can write it */
1456 if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
1457 assert_panel_unlocked(dev_priv, crtc->pipe);
1460 * Apparently we need to have VGA mode enabled prior to changing
1461 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1462 * dividers, even though the register value does change.
1464 I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
1465 I915_WRITE(reg, dpll);
1467 /* Wait for the clocks to stabilize. */
1471 if (INTEL_GEN(dev_priv) >= 4) {
1472 I915_WRITE(DPLL_MD(crtc->pipe),
1473 crtc_state->dpll_hw_state.dpll_md);
1475 /* The pixel multiplier can only be updated once the
1476 * DPLL is enabled and the clocks are stable.
1478 * So write it again.
1480 I915_WRITE(reg, dpll);
1483 /* We do this three times for luck */
1484 for (i = 0; i < 3; i++) {
1485 I915_WRITE(reg, dpll);
1487 udelay(150); /* wait for warmup */
/*
 * Disable the gen2-4 DPLL, leaving only VGA-mode-disable set.
 * Skipped on i830, whose pipes/PLLs must stay running.
 */
1491 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1493 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1494 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1495 enum pipe pipe = crtc->pipe;
1497 /* Don't disable pipe or pipe PLLs if needed */
1498 if (IS_I830(dev_priv))
1501 /* Make sure the pipe isn't still relying on us */
1502 assert_pipe_disabled(dev_priv, pipe);
1504 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1505 POSTING_READ(DPLL(pipe));
/*
 * Disable the VLV DPLL but keep the reference clock(s) running,
 * as downstream logic still needs them.
 */
1508 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1512 /* Make sure the pipe isn't still relying on us */
1513 assert_pipe_disabled(dev_priv, pipe);
1515 val = DPLL_INTEGRATED_REF_CLK_VLV |
1516 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1518 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1520 I915_WRITE(DPLL(pipe), val);
1521 POSTING_READ(DPLL(pipe));
/*
 * Disable the CHV DPLL (keeping reference clocks on), then gate the
 * 10-bit DPIO clock to the display controller via sideband.
 */
1524 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1526 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1529 /* Make sure the pipe isn't still relying on us */
1530 assert_pipe_disabled(dev_priv, pipe);
1532 val = DPLL_SSC_REF_CLK_CHV |
1533 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1535 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1537 I915_WRITE(DPLL(pipe), val);
1538 POSTING_READ(DPLL(pipe));
1540 mutex_lock(&dev_priv->sb_lock);
1542 /* Disable 10bit clock to display controller */
1543 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1544 val &= ~DPIO_DCLKP_EN;
1545 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1547 mutex_unlock(&dev_priv->sb_lock);
/*
 * Poll the DPLL/PHY status register until @dport reports the
 * expected ready bits. Port C shares port B's ready field shifted
 * by 4; port D lives in DPIO_PHY_STATUS. WARNs on timeout.
 */
1550 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1551 struct intel_digital_port *dport,
1552 unsigned int expected_mask)
1555 i915_reg_t dpll_reg;
1557 switch (dport->base.port) {
1559 port_mask = DPLL_PORTB_READY_MASK;
1563 port_mask = DPLL_PORTC_READY_MASK;
1565 expected_mask <<= 4;
1568 port_mask = DPLL_PORTD_READY_MASK;
1569 dpll_reg = DPIO_PHY_STATUS;
1575 if (intel_wait_for_register(dev_priv,
1576 dpll_reg, port_mask, expected_mask,
1578 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1579 port_name(dport->base.port),
1580 I915_READ(dpll_reg) & port_mask, expected_mask);
/*
 * Enable the ILK/CPT PCH transcoder for @crtc_state's pipe:
 * verify DPLL/FDI preconditions, apply the CPT timing-override
 * workaround, copy BPC and interlace mode from PIPECONF into
 * PCH_TRANSCONF, then enable and wait for the transcoder state bit.
 */
1583 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1585 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1586 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1587 enum pipe pipe = crtc->pipe;
1589 u32 val, pipeconf_val;
1591 /* Make sure PCH DPLL is enabled */
1592 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1594 /* FDI must be feeding us bits for PCH ports */
1595 assert_fdi_tx_enabled(dev_priv, pipe);
1596 assert_fdi_rx_enabled(dev_priv, pipe);
1598 if (HAS_PCH_CPT(dev_priv)) {
1599 /* Workaround: Set the timing override bit before enabling the
1600 * pch transcoder. */
1601 reg = TRANS_CHICKEN2(pipe);
1602 val = I915_READ(reg);
1603 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1604 I915_WRITE(reg, val);
1607 reg = PCH_TRANSCONF(pipe);
1608 val = I915_READ(reg);
1609 pipeconf_val = I915_READ(PIPECONF(pipe));
1611 if (HAS_PCH_IBX(dev_priv)) {
1613 * Make the BPC in transcoder be consistent with
1614 * that in pipeconf reg. For HDMI we must use 8bpc
1615 * here for both 8bpc and 12bpc.
1617 val &= ~PIPECONF_BPC_MASK;
1618 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1619 val |= PIPECONF_8BPC;
1621 val |= pipeconf_val & PIPECONF_BPC_MASK;
/* Propagate the pipe's interlace mode; IBX SDVO needs legacy mode. */
1624 val &= ~TRANS_INTERLACE_MASK;
1625 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1626 if (HAS_PCH_IBX(dev_priv) &&
1627 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1628 val |= TRANS_LEGACY_INTERLACED_ILK;
1630 val |= TRANS_INTERLACED;
1632 val |= TRANS_PROGRESSIVE;
1635 I915_WRITE(reg, val | TRANS_ENABLE);
1636 if (intel_wait_for_register(dev_priv,
1637 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1639 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
/*
 * Enable the single LPT PCH transcoder: FDI must already be up
 * (RX is hardwired to pipe A on LPT), set the timing-override
 * chicken bit, copy the interlace mode from the CPU transcoder's
 * PIPECONF, then enable and wait for the state bit.
 */
1642 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1643 enum transcoder cpu_transcoder)
1645 u32 val, pipeconf_val;
1647 /* FDI must be feeding us bits for PCH ports */
1648 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1649 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1651 /* Workaround: set timing override bit. */
1652 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1653 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1654 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1657 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1659 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1660 PIPECONF_INTERLACED_ILK)
1661 val |= TRANS_INTERLACED;
1663 val |= TRANS_PROGRESSIVE;
1665 I915_WRITE(LPT_TRANSCONF, val);
1666 if (intel_wait_for_register(dev_priv,
1671 DRM_ERROR("Failed to enable PCH transcoder\n");
/*
 * Disable the ILK/CPT PCH transcoder for @pipe: FDI and all PCH
 * ports must already be off, then clear TRANS_ENABLE, wait for the
 * state bit to drop, and clear the CPT timing-override workaround.
 */
1674 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1680 /* FDI relies on the transcoder */
1681 assert_fdi_tx_disabled(dev_priv, pipe);
1682 assert_fdi_rx_disabled(dev_priv, pipe);
1684 /* Ports must be off as well */
1685 assert_pch_ports_disabled(dev_priv, pipe);
1687 reg = PCH_TRANSCONF(pipe);
1688 val = I915_READ(reg);
1689 val &= ~TRANS_ENABLE;
1690 I915_WRITE(reg, val);
1691 /* wait for PCH transcoder off, transcoder state */
1692 if (intel_wait_for_register(dev_priv,
1693 reg, TRANS_STATE_ENABLE, 0,
1695 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1697 if (HAS_PCH_CPT(dev_priv)) {
1698 /* Workaround: Clear the timing override chicken bit again. */
1699 reg = TRANS_CHICKEN2(pipe);
1700 val = I915_READ(reg);
1701 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1702 I915_WRITE(reg, val);
/*
 * Disable the single LPT PCH transcoder, wait for its state bit to
 * clear, and undo the timing-override workaround bit.
 */
1706 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1710 val = I915_READ(LPT_TRANSCONF);
1711 val &= ~TRANS_ENABLE;
1712 I915_WRITE(LPT_TRANSCONF, val);
1713 /* wait for PCH transcoder off, transcoder state */
1714 if (intel_wait_for_register(dev_priv,
1715 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1717 DRM_ERROR("Failed to disable PCH transcoder\n");
1719 /* Workaround: clear timing override bit. */
1720 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1721 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1722 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
/*
 * Return the PCH transcoder (as a pipe enum) used by @crtc.
 * NOTE(review): the return statements are outside this excerpt;
 * presumably LPT maps to a fixed transcoder — confirm in full file.
 */
1725 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1727 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1729 if (HAS_PCH_LPT(dev_priv))
/*
 * Return the maximum hardware frame counter value for this crtc
 * config: 0 means "no usable hw counter" (gen2, or i965gm with the
 * TV encoder active), 24 bits on gen3/4, full 32 bits on gen5+/g4x.
 */
1735 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1737 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1740 * On i965gm the hardware frame counter reads
1741 * zero when the TV encoder is enabled :(
1743 if (IS_I965GM(dev_priv) &&
1744 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1747 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1748 return 0xffffffff; /* full 32 bit counter */
1749 else if (INTEL_GEN(dev_priv) >= 3)
1750 return 0xffffff; /* only 24 bits of frame count */
1752 return 0; /* Gen2 doesn't have a hardware frame counter */
/*
 * Enable vblank handling for the crtc, first telling drm core the
 * hardware frame counter's wrap limit for this configuration.
 */
1755 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1757 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1759 drm_crtc_set_max_vblank_count(&crtc->base,
1760 intel_crtc_max_vblank_count(crtc_state));
1761 drm_crtc_vblank_on(&crtc->base);
/*
 * Enable the pipe for @new_crtc_state: assert its PLL/FDI
 * prerequisites, set PIPECONF_ENABLE, and — when no hw frame
 * counter is available — wait for the scanline to actually move so
 * later vblank timestamps are sane.
 */
1764 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1766 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1767 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1768 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1769 enum pipe pipe = crtc->pipe;
1773 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1775 assert_planes_disabled(crtc);
1778 * A pipe without a PLL won't actually be able to drive bits from
1779 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1782 if (HAS_GMCH(dev_priv)) {
1783 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1784 assert_dsi_pll_enabled(dev_priv);
1786 assert_pll_enabled(dev_priv, pipe);
1788 if (new_crtc_state->has_pch_encoder) {
1789 /* if driving the PCH, we need FDI enabled */
1790 assert_fdi_rx_pll_enabled(dev_priv,
1791 intel_crtc_pch_transcoder(crtc));
1792 assert_fdi_tx_pll_enabled(dev_priv,
1793 (enum pipe) cpu_transcoder);
1795 /* FIXME: assert CPU port conditions for SNB+ */
1798 trace_intel_pipe_enable(dev_priv, pipe);
1800 reg = PIPECONF(cpu_transcoder);
1801 val = I915_READ(reg);
1802 if (val & PIPECONF_ENABLE) {
1803 /* we keep both pipes enabled on 830 */
1804 WARN_ON(!IS_I830(dev_priv));
1808 I915_WRITE(reg, val | PIPECONF_ENABLE);
1812 * Until the pipe starts PIPEDSL reads will return a stale value,
1813 * which causes an apparent vblank timestamp jump when PIPEDSL
1814 * resets to its proper value. That also messes up the frame count
1815 * when it's derived from the timestamps. So let's wait for the
1816 * pipe to start properly before we call drm_crtc_vblank_on()
1818 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1819 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * Disable the pipe for @old_crtc_state. Double-wide mode is always
 * cleared; the enable bit itself is left set on i830 (its pipes must
 * stay on). Waits for the pipe to actually stop when it was enabled.
 */
1822 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1824 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1825 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1826 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1827 enum pipe pipe = crtc->pipe;
1831 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1834 * Make sure planes won't keep trying to pump pixels to us,
1835 * or we might hang the display.
1837 assert_planes_disabled(crtc);
1839 trace_intel_pipe_disable(dev_priv, pipe);
1841 reg = PIPECONF(cpu_transcoder);
1842 val = I915_READ(reg);
1843 if ((val & PIPECONF_ENABLE) == 0)
1847 * Double wide has implications for planes
1848 * so best keep it disabled when not needed.
1850 if (old_crtc_state->double_wide)
1851 val &= ~PIPECONF_DOUBLE_WIDE;
1853 /* Don't disable pipe or pipe PLLs if needed */
1854 if (!IS_I830(dev_priv))
1855 val &= ~PIPECONF_ENABLE;
1857 I915_WRITE(reg, val);
1858 if ((val & PIPECONF_ENABLE) == 0)
1859 intel_wait_for_pipe_off(old_crtc_state);
/* GTT tile size in bytes: 2 KiB on gen2, 4 KiB everywhere else. */
1862 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1864 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
/*
 * Return the tile width in bytes for @color_plane of @fb, keyed on
 * the fb modifier (linear, X/Y/Yf tiling, with special widths for
 * the CCS auxiliary plane and gen2/128-byte-Y-tiling hardware).
 */
1868 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1870 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1871 unsigned int cpp = fb->format->cpp[color_plane];
1873 switch (fb->modifier) {
1874 case DRM_FORMAT_MOD_LINEAR:
1876 case I915_FORMAT_MOD_X_TILED:
1877 if (IS_GEN(dev_priv, 2))
1881 case I915_FORMAT_MOD_Y_TILED_CCS:
1882 if (color_plane == 1)
1885 case I915_FORMAT_MOD_Y_TILED:
1886 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1890 case I915_FORMAT_MOD_Yf_TILED_CCS:
1891 if (color_plane == 1)
1894 case I915_FORMAT_MOD_Yf_TILED:
1910 MISSING_CASE(fb->modifier);
/*
 * Tile height in rows for @color_plane: 1 for linear, otherwise
 * tile size divided by the tile row width in bytes.
 */
1916 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1918 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1921 return intel_tile_size(to_i915(fb->dev)) /
1922 intel_tile_width_bytes(fb, color_plane);
1925 /* Return the tile dimensions in pixel units */
1926 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1927 unsigned int *tile_width,
1928 unsigned int *tile_height)
1930 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1931 unsigned int cpp = fb->format->cpp[color_plane];
/* width: bytes-per-row / bytes-per-pixel; height: tile bytes / row bytes */
1933 *tile_width = tile_width_bytes / cpp;
1934 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
/* Round @height up to a whole number of tile rows for @color_plane. */
1938 intel_fb_align_height(const struct drm_framebuffer *fb,
1939 int color_plane, unsigned int height)
1941 unsigned int tile_height = intel_tile_height(fb, color_plane);
1943 return ALIGN(height, tile_height);
/*
 * Total size (in tiles/pages, per plane width*height units) of a
 * rotated GGTT view: sum over all planes of the rotation info.
 */
1946 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1948 unsigned int size = 0;
1951 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1952 size += rot_info->plane[i].width * rot_info->plane[i].height;
/*
 * Fill @view for scanning out @fb: the normal GGTT view unless the
 * plane rotation is 90/270, in which case the precomputed rotated
 * view from the intel framebuffer is used.
 */
1958 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1959 const struct drm_framebuffer *fb,
1960 unsigned int rotation)
1962 view->type = I915_GGTT_VIEW_NORMAL;
1963 if (drm_rotation_90_or_270(rotation)) {
1964 view->type = I915_GGTT_VIEW_ROTATED;
1965 view->rotated = to_intel_framebuffer(fb)->rot_info;
/* Required GGTT alignment for cursor surfaces, per platform. */
1969 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
1971 if (IS_I830(dev_priv))
1973 else if (IS_I85X(dev_priv))
1975 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
/* Required GGTT alignment for linear scanout surfaces, per gen. */
1981 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
1983 if (INTEL_GEN(dev_priv) >= 9)
1985 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
1986 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1988 else if (INTEL_GEN(dev_priv) >= 4)
/*
 * Required GGTT alignment for a scanout surface plane, keyed on the
 * fb modifier; the CCS aux plane (color_plane == 1) only needs 4K.
 */
1994 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
1997 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1999 /* AUX_DIST needs only 4K alignment */
2000 if (color_plane == 1)
2003 switch (fb->modifier) {
2004 case DRM_FORMAT_MOD_LINEAR:
2005 return intel_linear_alignment(dev_priv);
2006 case I915_FORMAT_MOD_X_TILED:
2007 if (INTEL_GEN(dev_priv) >= 9)
2010 case I915_FORMAT_MOD_Y_TILED_CCS:
2011 case I915_FORMAT_MOD_Yf_TILED_CCS:
2012 case I915_FORMAT_MOD_Y_TILED:
2013 case I915_FORMAT_MOD_Yf_TILED:
2014 return 1 * 1024 * 1024;
2016 MISSING_CASE(fb->modifier);
/*
 * Whether scanout of this plane needs a GGTT fence: always pre-gen4,
 * otherwise only for planes that can use FBC.
 */
2021 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2023 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2024 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2026 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
/*
 * Pin @fb's backing object into the GGTT for scanout (with the
 * alignment required by the surface and any VT-d workaround), and
 * opportunistically install a fence for tiled scanout. Pre-gen4 a
 * fence is mandatory, so a fence failure there unwinds the pin.
 * Caller must hold struct_mutex. Sets PLANE_HAS_FENCE in @out_flags
 * when a fence was attached.
 */
2030 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2031 const struct i915_ggtt_view *view,
2033 unsigned long *out_flags)
2035 struct drm_device *dev = fb->dev;
2036 struct drm_i915_private *dev_priv = to_i915(dev);
2037 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2038 intel_wakeref_t wakeref;
2039 struct i915_vma *vma;
2040 unsigned int pinctl;
2043 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2045 alignment = intel_surf_alignment(fb, 0);
2047 /* Note that the w/a also requires 64 PTE of padding following the
2048 * bo. We currently fill all unused PTE with the shadow page and so
2049 * we should always have valid PTE following the scanout preventing
2052 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2053 alignment = 256 * 1024;
2056 * Global gtt pte registers are special registers which actually forward
2057 * writes to a chunk of system memory. Which means that there is no risk
2058 * that the register values disappear as soon as we call
2059 * intel_runtime_pm_put(), so it is correct to wrap only the
2060 * pin/unpin/fence and not more.
2062 wakeref = intel_runtime_pm_get(dev_priv);
2064 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2068 /* Valleyview is definitely limited to scanning out the first
2069 * 512MiB. Lets presume this behaviour was inherited from the
2070 * g4x display engine and that all earlier gen are similarly
2071 * limited. Testing suggests that it is a little more
2072 * complicated than this. For example, Cherryview appears quite
2073 * happy to scanout from anywhere within its global aperture.
2075 if (HAS_GMCH(dev_priv))
2076 pinctl |= PIN_MAPPABLE;
2078 vma = i915_gem_object_pin_to_display_plane(obj,
2079 alignment, view, pinctl);
2083 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2086 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2087 * fence, whereas 965+ only requires a fence if using
2088 * framebuffer compression. For simplicity, we always, when
2089 * possible, install a fence as the cost is not that onerous.
2091 * If we fail to fence the tiled scanout, then either the
2092 * modeset will reject the change (which is highly unlikely as
2093 * the affected systems, all but one, do not have unmappable
2094 * space) or we will not be able to enable full powersaving
2095 * techniques (also likely not to apply due to various limits
2096 * FBC and the like impose on the size of the buffer, which
2097 * presumably we violated anyway with this unmappable buffer).
2098 * Anyway, it is presumably better to stumble onwards with
2099 * something and try to run the system in a "less than optimal"
2100 * mode that matches the user configuration.
2102 ret = i915_vma_pin_fence(vma);
2103 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2104 i915_gem_object_unpin_from_display_plane(vma);
2109 if (ret == 0 && vma->fence)
2110 *out_flags |= PLANE_HAS_FENCE;
2115 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2117 intel_runtime_pm_put(dev_priv, wakeref);
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if @flags
 * says one was installed) and unpin the vma. Caller must hold
 * struct_mutex (lockdep-asserted).
 */
2121 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2123 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2125 if (flags & PLANE_HAS_FENCE)
2126 i915_vma_unpin_fence(vma);
2127 i915_gem_object_unpin_from_display_plane(vma);
/*
 * Stride for @color_plane: the precomputed rotated pitch for 90/270
 * rotation, otherwise the fb's normal pitch.
 */
2131 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2132 unsigned int rotation)
2134 if (drm_rotation_90_or_270(rotation))
2135 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2137 return fb->pitches[color_plane];
2141 * Convert the x/y offsets into a linear offset.
2142 * Only valid with 0/180 degree rotation, which is fine since linear
2143 * offset is only used with linear buffers on pre-hsw and tiled buffers
2144 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2146 u32 intel_fb_xy_to_linear(int x, int y,
2147 const struct intel_plane_state *state,
2150 const struct drm_framebuffer *fb = state->base.fb;
2151 unsigned int cpp = fb->format->cpp[color_plane];
2152 unsigned int pitch = state->color_plane[color_plane].stride;
/* byte offset = row * stride + column * bytes-per-pixel */
2154 return y * pitch + x * cpp;
2158 * Add the x/y offsets derived from fb->offsets[] to the user
2159 * specified plane src x/y offsets. The resulting x/y offsets
2160 * specify the start of scanout from the beginning of the gtt mapping.
2162 void intel_add_fb_offsets(int *x, int *y,
2163 const struct intel_plane_state *state,
2167 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2168 unsigned int rotation = state->base.rotation;
/* Pick the offsets matching the view in use (rotated vs. normal). */
2170 if (drm_rotation_90_or_270(rotation)) {
2171 *x += intel_fb->rotated[color_plane].x;
2172 *y += intel_fb->rotated[color_plane].y;
2174 *x += intel_fb->normal[color_plane].x;
2175 *y += intel_fb->normal[color_plane].y;
/*
 * Convert the tile-aligned offset delta (old_offset - new_offset,
 * both tile-size aligned, new <= old) into x/y adjustments in whole
 * tiles, then re-minimize x against the pitch. All tile params are
 * in the (possibly rotated) orientation the caller prepared.
 */
2179 static u32 intel_adjust_tile_offset(int *x, int *y,
2180 unsigned int tile_width,
2181 unsigned int tile_height,
2182 unsigned int tile_size,
2183 unsigned int pitch_tiles,
2187 unsigned int pitch_pixels = pitch_tiles * tile_width;
2190 WARN_ON(old_offset & (tile_size - 1));
2191 WARN_ON(new_offset & (tile_size - 1));
2192 WARN_ON(new_offset > old_offset);
2194 tiles = (old_offset - new_offset) / tile_size;
2196 *y += tiles / pitch_tiles * tile_height;
2197 *x += tiles % pitch_tiles * tile_width;
2199 /* minimize x in case it got needlessly big */
2200 *y += *x / pitch_pixels * tile_height;
/*
 * True when the surface uses no tiling. @color_plane is currently
 * unused here; only the modifier decides.
 */
2206 static bool is_surface_linear(u64 modifier, int color_plane)
2208 return modifier == DRM_FORMAT_MOD_LINEAR;
/*
 * Move the difference between @old_offset and @new_offset into the
 * x/y coordinates: tiled surfaces go through the tile-based helper
 * (with width/height swapped for 90/270), linear surfaces compute
 * row/column directly from the pitch and cpp.
 */
2211 static u32 intel_adjust_aligned_offset(int *x, int *y,
2212 const struct drm_framebuffer *fb,
2214 unsigned int rotation,
2216 u32 old_offset, u32 new_offset)
2218 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2219 unsigned int cpp = fb->format->cpp[color_plane];
2221 WARN_ON(new_offset > old_offset);
2223 if (!is_surface_linear(fb->modifier, color_plane)) {
2224 unsigned int tile_size, tile_width, tile_height;
2225 unsigned int pitch_tiles;
2227 tile_size = intel_tile_size(dev_priv);
2228 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2230 if (drm_rotation_90_or_270(rotation)) {
2231 pitch_tiles = pitch / tile_height;
2232 swap(tile_width, tile_height);
2234 pitch_tiles = pitch / (tile_width * cpp);
2237 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2238 tile_size, pitch_tiles,
2239 old_offset, new_offset);
2241 old_offset += *y * pitch + *x * cpp;
2243 *y = (old_offset - new_offset) / pitch;
2244 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2251 * Adjust the tile offset by moving the difference into
/* Plane-state convenience wrapper around intel_adjust_aligned_offset(). */
2254 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2255 const struct intel_plane_state *state,
2257 u32 old_offset, u32 new_offset)
2259 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2260 state->base.rotation,
2261 state->color_plane[color_plane].stride,
2262 old_offset, new_offset);
2266 * Computes the aligned offset to the base tile and adjusts
2267 * x, y. bytes per pixel is assumed to be a power-of-two.
2269 * In the 90/270 rotated case, x and y are assumed
2270 * to be already rotated to match the rotated GTT view, and
2271 * pitch is the tile_height aligned framebuffer height.
2273 * This function is used when computing the derived information
2274 * under intel_framebuffer, so using any of that information
2275 * here is not allowed. Anything under drm_framebuffer can be
2276 * used. This is why the user has to pass in the pitch since it
2277 * is specified in the rotated orientation.
2279 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2281 const struct drm_framebuffer *fb,
2284 unsigned int rotation,
2287 unsigned int cpp = fb->format->cpp[color_plane];
2288 u32 offset, offset_aligned;
2293 if (!is_surface_linear(fb->modifier, color_plane)) {
2294 unsigned int tile_size, tile_width, tile_height;
2295 unsigned int tile_rows, tiles, pitch_tiles;
2297 tile_size = intel_tile_size(dev_priv);
2298 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2300 if (drm_rotation_90_or_270(rotation)) {
2301 pitch_tiles = pitch / tile_height;
2302 swap(tile_width, tile_height);
2304 pitch_tiles = pitch / (tile_width * cpp);
2307 tile_rows = *y / tile_height;
2310 tiles = *x / tile_width;
/* Align down to @alignment; fold the remainder back into x/y. */
2313 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2314 offset_aligned = offset & ~alignment;
2316 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2317 tile_size, pitch_tiles,
2318 offset, offset_aligned);
2320 offset = *y * pitch + *x * cpp;
2321 offset_aligned = offset & ~alignment;
2323 *y = (offset & alignment) / pitch;
2324 *x = ((offset & alignment) - *y * pitch) / cpp;
2327 return offset_aligned;
2330 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2331 const struct intel_plane_state *state,
2334 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2335 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2336 const struct drm_framebuffer *fb = state->base.fb;
2337 unsigned int rotation = state->base.rotation;
2338 int pitch = state->color_plane[color_plane].stride;
2341 if (intel_plane->id == PLANE_CURSOR)
2342 alignment = intel_cursor_alignment(dev_priv);
2344 alignment = intel_surf_alignment(fb, color_plane);
2346 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2347 pitch, rotation, alignment);
2350 /* Convert the fb->offset[] into x/y offsets */
2351 static int intel_fb_offset_to_xy(int *x, int *y,
2352 const struct drm_framebuffer *fb,
2355 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2356 unsigned int height;
/*
 * Tiled surfaces must start on a tile boundary; an offsets[] entry
 * that isn't tile-size aligned cannot be expressed as x/y.
 */
2358 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2359 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2360 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2361 fb->offsets[color_plane], color_plane);
/* Tile-height align so the overflow check covers the padded plane. */
2365 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2366 height = ALIGN(height, intel_tile_height(fb, color_plane));
2368 /* Catch potential overflows early */
2369 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2370 fb->offsets[color_plane])) {
2371 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2372 fb->offsets[color_plane], fb->pitches[color_plane],
/*
 * Convert the byte offset into x/y (rotation 0, no extra
 * alignment): the result is returned through *x/*y.
 */
2380 intel_adjust_aligned_offset(x, y,
2381 fb, color_plane, DRM_MODE_ROTATE_0,
2382 fb->pitches[color_plane],
2383 fb->offsets[color_plane], 0);
2388 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2390 switch (fb_modifier) {
2391 case I915_FORMAT_MOD_X_TILED:
2392 return I915_TILING_X;
2393 case I915_FORMAT_MOD_Y_TILED:
2394 case I915_FORMAT_MOD_Y_TILED_CCS:
2395 return I915_TILING_Y;
2397 return I915_TILING_NONE;
2402 * From the Sky Lake PRM:
2403 * "The Color Control Surface (CCS) contains the compression status of
2404 * the cache-line pairs. The compression state of the cache-line pair
2405 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2406 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2407 * cache-line-pairs. CCS is always Y tiled."
2409 * Since cache line pairs refers to horizontally adjacent cache lines,
2410 * each cache line in the CCS corresponds to an area of 32x16 cache
2411 * lines on the main surface. Since each pixel is 4 bytes, this gives
2412 * us a ratio of one byte in the CCS for each 8x16 pixels in the
/*
 * Two-plane descriptions for CCS framebuffers: plane 0 is the 4 bpp
 * main surface, plane 1 the 1-byte-per-entry CCS. The hsub=8/vsub=16
 * values encode the 8x16-pixel main-surface area covered per CCS byte
 * (per the ratio derived above), not chroma subsampling.
 */
2415 static const struct drm_format_info ccs_formats[] = {
2416 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2417 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2418 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2419 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2422 static const struct drm_format_info *
2423 lookup_format_info(const struct drm_format_info formats[],
2424 int num_formats, u32 format)
2428 for (i = 0; i < num_formats; i++) {
2429 if (formats[i].format == format)
2436 static const struct drm_format_info *
2437 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2439 switch (cmd->modifier[0]) {
2440 case I915_FORMAT_MOD_Y_TILED_CCS:
2441 case I915_FORMAT_MOD_Yf_TILED_CCS:
2442 return lookup_format_info(ccs_formats,
2443 ARRAY_SIZE(ccs_formats),
2450 bool is_ccs_modifier(u64 modifier)
2452 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2453 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
/*
 * Fill in the derived layout information for @fb: per-color-plane x/y
 * offsets for the normal and rotated GTT views, the rotation info used
 * for the rotated mapping, and a final check that the backing object
 * is large enough.
 */
2457 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2458 struct drm_framebuffer *fb)
2460 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2461 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2462 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2463 u32 gtt_offset_rotated = 0;
2464 unsigned int max_size = 0;
2465 int i, num_planes = fb->format->num_planes;
2466 unsigned int tile_size = intel_tile_size(dev_priv);
2468 for (i = 0; i < num_planes; i++) {
2469 unsigned int width, height;
2470 unsigned int cpp, size;
2475 cpp = fb->format->cpp[i];
2476 width = drm_framebuffer_plane_width(fb->width, fb, i);
2477 height = drm_framebuffer_plane_height(fb->height, fb, i);
/* Turn fb->offsets[i] into an (x, y) position within the plane. */
2479 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2481 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
/* Plane 1 of a CCS fb is the compression control surface. */
2486 if (is_ccs_modifier(fb->modifier) && i == 1) {
2487 int hsub = fb->format->hsub;
2488 int vsub = fb->format->vsub;
2489 int tile_width, tile_height;
2493 intel_tile_dims(fb, i, &tile_width, &tile_height);
2495 tile_height *= vsub;
/* Scale CCS coords up to main-surface pixels before comparing. */
2497 ccs_x = (x * hsub) % tile_width;
2498 ccs_y = (y * vsub) % tile_height;
2499 main_x = intel_fb->normal[0].x % tile_width;
2500 main_y = intel_fb->normal[0].y % tile_height;
2503 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2504 * x/y offsets must match between CCS and the main surface.
2506 if (main_x != ccs_x || main_y != ccs_y) {
2507 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2510 intel_fb->normal[0].x,
2511 intel_fb->normal[0].y,
2518 * The fence (if used) is aligned to the start of the object
2519 * so having the framebuffer wrap around across the edge of the
2520 * fenced region doesn't really work. We have no API to configure
2521 * the fence start offset within the object (nor could we probably
2522 * on gen2/3). So it's just easier if we just require that the
2523 * fb layout agrees with the fence layout. We already check that the
2524 * fb stride matches the fence stride elsewhere.
2526 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2527 (x + width) * cpp > fb->pitches[i]) {
2528 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2534 * First pixel of the framebuffer from
2535 * the start of the normal gtt mapping.
2537 intel_fb->normal[i].x = x;
2538 intel_fb->normal[i].y = y;
/* Compute the aligned tile offset; the remainder is folded into x/y. */
2540 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2544 offset /= tile_size;
2546 if (!is_surface_linear(fb->modifier, i)) {
2547 unsigned int tile_width, tile_height;
2548 unsigned int pitch_tiles;
2551 intel_tile_dims(fb, i, &tile_width, &tile_height);
/* Describe this plane, in units of tiles, for the rotated view. */
2553 rot_info->plane[i].offset = offset;
2554 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2555 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2556 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2558 intel_fb->rotated[i].pitch =
2559 rot_info->plane[i].height * tile_height;
2561 /* how many tiles does this plane need */
2562 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2564 * If the plane isn't horizontally tile aligned,
2565 * we need one more tile.
2570 /* rotate the x/y offsets to match the GTT view */
2576 rot_info->plane[i].width * tile_width,
2577 rot_info->plane[i].height * tile_height,
2578 DRM_MODE_ROTATE_270);
2582 /* rotate the tile dimensions to match the GTT view */
2583 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2584 swap(tile_width, tile_height);
2587 * We only keep the x/y offsets, so push all of the
2588 * gtt offset into the x/y offsets.
2590 intel_adjust_tile_offset(&x, &y,
2591 tile_width, tile_height,
2592 tile_size, pitch_tiles,
2593 gtt_offset_rotated * tile_size, 0);
/* Advance the rotated-view cursor past this plane's tiles. */
2595 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2598 * First pixel of the framebuffer from
2599 * the start of the rotated gtt mapping.
2601 intel_fb->rotated[i].x = x;
2602 intel_fb->rotated[i].y = y;
/* Linear plane: tile count straight from pitch/offset arithmetic. */
2604 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2605 x * cpp, tile_size);
2608 /* how many tiles in total needed in the bo */
2609 max_size = max(max_size, offset + size);
/* Reject an fb whose layout needs more backing than the object has. */
2612 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2613 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2614 mul_u32_u32(max_size, tile_size), obj->base.size);
/*
 * Translate an i9xx DISPPLANE_* control-register pixel format to the
 * matching DRM fourcc.
 */
2621 static int i9xx_format_to_fourcc(int format)
2624 case DISPPLANE_8BPP:
2625 return DRM_FORMAT_C8;
2626 case DISPPLANE_BGRX555:
2627 return DRM_FORMAT_XRGB1555;
2628 case DISPPLANE_BGRX565:
2629 return DRM_FORMAT_RGB565;
2631 case DISPPLANE_BGRX888:
2632 return DRM_FORMAT_XRGB8888;
2633 case DISPPLANE_RGBX888:
2634 return DRM_FORMAT_XBGR8888;
2635 case DISPPLANE_BGRX101010:
2636 return DRM_FORMAT_XRGB2101010;
2637 case DISPPLANE_RGBX101010:
2638 return DRM_FORMAT_XBGR2101010;
/*
 * Translate a skl+ PLANE_CTL format field (plus the RGB-order and
 * alpha control bits) to the matching DRM fourcc.
 */
2642 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2645 case PLANE_CTL_FORMAT_RGB_565:
2646 return DRM_FORMAT_RGB565;
2647 case PLANE_CTL_FORMAT_NV12:
2648 return DRM_FORMAT_NV12;
2649 case PLANE_CTL_FORMAT_P010:
2650 return DRM_FORMAT_P010;
2651 case PLANE_CTL_FORMAT_P012:
2652 return DRM_FORMAT_P012;
2653 case PLANE_CTL_FORMAT_P016:
2654 return DRM_FORMAT_P016;
2655 case PLANE_CTL_FORMAT_Y210:
2656 return DRM_FORMAT_Y210;
2657 case PLANE_CTL_FORMAT_Y212:
2658 return DRM_FORMAT_Y212;
2659 case PLANE_CTL_FORMAT_Y216:
2660 return DRM_FORMAT_Y216;
2661 case PLANE_CTL_FORMAT_Y410:
2662 return DRM_FORMAT_XVYU2101010;
2663 case PLANE_CTL_FORMAT_Y412:
2664 return DRM_FORMAT_XVYU12_16161616;
2665 case PLANE_CTL_FORMAT_Y416:
2666 return DRM_FORMAT_XVYU16161616;
/*
 * For the RGB cases below, rgb_order selects the BGR vs RGB fourcc
 * and alpha selects the A- vs X- variant (the branching lines are
 * between the returns).
 */
2668 case PLANE_CTL_FORMAT_XRGB_8888:
2671 return DRM_FORMAT_ABGR8888;
2673 return DRM_FORMAT_XBGR8888;
2676 return DRM_FORMAT_ARGB8888;
2678 return DRM_FORMAT_XRGB8888;
2680 case PLANE_CTL_FORMAT_XRGB_2101010:
2682 return DRM_FORMAT_XBGR2101010;
2684 return DRM_FORMAT_XRGB2101010;
2685 case PLANE_CTL_FORMAT_XRGB_16161616F:
2688 return DRM_FORMAT_ABGR16161616F;
2690 return DRM_FORMAT_XBGR16161616F;
2693 return DRM_FORMAT_ARGB16161616F;
2695 return DRM_FORMAT_XRGB16161616F;
/*
 * Wrap the (BIOS-)preallocated stolen-memory region described by
 * @plane_config in a GEM object and initialize plane_config->fb around
 * it. The return value is truthy on success (see the caller,
 * intel_find_initial_plane_obj()).
 */
2701 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2702 struct intel_initial_plane_config *plane_config)
2704 struct drm_device *dev = crtc->base.dev;
2705 struct drm_i915_private *dev_priv = to_i915(dev);
2706 struct drm_i915_gem_object *obj = NULL;
2707 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2708 struct drm_framebuffer *fb = &plane_config->fb->base;
/* Page-align the region: base rounded down, end rounded up. */
2709 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2710 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2713 size_aligned -= base_aligned;
2715 if (plane_config->size == 0)
2718 /* If the FB is too big, just don't use it since fbdev is not very
2719 * important and we should probably use that space with FBC or other
2721 if (size_aligned * 2 > dev_priv->stolen_usable_size)
/* Only these modifiers are accepted for the initial (BIOS) fb. */
2724 switch (fb->modifier) {
2725 case DRM_FORMAT_MOD_LINEAR:
2726 case I915_FORMAT_MOD_X_TILED:
2727 case I915_FORMAT_MOD_Y_TILED:
2730 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2735 mutex_lock(&dev->struct_mutex);
2736 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2740 mutex_unlock(&dev->struct_mutex);
/* Propagate the BIOS tiling/stride onto the new object. */
2744 switch (plane_config->tiling) {
2745 case I915_TILING_NONE:
2749 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2752 MISSING_CASE(plane_config->tiling);
/* Build a mode_cmd mirroring the fb so intel_framebuffer_init() can run. */
2756 mode_cmd.pixel_format = fb->format->format;
2757 mode_cmd.width = fb->width;
2758 mode_cmd.height = fb->height;
2759 mode_cmd.pitches[0] = fb->pitches[0];
2760 mode_cmd.modifier[0] = fb->modifier;
2761 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2763 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2764 DRM_DEBUG_KMS("intel fb init failed\n");
2769 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
/* Failure path: drop the object reference taken above. */
2773 i915_gem_object_put(obj);
2778 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2779 struct intel_plane_state *plane_state,
2782 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2784 plane_state->base.visible = visible;
2787 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2789 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
/* Rebuild crtc_state->active_planes from the (unique-id) plane_mask. */
2792 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
2794 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2795 struct drm_plane *plane;
2798 * Active_planes aliases if multiple "primary" or cursor planes
2799 * have been used on the same (or wrong) pipe. plane_mask uses
2800 * unique ids, hence we can use that to reconstruct active_planes.
2802 crtc_state->active_planes = 0;
/* Walk every plane present in plane_mask and set its id bit. */
2804 drm_for_each_plane_mask(plane, &dev_priv->drm,
2805 crtc_state->base.plane_mask)
2806 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
/*
 * Disable @plane on @crtc outside the atomic commit machinery, fixing
 * up the current software state to match (used when sanitizing the
 * state inherited from the BIOS).
 */
2809 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2810 struct intel_plane *plane)
2812 struct intel_crtc_state *crtc_state =
2813 to_intel_crtc_state(crtc->base.state);
2814 struct intel_plane_state *plane_state =
2815 to_intel_plane_state(plane->base.state);
2817 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2818 plane->base.base.id, plane->base.name,
2819 crtc->base.base.id, crtc->base.name);
/* Mark the plane invisible, then rebuild active_planes to match. */
2821 intel_set_plane_visible(crtc_state, plane_state, false);
2822 fixup_active_planes(crtc_state);
/*
 * The primary plane needs extra preparation before it goes away -
 * see intel_pre_disable_primary_noatomic().
 */
2824 if (plane->id == PLANE_PRIMARY)
2825 intel_pre_disable_primary_noatomic(&crtc->base);
2827 intel_disable_plane(plane, crtc_state);
/*
 * Try to take over the framebuffer the BIOS left on this crtc's
 * primary plane: either wrap the preallocated memory in a fresh fb,
 * or share the fb another crtc already reconstructed for the same
 * base address. If neither works the plane is disabled.
 */
2831 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2832 struct intel_initial_plane_config *plane_config)
2834 struct drm_device *dev = intel_crtc->base.dev;
2835 struct drm_i915_private *dev_priv = to_i915(dev);
2837 struct drm_i915_gem_object *obj;
2838 struct drm_plane *primary = intel_crtc->base.primary;
2839 struct drm_plane_state *plane_state = primary->state;
2840 struct intel_plane *intel_plane = to_intel_plane(primary);
2841 struct intel_plane_state *intel_state =
2842 to_intel_plane_state(plane_state);
2843 struct drm_framebuffer *fb;
2845 if (!plane_config->fb)
/* Preferred path: wrap the preallocated memory in a new fb. */
2848 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2849 fb = &plane_config->fb->base;
2853 kfree(plane_config->fb);
2856 * Failed to alloc the obj, check to see if we should share
2857 * an fb with another CRTC instead
2859 for_each_crtc(dev, c) {
2860 struct intel_plane_state *state;
2862 if (c == &intel_crtc->base)
2865 if (!to_intel_crtc(c)->active)
2868 state = to_intel_plane_state(c->primary->state);
/* Same scanout base address => same BIOS fb; share a reference. */
2872 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2873 fb = state->base.fb;
2874 drm_framebuffer_get(fb);
2880 * We've failed to reconstruct the BIOS FB. Current display state
2881 * indicates that the primary plane is visible, but has a NULL FB,
2882 * which will lead to problems later if we don't fix it up. The
2883 * simplest solution is to just disable the primary plane now and
2884 * pretend the BIOS never had it enabled.
2886 intel_plane_disable_noatomic(intel_crtc, intel_plane);
/* Found an fb: derive the GGTT view/stride and pin it for scanout. */
2891 intel_state->base.rotation = plane_config->rotation;
2892 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2893 intel_state->base.rotation);
2894 intel_state->color_plane[0].stride =
2895 intel_fb_pitch(fb, 0, intel_state->base.rotation);
2897 mutex_lock(&dev->struct_mutex);
2899 intel_pin_and_fence_fb_obj(fb,
2901 intel_plane_uses_fence(intel_state),
2902 &intel_state->flags);
2903 mutex_unlock(&dev->struct_mutex);
2904 if (IS_ERR(intel_state->vma)) {
2905 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2906 intel_crtc->pipe, PTR_ERR(intel_state->vma));
/* Pin failed: clear the vma and drop our fb reference. */
2908 intel_state->vma = NULL;
2909 drm_framebuffer_put(fb);
2913 obj = intel_fb_obj(fb);
2914 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
/* Full-screen src/dst rectangles for the takeover state. */
2916 plane_state->src_x = 0;
2917 plane_state->src_y = 0;
2918 plane_state->src_w = fb->width << 16;
2919 plane_state->src_h = fb->height << 16;
2921 plane_state->crtc_x = 0;
2922 plane_state->crtc_y = 0;
2923 plane_state->crtc_w = fb->width;
2924 plane_state->crtc_h = fb->height;
2926 intel_state->base.src = drm_plane_state_src(plane_state);
2927 intel_state->base.dst = drm_plane_state_dest(plane_state);
/*
 * NOTE(review): a tiled BIOS fb sets preserve_bios_swizzle -
 * presumably to keep the BIOS swizzle configuration; confirm where
 * the flag is consumed.
 */
2929 if (i915_gem_object_is_tiled(obj))
2930 dev_priv->preserve_bios_swizzle = true;
2932 plane_state->fb = fb;
2933 plane_state->crtc = &intel_crtc->base;
/* Mark the primary plane's frontbuffer bit on the object. */
2935 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2936 &obj->frontbuffer_bits);
/*
 * Maximum plane source width (in pixels) supported on skl+ for the
 * given fb modifier. The per-case limits depend on the modifier and
 * cpp (the returns sit between the case labels); unknown modifiers
 * hit MISSING_CASE().
 */
2939 static int skl_max_plane_width(const struct drm_framebuffer *fb,
2941 unsigned int rotation)
2943 int cpp = fb->format->cpp[color_plane];
2945 switch (fb->modifier) {
2946 case DRM_FORMAT_MOD_LINEAR:
2947 case I915_FORMAT_MOD_X_TILED:
2960 case I915_FORMAT_MOD_Y_TILED_CCS:
2961 case I915_FORMAT_MOD_Yf_TILED_CCS:
2962 /* FIXME AUX plane? */
2963 case I915_FORMAT_MOD_Y_TILED:
2964 case I915_FORMAT_MOD_Yf_TILED:
2979 MISSING_CASE(fb->modifier);
2985 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
2986 int main_x, int main_y, u32 main_offset)
2988 const struct drm_framebuffer *fb = plane_state->base.fb;
2989 int hsub = fb->format->hsub;
2990 int vsub = fb->format->vsub;
2991 int aux_x = plane_state->color_plane[1].x;
2992 int aux_y = plane_state->color_plane[1].y;
2993 u32 aux_offset = plane_state->color_plane[1].offset;
2994 u32 alignment = intel_surf_alignment(fb, 1);
2996 while (aux_offset >= main_offset && aux_y <= main_y) {
2999 if (aux_x == main_x && aux_y == main_y)
3002 if (aux_offset == 0)
3007 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3008 aux_offset, aux_offset - alignment);
3009 aux_x = x * hsub + aux_x % hsub;
3010 aux_y = y * vsub + aux_y % vsub;
3013 if (aux_x != main_x || aux_y != main_y)
3016 plane_state->color_plane[1].offset = aux_offset;
3017 plane_state->color_plane[1].x = aux_x;
3018 plane_state->color_plane[1].y