Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm...
[sfrench/cifs-2.6.git] / drivers/gpu/drm/i915/intel_display.c
index 1464e472ce44ed28d32694a16d661d29c705c5e7..8c36a11a9a57040bc93ae9fa4dc37cec7e18f84a 100644
@@ -1489,8 +1489,11 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 /* SBI access */
 static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+               enum intel_sbi_destination destination)
 {
+       u32 tmp;
+
        WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
        if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
@@ -1499,13 +1502,14 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
                return;
        }
 
-       I915_WRITE(SBI_ADDR,
-                       (reg << 16));
-       I915_WRITE(SBI_DATA,
-                       value);
-       I915_WRITE(SBI_CTL_STAT,
-                       SBI_BUSY |
-                       SBI_CTL_OP_CRWR);
+       I915_WRITE(SBI_ADDR, (reg << 16));
+       I915_WRITE(SBI_DATA, value);
+
+       if (destination == SBI_ICLK)
+               tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+       else
+               tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+       I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
@@ -1515,8 +1519,10 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
 }
 
 static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+              enum intel_sbi_destination destination)
 {
+       u32 value = 0;
+
        WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
        if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
@@ -1525,11 +1531,13 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
                return 0;
        }
 
-       I915_WRITE(SBI_ADDR,
-                       (reg << 16));
-       I915_WRITE(SBI_CTL_STAT,
-                       SBI_BUSY |
-                       SBI_CTL_OP_CRRD);
+       I915_WRITE(SBI_ADDR, (reg << 16));
+
+       if (destination == SBI_ICLK)
+               value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+       else
+               value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+       I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
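For context on the new destination argument: it selects both the sideband target and the opcode pair (CRRD/CRWR for the iCLK unit, IORD/IOWR for the FDI mPHY). A minimal, purely illustrative caller sketch — example_sbi_usage is a hypothetical name, the registers are ones touched later in this patch:

/* Illustrative only -- not part of the patch. */
static void example_sbi_usage(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	mutex_lock(&dev_priv->dpio_lock);	/* both helpers WARN if unheld */

	/* read-modify-write of an iCLK register */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp | SBI_SSCCTL_DISABLE, SBI_ICLK);

	/* same pattern against an FDI mPHY register */
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2008, tmp | (1 << 11), SBI_MPHY);

	mutex_unlock(&dev_priv->dpio_lock);
}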
@@ -2361,18 +2369,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
                           FDI_FE_ERRC_ENABLE);
 }
 
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-       flags |= FDI_PHASE_SYNC_OVR(pipe);
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
-       flags |= FDI_PHASE_SYNC_EN(pipe);
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
-       POSTING_READ(SOUTH_CHICKEN1);
-}
-
 static void ivb_modeset_global_resources(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2547,8 +2543,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
        POSTING_READ(reg);
        udelay(150);
 
-       cpt_phase_pointer_enable(dev, pipe);
-
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@ -2681,8 +2675,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        POSTING_READ(reg);
        udelay(150);
 
-       cpt_phase_pointer_enable(dev, pipe);
-
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@ -2817,17 +2809,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
        udelay(100);
 }
 
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-       flags &= ~(FDI_PHASE_SYNC_EN(pipe));
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
-       flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
-       POSTING_READ(SOUTH_CHICKEN1);
-}
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -2854,8 +2835,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev)) {
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-       } else if (HAS_PCH_CPT(dev)) {
-               cpt_phase_pointer_disable(dev, pipe);
        }
 
        /* still set train pattern 1 */
@@ -2959,8 +2938,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 
        /* Disable SSCCTL */
        intel_sbi_write(dev_priv, SBI_SSCCTL6,
-                               intel_sbi_read(dev_priv, SBI_SSCCTL6) |
-                                       SBI_SSCCTL_DISABLE);
+                       intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+                               SBI_SSCCTL_DISABLE,
+                       SBI_ICLK);
 
        /* 20MHz is a corner case which is out of range for the 7-bit divisor */
        if (crtc->mode.clock == 20000) {
@@ -3001,33 +2981,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
                        phaseinc);
 
        /* Program SSCDIVINTPHASE6 */
-       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
-       intel_sbi_write(dev_priv,
-                       SBI_SSCDIVINTPHASE6,
-                       temp);
+       intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
 
        /* Program SSCAUXDIV */
-       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-       intel_sbi_write(dev_priv,
-                       SBI_SSCAUXDIV6,
-                       temp);
-
+       intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
 
        /* Enable modulator and associated divider */
-       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
-       intel_sbi_write(dev_priv,
-                       SBI_SSCCTL6,
-                       temp);
+       intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
        /* Wait for initialization time */
        udelay(24);
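A rough worked check of the "20MHz is a corner case" comment above, assuming (these constants sit in the elided body of the function, not in this hunk) a 172.8 MHz iCLK virtual-root frequency and the 64-step phase interpolator implied by the SBI_SSCDIVINTPHASE fields:

/* Back-of-the-envelope sketch, assumptions as stated above:
 *   108 MHz pixel clock:  172800000 / 108000 = 1600
 *     -> divsel = 1600 / 64 - 2 = 23, phaseinc = 1600 % 64 = 0   (fits)
 *   20 MHz pixel clock:   172800000 / 20000  = 8640
 *     -> 8640 / 64 - 2 = 133, which overflows the 7-bit divsel field,
 *        so the SBI_SSCAUXDIV_FINALDIV2SEL divide-by-two is enabled and
 *        the divider chain only needs 8640 / 2 = 4320
 *     -> divsel = 4320 / 64 - 2 = 65, phaseinc = 4320 % 64 = 32
 */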
@@ -4077,6 +4049,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                        }
                }
 
+               if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+                       /* Use VBT settings if we have an eDP panel */
+                       unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+                       if (edp_bpc && edp_bpc < display_bpc) {
+                               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+                               display_bpc = edp_bpc;
+                       }
+                       continue;
+               }
+
                /*
                 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
                 * through, clamp it down.  (Note: >12bpc will be caught below.)
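A quick numeric sanity check of the new eDP clamp (illustrative numbers; the VBT field is bits per pixel, so dividing by three color channels gives bits per component):

/* e.g. a VBT that reports an 18 bpp eDP panel:
 *	edp_bpc = 18 / 3 = 6
 * so a display_bpc of 8 from the earlier connector checks is clamped down
 * to 6, and the continue skips the HDMI clamping below for this encoder.
 */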
@@ -4749,10 +4732,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        return ret;
 }
 
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4866,6 +4846,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
        }
 }
 
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+       bool has_vga = false;
+       bool is_sdv = false;
+       u32 tmp;
+
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       has_vga = true;
+                       break;
+               }
+       }
+
+       if (!has_vga)
+               return;
+
+       /* XXX: Rip out SDV support once Haswell ships for real. */
+       if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+               is_sdv = true;
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_DISABLE;
+       tmp |= SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+       udelay(24);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+       if (!is_sdv) {
+               tmp = I915_READ(SOUTH_CHICKEN2);
+               tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+               I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+               if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+                                      FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+                       DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+               tmp = I915_READ(SOUTH_CHICKEN2);
+               tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+               I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+               if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+                                       FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+                                      100))
+                       DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+       tmp &= ~(0xFF << 24);
+       tmp |= (0x12 << 24);
+       intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+               tmp &= ~(0x3 << 6);
+               tmp |= (1 << 6) | (1 << 0);
+               intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+       }
+
+       if (is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+               tmp |= 0x7FFF;
+               intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+       if (is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+               intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+               intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+               tmp |= (0x3F << 8);
+               intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+               tmp |= (0x3F << 8);
+               intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+               tmp &= ~(7 << 13);
+               tmp |= (5 << 13);
+               intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+               tmp &= ~(7 << 13);
+               tmp |= (5 << 13);
+               intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+               tmp |= (1 << 27);
+               intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+               tmp |= (1 << 27);
+               intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+               tmp &= ~(0xF << 28);
+               tmp |= (4 << 28);
+               intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+               tmp &= ~(0xF << 28);
+               tmp |= (4 << 28);
+               intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+       }
+
+       /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+       tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+       tmp |= SBI_DBUFF0_ENABLE;
+       intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+               ironlake_init_pch_refclk(dev);
+       else if (HAS_PCH_LPT(dev))
+               lpt_init_pch_refclk(dev);
+}
+
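The long mPHY setup in lpt_init_pch_refclk() above is a straight transcription of the programming sequence; every step is the same read-modify-write against an SBI_MPHY register. Purely as an illustration of that recurring pattern — lpt_mphy_rmw, lpt_mphy_tuning and lpt_apply_mphy_tuning are hypothetical names, and the masks below are copied from a few of the unconditional steps above — it could be factored like so:

/* Illustrative refactoring sketch only -- not part of the patch. */
struct lpt_mphy_rmw {
	u16 reg;
	u32 clear;
	u32 set;
};

static const struct lpt_mphy_rmw lpt_mphy_tuning[] = {
	{ 0x2008, 0,          (1 << 11)   },
	{ 0x2108, 0,          (1 << 11)   },
	{ 0x208C, 0xFF,       0x1C        },
	{ 0x218C, 0xFF,       0x1C        },
	{ 0x2098, 0xFF << 16, 0x1C << 16  },
	{ 0x2198, 0xFF << 16, 0x1C << 16  },
};

static void lpt_apply_mphy_tuning(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpt_mphy_tuning); i++) {
		u32 tmp = intel_sbi_read(dev_priv, lpt_mphy_tuning[i].reg,
					 SBI_MPHY);
		tmp &= ~lpt_mphy_tuning[i].clear;
		tmp |= lpt_mphy_tuning[i].set;
		intel_sbi_write(dev_priv, lpt_mphy_tuning[i].reg, tmp,
				SBI_MPHY);
	}
}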
 static int ironlake_get_refclk(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -5110,6 +5266,17 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
        }
 }
 
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+       /*
+        * Account for spread spectrum to avoid
+        * oversubscribing the link. Max center spread
+        * is 2.5%; use 5% for safety's sake.
+        */
+       u32 bps = target_clock * bpp * 21 / 20;
+       return bps / (link_bw * 8) + 1;
+}
+
 static void ironlake_set_m_n(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode)
@@ -5163,15 +5330,9 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
        else
                target_clock = adjusted_mode->clock;
 
-       if (!lane) {
-               /*
-                * Account for spread spectrum to avoid
-                * oversubscribing the link. Max center spread
-                * is 2.5%; use 5% for safety's sake.
-                */
-               u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
-               lane = bps / (link_bw * 8) + 1;
-       }
+       if (!lane)
+               lane = ironlake_get_lanes_required(target_clock, link_bw,
+                                                  intel_crtc->bpp);
 
        intel_crtc->fdi_lanes = lane;
 
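As a worked example of the extracted helper (illustrative numbers, not from the patch): a 1080p timing at a 148,500 kHz pixel clock and 24 bpp over a 270,000 kHz FDI link works out as follows, with the 21/20 factor providing the 5% spread-spectrum headroom the comment describes.

/* Worked example, illustrative numbers only:
 *   target_clock = 148500 (kHz), bpp = 24, link_bw = 270000 (kHz)
 *   bps  = 148500 * 24 * 21 / 20   = 3742200
 *   lane = 3742200 / (270000 * 8) + 1 = 1 + 1 = 2 FDI lanes
 */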
@@ -6640,11 +6801,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
-       if (work == NULL || !work->pending) {
+
+       /* Ensure we don't miss a work->pending update ... */
+       smp_rmb();
+
+       if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }
 
+       /* and that the unpin work is consistent wrt ->pending. */
+       smp_rmb();
+
        intel_crtc->unpin_work = NULL;
 
        if (work->event)
@@ -6686,16 +6854,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
 
+       /* NB: An MMIO update of the plane base pointer will also
+        * generate a page-flip completion irq, i.e. every modeset
+        * is also accompanied by a spurious intel_prepare_page_flip().
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work) {
-               if ((++intel_crtc->unpin_work->pending) > 1)
-                       DRM_ERROR("Prepared flip multiple times\n");
-       } else {
-               DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-       }
+       if (intel_crtc->unpin_work)
+               atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+       /* Ensure that the work item is consistent when activating it ... */
+       smp_wmb();
+       atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+       /* and that it is marked active as soon as the irq could fire. */
+       smp_wmb();
+}
+
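The two smp_rmb()s in do_intel_finish_page_flip() pair with the smp_wmb()s in intel_mark_page_flip_active(), and atomic_inc_not_zero() in intel_prepare_page_flip() keeps the spurious, modeset-generated flip-pending interrupt mentioned in the NB comment from completing a flip that was never armed. A condensed walk-through, assuming (not shown in this hunk) that INTEL_FLIP_PENDING and INTEL_FLIP_COMPLETE are successive non-zero values of the new atomic:

/* Illustrative ordering sketch:
 * 1. intel_genX_queue_flip() fills in intel_crtc->unpin_work, writes the
 *    flip commands and, just before intel_ring_advance(), calls
 *    intel_mark_page_flip_active():
 *        smp_wmb(); atomic_set(&work->pending, INTEL_FLIP_PENDING); smp_wmb();
 * 2. The flip-pending interrupt runs intel_prepare_page_flip():
 *        atomic_inc_not_zero(&work->pending)  ->  INTEL_FLIP_COMPLETE
 *    A spurious call caused by an MMIO base-pointer update sees
 *    pending == 0 (or unpin_work == NULL) and changes nothing.
 * 3. The flip-done interrupt runs do_intel_finish_page_flip():
 *        smp_rmb(); bail unless pending >= INTEL_FLIP_COMPLETE; smp_rmb();
 *    and only then is unpin_work consumed and cleared.
 */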
 static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
@@ -6729,6 +6906,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6769,6 +6948,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
 
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6815,6 +6995,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6857,6 +7039,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6911,6 +7095,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -7679,10 +7865,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
                        DRM_DEBUG_KMS("encoder changed, full mode switch\n");
                        config->mode_changed = true;
                }
-
-               /* Disable all disconnected encoders. */
-               if (connector->base.status == connector_status_disconnected)
-                       connector->new_encoder = NULL;
        }
        /* connector->new_encoder is now updated for all connectors. */
 
@@ -8099,8 +8281,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                        intel_encoder_clones(encoder);
        }
 
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               ironlake_init_pch_refclk(dev);
+       intel_init_pch_refclk(dev);
 
        drm_helper_move_panel_connectors_to_head(dev);
 }
@@ -8704,6 +8885,23 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
         * the crtc fixup. */
 }
 
+static void i915_redisable_vga(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 vga_reg;
+
+       if (HAS_PCH_SPLIT(dev))
+               vga_reg = CPU_VGACNTRL;
+       else
+               vga_reg = VGACNTRL;
+
+       if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+               DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+               I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+               POSTING_READ(vga_reg);
+       }
+}
+
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
 void intel_modeset_setup_hw_state(struct drm_device *dev,
@@ -8807,8 +9005,11 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
        }
 
        if (force_restore) {
-               for_each_pipe(pipe)
+               for_each_pipe(pipe) {
                        intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
+               }
+
+               i915_redisable_vga(dev);
        } else {
                intel_modeset_update_staged_output_state(dev);
        }