Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel
author Dave Airlie <airlied@redhat.com>
Thu, 17 Jan 2013 10:34:08 +0000 (20:34 +1000)
committer Dave Airlie <airlied@redhat.com>
Thu, 17 Jan 2013 10:34:08 +0000 (20:34 +1000)
Daniel writes:
- seqno wrap fixes and debug infrastructure from Mika Kuoppala and Chris
  Wilson (a short sketch of the wrap-safe comparison follows below)
- some leftover kill-agp-on-gen6+ patches from Ben
- hotplug improvements from Damien
- clear fb when allocated from stolen, avoids dirt on the fbcon (Chris)
- stolen memory support from Chris Wilson, one of the many steps towards
  real fastboot support
- some DDI code cleanups from Paulo
- some refactorings around the lvds and dp code
- assorted small bits & pieces
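
For context on the seqno wrap work: seqno ordering relies on the usual
signed-difference trick, so comparisons stay correct across a 32-bit
wraparound. A minimal sketch mirroring i915_seqno_passed() from the diff
below (the sample values are hypothetical):

    #include <stdint.h>
    #include <stdbool.h>

    /* True if seq1 is at or after seq2, even across a u32 wrap. */
    static bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    /* e.g. seqno_passed(0x00000002, 0xfffffffe) is true: 2 comes
     * "after" 0xfffffffe once the counter has wrapped around. */

To exercise these paths early, i915_gem_init_hw() in the diff now starts
next_seqno at (u32)~0 - 0x1000, i.e. just short of the wrap point.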

* tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel: (93 commits)
  drm/i915: Return the real error code from intel_set_mode()
  drm/i915: Make GSM void
  drm/i915: Move GSM mapping into dev_priv
  drm/i915: Move even more gtt code to i915_gem_gtt
  drm/i915: Make next_seqno debugs entry to use i915_gem_set_seqno
  drm/i915: Introduce i915_gem_set_seqno()
  drm/i915: Always clear semaphore mboxes on seqno wrap
  drm/i915: Initialize hardware semaphore state on ring init
  drm/i915: Introduce ring set_seqno
  drm/i915: Missed conversion to gtt_pte_t
  drm/i915: Bug on unsupported swizzled platforms
  drm/i915: BUG() if fences are used on unsupported platform
  drm/i915: fixup overlay stolen memory leak
  drm/i915: clean up PIPECONF bpc #defines
  drm/i915: add intel_dp_set_signal_levels
  drm/i915: remove leftover display.update_wm assignment
  drm/i915: check for the PCH when setting pch_transcoder
  drm/i915: Clear the stolen fb before enabling
  drm/i915: Access to snooped system memory through the GTT is incoherent
  drm/i915: Remove stale comment about intel_dp_detect()
  ...

Conflicts:
drivers/gpu/drm/i915/intel_display.c

21 files changed:
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
include/drm/drm_mm.h

diff --combined drivers/gpu/drm/drm_mm.c
index 2bf9670ba29b3a584c878e15ab9f69bfdcc11255,b751b8e1e2055dd36e356a0f0a1daad426377cb1..86272f04b82f24045bb3b36996a99e2bc4eb40e3
@@@ -102,20 -102,6 +102,6 @@@ int drm_mm_pre_get(struct drm_mm *mm
  }
  EXPORT_SYMBOL(drm_mm_pre_get);
  
- static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
- {
-       return hole_node->start + hole_node->size;
- }
- 
- static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
- {
-       struct drm_mm_node *next_node =
-               list_entry(hole_node->node_list.next, struct drm_mm_node,
-                          node_list);
-       return next_node->start;
- }
  static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;
  
-       BUG_ON(!hole_node->hole_follows || node->allocated);
+       BUG_ON(node->allocated);
  
        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);
        BUG_ON(node->start + node->size > adj_end);
  
        node->hole_follows = 0;
-       if (node->start + node->size < hole_end) {
+       if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
  }
  
+ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+                                       unsigned long start,
+                                       unsigned long size,
+                                       bool atomic)
+ {
+       struct drm_mm_node *hole, *node;
+       unsigned long end = start + size;
+       unsigned long hole_start;
+       unsigned long hole_end;
+       drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+               if (hole_start > start || hole_end < end)
+                       continue;
+               node = drm_mm_kmalloc(mm, atomic);
+               if (unlikely(node == NULL))
+                       return NULL;
+               node->start = start;
+               node->size = size;
+               node->mm = mm;
+               node->allocated = 1;
+               INIT_LIST_HEAD(&node->hole_stack);
+               list_add(&node->node_list, &hole->node_list);
+               if (start == hole_start) {
+                       hole->hole_follows = 0;
+                       list_del_init(&hole->hole_stack);
+               }
+               node->hole_follows = 0;
+               if (end != hole_end) {
+                       list_add(&node->hole_stack, &mm->hole_stack);
+                       node->hole_follows = 1;
+               }
+               return node;
+       }
+       WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
+       return NULL;
+ }
+ EXPORT_SYMBOL(drm_mm_create_block);
+ 
  struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
@@@ -184,27 -215,19 +215,27 @@@ EXPORT_SYMBOL(drm_mm_get_block_generic)
   * -ENOSPC if no suitable free area is available. The preallocated memory node
   * must be cleared.
   */
 -int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
 -                     unsigned long size, unsigned alignment)
 +int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 +                             unsigned long size, unsigned alignment,
 +                             unsigned long color)
  {
        struct drm_mm_node *hole_node;
  
 -      hole_node = drm_mm_search_free(mm, size, alignment, false);
 +      hole_node = drm_mm_search_free_generic(mm, size, alignment,
 +                                             color, 0);
        if (!hole_node)
                return -ENOSPC;
  
 -      drm_mm_insert_helper(hole_node, node, size, alignment, 0);
 -
 +      drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
  }
 +EXPORT_SYMBOL(drm_mm_insert_node_generic);
 +
 +int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
 +                     unsigned long size, unsigned alignment)
 +{
 +      return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
 +}
  EXPORT_SYMBOL(drm_mm_insert_node);
  
  static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
        BUG_ON(node->start + node->size > end);
  
        node->hole_follows = 0;
-       if (node->start + node->size < hole_end) {
+       if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
@@@ -283,31 -306,22 +314,31 @@@ EXPORT_SYMBOL(drm_mm_get_block_range_ge
   * -ENOSPC if no suitable free area is available. This is for range
   * restricted allocations. The preallocated memory node must be cleared.
   */
 -int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 -                              unsigned long size, unsigned alignment,
 -                              unsigned long start, unsigned long end)
 +int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
 +                                      unsigned long size, unsigned alignment, unsigned long color,
 +                                      unsigned long start, unsigned long end)
  {
        struct drm_mm_node *hole_node;
  
 -      hole_node = drm_mm_search_free_in_range(mm, size, alignment,
 -                                              start, end, false);
 +      hole_node = drm_mm_search_free_in_range_generic(mm,
 +                                                      size, alignment, color,
 +                                                      start, end, 0);
        if (!hole_node)
                return -ENOSPC;
  
 -      drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
 +      drm_mm_insert_helper_range(hole_node, node,
 +                                 size, alignment, color,
                                   start, end);
 -
        return 0;
  }
 +EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 +
 +int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 +                              unsigned long size, unsigned alignment,
 +                              unsigned long start, unsigned long end)
 +{
 +      return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
 +}
  EXPORT_SYMBOL(drm_mm_insert_node_in_range);
  
  /**
@@@ -325,12 -339,13 +356,13 @@@ void drm_mm_remove_node(struct drm_mm_n
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);
  
        if (node->hole_follows) {
-               BUG_ON(drm_mm_hole_node_start(node)
-                               == drm_mm_hole_node_end(node));
+               BUG_ON(__drm_mm_hole_node_start(node) ==
+                      __drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
-               BUG_ON(drm_mm_hole_node_start(node)
-                               != drm_mm_hole_node_end(node));
+               BUG_ON(__drm_mm_hole_node_start(node) !=
+                      __drm_mm_hole_node_end(node));
  
        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
@@@ -388,6 -403,8 +420,8 @@@ struct drm_mm_node *drm_mm_search_free_
  {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
+       unsigned long adj_start;
+       unsigned long adj_end;
        unsigned long best_size;
  
        BUG_ON(mm->scanned_blocks);
        best = NULL;
        best_size = ~0UL;
  
-       list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-               unsigned long adj_start = drm_mm_hole_node_start(entry);
-               unsigned long adj_end = drm_mm_hole_node_end(entry);
+       drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }
  
-               BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
  
@@@ -432,6 -445,8 +462,8 @@@ struct drm_mm_node *drm_mm_search_free_
  {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
+       unsigned long adj_start;
+       unsigned long adj_end;
        unsigned long best_size;
  
        BUG_ON(mm->scanned_blocks);
        best = NULL;
        best_size = ~0UL;
  
-       list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-               unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
-                       start : drm_mm_hole_node_start(entry);
-               unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
-                       end : drm_mm_hole_node_end(entry);
-               BUG_ON(!entry->hole_follows);
+       drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+               if (adj_start < start)
+                       adj_start = start;
+               if (adj_end > end)
+                       adj_end = end;
  
                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
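
Aside on the drm_mm changes above: the old two-step pattern of
drm_mm_search_free() followed by drm_mm_get_block() is replaced by
single-call insertion into a caller-provided, cleared node. A rough
sketch of the new calling convention (error handling trimmed; the
dev_priv/size/alignment/cache_level names stand in for driver state):

    struct drm_mm_node *node;
    int ret;

    node = kzalloc(sizeof(*node), GFP_KERNEL); /* must be zeroed */
    if (node == NULL)
            return -ENOMEM;

    ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
                                     size, alignment, cache_level);
    if (ret) { /* -ENOSPC: evict something and retry, or bail out */
            kfree(node);
            return ret;
    }
    /* node->start and node->size now describe the allocated range. */

The i915_gem_object_bind_to_gtt() conversion further down in this diff
follows exactly this shape.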
index e6a11ca85eafa913f97d315493abd995fd0a764c,882a7352b9b9f5d79c0936ababab139fdff6a812..f7d88e99ebf07d839389bc672491e58581bba114
@@@ -102,7 -102,7 +102,7 @@@ static const char *cache_level_str(int 
  static void
  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
  {
-       seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
+       seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt offset: %08x, size: %08x)",
                           obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+       if (obj->stolen)
+               seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
        if (obj->pin_mappable || obj->fault_mappable) {
                char s[3], *t = s;
                if (obj->pin_mappable)
@@@ -317,7 -319,7 +319,7 @@@ static int i915_gem_pageflip_info(struc
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
 -                      if (!work->pending) {
 +                      if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                                seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
 -                      seq_printf(m, "%d prepares\n", work->pending);
 +                      seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
  
                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
@@@ -387,7 -389,7 +389,7 @@@ static void i915_ring_seqno_info(struc
                                 struct intel_ring_buffer *ring)
  {
        if (ring->get_seqno) {
-               seq_printf(m, "Current sequence (%s): %d\n",
+               seq_printf(m, "Current sequence (%s): %u\n",
                           ring->name, ring->get_seqno(ring, false));
        }
  }
@@@ -544,11 -546,11 +546,11 @@@ static int i915_hws_info(struct seq_fil
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       const volatile u32 __iomem *hws;
+       const u32 *hws;
        int i;
  
        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-       hws = (volatile u32 __iomem *)ring->status_page.page_addr;
+       hws = ring->status_page.page_addr;
        if (hws == NULL)
                return 0;
  
@@@ -608,7 -610,7 +610,7 @@@ static void print_error_buffers(struct 
        seq_printf(m, "%s [%d]:\n", name, count);
  
        while (count--) {
-               seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
+               seq_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
                           err->gtt_offset,
                           err->size,
                           err->read_domains,
@@@ -841,6 -843,77 +843,77 @@@ static const struct file_operations i91
        .release = i915_error_state_release,
  };
  
+ static ssize_t
+ i915_next_seqno_read(struct file *filp,
+                char __user *ubuf,
+                size_t max,
+                loff_t *ppos)
+ {
+       struct drm_device *dev = filp->private_data;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       char buf[80];
+       int len;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       len = snprintf(buf, sizeof(buf),
+                      "next_seqno :  0x%x\n",
+                      dev_priv->next_seqno);
+       mutex_unlock(&dev->struct_mutex);
+       if (len > sizeof(buf))
+               len = sizeof(buf);
+       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ }
+ 
+ static ssize_t
+ i915_next_seqno_write(struct file *filp,
+                     const char __user *ubuf,
+                     size_t cnt,
+                     loff_t *ppos)
+ {
+       struct drm_device *dev = filp->private_data;
+       char buf[20];
+       u32 val = 1;
+       int ret;
+       if (cnt > 0) {
+               if (cnt > sizeof(buf) - 1)
+                       return -EINVAL;
+               if (copy_from_user(buf, ubuf, cnt))
+                       return -EFAULT;
+               buf[cnt] = 0;
+               ret = kstrtouint(buf, 0, &val);
+               if (ret < 0)
+                       return ret;
+       }
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       ret = i915_gem_set_seqno(dev, val);
+       mutex_unlock(&dev->struct_mutex);
+       return ret ?: cnt;
+ }
+ 
+ static const struct file_operations i915_next_seqno_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = i915_next_seqno_read,
+       .write = i915_next_seqno_write,
+       .llseek = default_llseek,
+ };
+ 
  static int i915_rstdby_delays(struct seq_file *m, void *unused)
  {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@@ -1551,7 -1624,7 +1624,7 @@@ static int i915_dpio_info(struct seq_fi
                return 0;
        }
  
-       ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+       ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
        if (ret)
                return ret;
  
        seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
                   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
  
-       mutex_unlock(&dev->mode_config.mutex);
+       mutex_unlock(&dev_priv->dpio_lock);
  
        return 0;
  }
@@@ -2105,6 -2178,12 +2178,12 @@@ int i915_debugfs_init(struct drm_minor 
        if (ret)
                return ret;
  
+       ret = i915_debugfs_create(minor->debugfs_root, minor,
+                                "i915_next_seqno",
+                                &i915_next_seqno_fops);
+       if (ret)
+               return ret;
        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
@@@ -2128,6 -2207,8 +2207,8 @@@ void i915_debugfs_cleanup(struct drm_mi
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
                                 1, minor);
+       drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
+                                1, minor);
  }
  
  #endif /* CONFIG_DEBUG_FS */
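
Aside on the new i915_next_seqno entry above: it is a plain debugfs
read/write file. A minimal userspace sketch, assuming debugfs is mounted
at /sys/kernel/debug and the device is DRM minor 0 (the path is an
assumption, not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[80];
            ssize_t n;
            int fd = open("/sys/kernel/debug/dri/0/i915_next_seqno", O_RDWR);

            if (fd < 0)
                    return 1;

            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout); /* "next_seqno :  0x..." */
            }

            /* The write handler parses with kstrtouint(buf, 0, &val),
             * so "0x"-prefixed hex works; a value near the 32-bit wrap
             * exercises the wrap-handling paths. */
            write(fd, "0xfffffff0", 10);

            close(fd);
            return 0;
    }

(Reading and writing debugfs typically requires root.)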
index 99daa896105d0b221d084ab1ed4b0c839983dcdd,272e500c920e9f885f83ee38e0d09354125feda3..6d8a1dc749346769fe7a19c7edc0131698e466e6
@@@ -141,7 -141,7 +141,7 @@@ void i915_kernel_lost_context(struct dr
  
        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 -      ring->space = ring->head - (ring->tail + 8);
 +      ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
        if (ring->space < 0)
                ring->space += ring->size;
  
@@@ -989,9 -989,6 +989,9 @@@ static int i915_getparam(struct drm_dev
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
 +      case I915_PARAM_HAS_PINNED_BATCHES:
 +              value = 1;
 +              break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@@ -1297,19 -1294,21 +1297,21 @@@ static int i915_load_modeset_init(struc
        if (ret)
                goto cleanup_vga_switcheroo;
  
+       ret = drm_irq_install(dev);
+       if (ret)
+               goto cleanup_gem_stolen;
+       /* Important: The output setup functions called by modeset_init need
+        * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
  
        ret = i915_gem_init(dev);
        if (ret)
-               goto cleanup_gem_stolen;
-       intel_modeset_gem_init(dev);
+               goto cleanup_irq;
  
        INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
  
-       ret = drm_irq_install(dev);
-       if (ret)
-               goto cleanup_gem;
+       intel_modeset_gem_init(dev);
  
        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
  
        ret = intel_fbdev_init(dev);
        if (ret)
-               goto cleanup_irq;
+               goto cleanup_gem;
+       /* Only enable hotplug handling once the fbdev is fully set up. */
+       intel_hpd_init(dev);
+ 
+       /*
+        * Some ports require correctly set-up hpd registers for detection to
+        * work properly (leading to ghost connected connector status), e.g. VGA
+        * on gm45.  Hence we can only set up the initial fbdev config after hpd
+        * irqs are fully enabled. Now we should scan for the initial config
+        * only once hotplug handling is enabled, but due to screwed-up locking
+        * around kms/fbdev init we can't protect the fbdev initial config
+        * scanning against hotplug events. Hence do this first and ignore the
+        * tiny window where we will lose hotplug notifications.
+        */
+       intel_fbdev_initial_config(dev);
+ 
+       /* Hotplug processing itself can likewise only be enabled once the
+        * fbdev is fully set up. */
+       dev_priv->enable_hotplug_processing = true;
  
        drm_kms_helper_poll_init(dev);
  
  
        return 0;
  
- cleanup_irq:
-       drm_irq_uninstall(dev);
  cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
        i915_gem_cleanup_aliasing_ppgtt(dev);
+ cleanup_irq:
+       drm_irq_uninstall(dev);
  cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
  cleanup_vga_switcheroo:
@@@ -1582,7 -1599,7 +1602,7 @@@ int i915_driver_load(struct drm_device 
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        spin_lock_init(&dev_priv->rps.lock);
-       spin_lock_init(&dev_priv->dpio_lock);
+       mutex_init(&dev_priv->dpio_lock);
  
        mutex_init(&dev_priv->rps.hw_lock);
  
        intel_opregion_init(dev);
        acpi_video_register();
  
-       setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
-                   (unsigned long) dev);
        if (IS_GEN5(dev))
                intel_gpu_ips_init(dev_priv);
  
@@@ -1723,9 -1737,6 +1740,6 @@@ int i915_driver_unload(struct drm_devic
                mutex_unlock(&dev->struct_mutex);
                i915_gem_cleanup_aliasing_ppgtt(dev);
                i915_gem_cleanup_stolen(dev);
-               drm_mm_takedown(&dev_priv->mm.stolen);
-               intel_cleanup_overlay(dev);
  
                if (!I915_NEED_GFX_HWS(dev))
                        i915_free_hws(dev);
        intel_teardown_mchbar(dev);
  
        destroy_workqueue(dev_priv->wq);
+       pm_qos_remove_request(&dev_priv->pm_qos);
+       if (dev_priv->slab)
+               kmem_cache_destroy(dev_priv->slab);
  
        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev->dev_private);
index 117265840b1ff2fcbd845c618acf8d3b592cd4a5,fbd0b28b72006e0930ad0dd52f890f77292cbca8..c8cbc32fe8dbd64aa581f9498ab71b4af7d850b4
@@@ -554,7 -554,8 +554,7 @@@ static int __i915_drm_thaw(struct drm_d
  
        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 -              if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 -                      ironlake_init_pch_refclk(dev);
 +              intel_init_pch_refclk(dev);
  
                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.suspended = 0;
                intel_modeset_init_hw(dev);
                intel_modeset_setup_hw_state(dev, false);
                drm_irq_install(dev);
+               intel_hpd_init(dev);
        }
  
        intel_opregion_init(dev);
@@@ -870,6 -872,7 +871,7 @@@ int i915_reset(struct drm_device *dev
  
                drm_irq_uninstall(dev);
                drm_irq_install(dev);
+               intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }
        return 0;
  }
  
 -static int __devinit
 -i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 +static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;
index ed3059575576c4ea869c462e15e61463b4237f0f,d2b93a4a3d710c4e1f593ebb6af23a0bf8cd32b4..b1b1b7350ca4e2633b51e8f9ef61941be72968ac
@@@ -30,6 -30,8 +30,8 @@@
  #ifndef _I915_DRV_H_
  #define _I915_DRV_H_
  
+ #include <uapi/drm/i915_drm.h>
  #include "i915_reg.h"
  #include "intel_bios.h"
  #include "intel_ringbuffer.h"
@@@ -40,6 -42,7 +42,7 @@@
  #include <linux/backlight.h>
  #include <linux/intel-iommu.h>
  #include <linux/kref.h>
+ #include <linux/pm_qos.h>
  
  /* General customization:
   */
@@@ -83,7 -86,12 +86,12 @@@ enum port 
  };
  #define port_name(p) ((p) + 'A')
  
- #define I915_GEM_GPU_DOMAINS  (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ #define I915_GEM_GPU_DOMAINS \
+       (I915_GEM_DOMAIN_RENDER | \
+        I915_GEM_DOMAIN_SAMPLER | \
+        I915_GEM_DOMAIN_COMMAND | \
+        I915_GEM_DOMAIN_INSTRUCTION | \
+        I915_GEM_DOMAIN_VERTEX)
  
  #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
  
@@@ -101,6 -109,19 +109,19 @@@ struct intel_pch_pll 
  };
  #define I915_NUM_PLLS 2
  
+ /* Used by dp and fdi links */
+ struct intel_link_m_n {
+       uint32_t        tu;
+       uint32_t        gmch_m;
+       uint32_t        gmch_n;
+       uint32_t        link_m;
+       uint32_t        link_n;
+ };
+ 
+ void intel_link_compute_m_n(int bpp, int nlanes,
+                           int pixel_clock, int link_clock,
+                           struct intel_link_m_n *m_n);
+ 
  struct intel_ddi_plls {
        int spll_refcount;
        int wrpll1_refcount;
@@@ -276,6 -297,7 +297,7 @@@ struct drm_i915_display_funcs 
                          struct drm_i915_gem_object *obj);
        int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                            int x, int y);
+       void (*hpd_irq_setup)(struct drm_device *dev);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@@ -382,11 -404,6 +404,11 @@@ enum intel_pch 
        PCH_LPT,        /* Lynxpoint PCH */
  };
  
 +enum intel_sbi_destination {
 +      SBI_ICLK,
 +      SBI_MPHY,
 +};
 +
  #define QUIRK_PIPEA_FORCE (1<<0)
  #define QUIRK_LVDS_SSC_DISABLE (1<<1)
  #define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@@ -577,6 -594,9 +599,9 @@@ struct intel_gen6_power_mgmt 
        struct mutex hw_lock;
  };
  
+ /* defined in intel_pm.c */
+ extern spinlock_t mchdev_lock;
+ 
  struct intel_ilk_power_mgmt {
        u8 cur_delay;
        u8 min_delay;
@@@ -619,6 -639,7 +644,7 @@@ struct intel_l3_parity 
  
  typedef struct drm_i915_private {
        struct drm_device *dev;
+       struct kmem_cache *slab;
  
        const struct intel_device_info *info;
  
        /** forcewake_count is protected by gt_lock */
        unsigned forcewake_count;
        /** gt_lock is also taken in irq contexts. */
-       struct spinlock gt_lock;
+       spinlock_t gt_lock;
  
        struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
  
        /** gmbus_mutex protects against concurrent usage of the single hw gmbus
         * controller on different i2c buses. */
        struct mutex gmbus_mutex;
         */
        uint32_t gpio_mmio_base;
  
+       wait_queue_head_t gmbus_wait_queue;
        struct pci_dev *bridge_dev;
        struct intel_ring_buffer ring[I915_NUM_RINGS];
-       uint32_t next_seqno;
+       uint32_t last_seqno, next_seqno;
  
        drm_dma_handle_t *status_page_dmah;
        struct resource mch_res;
        /* protects the irq masks */
        spinlock_t irq_lock;
  
+       /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+       struct pm_qos_request pm_qos;
        /* DPIO indirect register protection */
-       spinlock_t dpio_lock;
+       struct mutex dpio_lock;
  
        /** Cached value of IMR to avoid reads in updating the bitfield */
        u32 pipestat[2];
  
        u32 hotplug_supported_mask;
        struct work_struct hotplug_work;
+       bool enable_hotplug_processing;
  
        int num_pipe;
        int num_pch_pll;
        unsigned int display_clock_mode:1;
        int lvds_ssc_freq;
        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-       unsigned int lvds_val; /* used for checking LVDS channel mode */
        struct {
                int rate;
                int lanes;
                unsigned long gtt_start;
                unsigned long gtt_mappable_end;
                unsigned long gtt_end;
+               unsigned long stolen_base; /* limited to low memory (32-bit) */
+               /** "Graphics Stolen Memory" holds the global PTEs */
+               void __iomem *gsm;
  
                struct io_mapping *gtt_mapping;
                phys_addr_t gtt_base_addr;
                struct i915_hw_ppgtt *aliasing_ppgtt;
  
                struct shrinker inactive_shrinker;
 +              bool shrinker_no_lock_stealing;
  
                /**
                 * List of objects currently involved in rendering.
        bool hw_contexts_disabled;
        uint32_t hw_context_size;
  
 +      bool fdi_rx_polarity_reversed;
 +
        struct i915_suspend_saved_registers regfile;
  
        /* Old dri1 support infrastructure, beware the dragons ya fools entering
@@@ -943,6 -971,8 +979,8 @@@ enum i915_cache_level 
        I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
  };
  
+ #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+ 
  struct drm_i915_gem_object_ops {
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
@@@ -968,6 -998,8 +1006,8 @@@ struct drm_i915_gem_object 
  
        /** Current space allocated to this object in the GTT, if any. */
        struct drm_mm_node *gtt_space;
+       /** Stolen memory for this object, instead of being backed by shmem. */
+       struct drm_mm_node *stolen;
        struct list_head gtt_list;
  
        /** This object's place on the active/inactive lists */
         */
        atomic_t pending_flip;
  };
 +#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
  
  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
  
@@@ -1138,7 -1169,7 +1178,7 @@@ struct drm_i915_gem_request 
  
  struct drm_i915_file_private {
        struct {
-               struct spinlock lock;
+               spinlock_t lock;
                struct list_head request_list;
        } mm;
        struct idr context_idr;
  #define IS_IVB_GT1(dev)               ((dev)->pci_device == 0x0156 || \
                                 (dev)->pci_device == 0x0152 || \
                                 (dev)->pci_device == 0x015a)
 +#define IS_SNB_GT1(dev)               ((dev)->pci_device == 0x0102 || \
 +                               (dev)->pci_device == 0x0106 || \
 +                               (dev)->pci_device == 0x010A)
  #define IS_VALLEYVIEW(dev)    (INTEL_INFO(dev)->is_valleyview)
  #define IS_HASWELL(dev)       (INTEL_INFO(dev)->is_haswell)
  #define IS_MOBILE(dev)                (INTEL_INFO(dev)->is_mobile)
  #define HAS_OVERLAY(dev)              (INTEL_INFO(dev)->has_overlay)
  #define OVERLAY_NEEDS_PHYSICAL(dev)   (INTEL_INFO(dev)->overlay_needs_physical)
  
 +/* Early gen2 have a totally busted CS tlb and require pinned batches. */
 +#define HAS_BROKEN_CS_TLB(dev)                (IS_I830(dev) || IS_845G(dev))
 +
  /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
   * rows, which changed the alignment requirements and fence programming.
   */
  
  #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
  
+ #define HAS_DDI(dev)          (IS_HASWELL(dev))
+ 
  #define INTEL_PCH_DEVICE_ID_MASK              0xff00
  #define INTEL_PCH_IBX_DEVICE_ID_TYPE          0x3b00
  #define INTEL_PCH_CPT_DEVICE_ID_TYPE          0x1c00
@@@ -1320,6 -1347,7 +1362,7 @@@ void i915_hangcheck_elapsed(unsigned lo
  void i915_handle_error(struct drm_device *dev, bool wedged);
  
  extern void intel_irq_init(struct drm_device *dev);
+ extern void intel_hpd_init(struct drm_device *dev);
  extern void intel_gt_init(struct drm_device *dev);
  extern void intel_gt_reset(struct drm_device *dev);
  
@@@ -1388,12 -1416,15 +1431,15 @@@ int i915_gem_get_aperture_ioctl(struct 
  int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
  void i915_gem_load(struct drm_device *dev);
+ void *i915_gem_object_alloc(struct drm_device *dev);
+ void i915_gem_object_free(struct drm_i915_gem_object *obj);
  int i915_gem_init_object(struct drm_gem_object *obj);
  void i915_gem_object_init(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_object_ops *ops);
  struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
  void i915_gem_free_object(struct drm_gem_object *obj);
  int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     uint32_t alignment,
                                     bool map_and_fenceable,
@@@ -1451,8 -1482,8 +1497,8 @@@ i915_seqno_passed(uint32_t seq1, uint32
        return (int32_t)(seq1 - seq2) >= 0;
  }
  
- extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+ int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+ int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
  int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
  int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
  
@@@ -1559,10 -1590,9 +1605,9 @@@ void i915_gem_gtt_bind_object(struct dr
                                enum i915_cache_level cache_level);
  void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
- void i915_gem_init_global_gtt(struct drm_device *dev,
-                             unsigned long start,
-                             unsigned long mappable_end,
-                             unsigned long end);
+ void i915_gem_init_global_gtt(struct drm_device *dev);
+ void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+                              unsigned long mappable_end, unsigned long end);
  int i915_gem_gtt_init(struct drm_device *dev);
  void i915_gem_gtt_fini(struct drm_device *dev);
  static inline void i915_gem_chipset_flush(struct drm_device *dev)
@@@ -1582,9 -1612,22 +1627,22 @@@ int i915_gem_evict_everything(struct dr
  
  /* i915_gem_stolen.c */
  int i915_gem_init_stolen(struct drm_device *dev);
+ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+ void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
  void i915_gem_cleanup_stolen(struct drm_device *dev);
+ struct drm_i915_gem_object *
+ i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
  
  /* i915_gem_tiling.c */
+ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+ {
+       drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ 
+       return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+               obj->tiling_mode != I915_TILING_NONE;
+ }
+ 
  void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
  void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
  void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
@@@ -1672,7 -1715,7 +1730,7 @@@ extern void intel_modeset_setup_hw_stat
  extern bool intel_fbc_enabled(struct drm_device *dev);
  extern void intel_disable_fbc(struct drm_device *dev);
  extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 -extern void ironlake_init_pch_refclk(struct drm_device *dev);
 +extern void intel_init_pch_refclk(struct drm_device *dev);
  extern void gen6_set_rps(struct drm_device *dev, u8 val);
  extern void intel_detect_pch(struct drm_device *dev);
  extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
index da3c82e301b1868257ffa60fc90b31714b934ea0,ad98db5d22ea95d4082fe1530b76a956bd20ea35..e6cc020ea32c704bf3cf751f0d13322325227e72
@@@ -163,8 -163,8 +163,8 @@@ i915_gem_init_ioctl(struct drm_device *
                return -ENODEV;
  
        mutex_lock(&dev->struct_mutex);
-       i915_gem_init_global_gtt(dev, args->gtt_start,
-                                args->gtt_end, args->gtt_end);
+       i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+                                 args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
  
        return 0;
@@@ -192,6 -192,18 +192,18 @@@ i915_gem_get_aperture_ioctl(struct drm_
        return 0;
  }
  
+ void *i915_gem_object_alloc(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+ }
+ 
+ void i915_gem_object_free(struct drm_i915_gem_object *obj)
+ {
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       kmem_cache_free(dev_priv->slab, obj);
+ }
+ 
  static int
  i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
        if (ret) {
                drm_gem_object_release(&obj->base);
                i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-               kfree(obj);
+               i915_gem_object_free(obj);
                return ret;
        }
  
@@@ -259,14 -271,6 +271,6 @@@ i915_gem_create_ioctl(struct drm_devic
                               args->size, &args->handle);
  }
  
- static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
- {
-       drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-       return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-               obj->tiling_mode != I915_TILING_NONE;
- }
  static inline int
  __copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
@@@ -407,7 -411,6 +411,6 @@@ i915_gem_shmem_pread(struct drm_device 
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-       int hit_slowpath = 0;
        int prefaulted = 0;
        int needs_clflush = 0;
        struct scatterlist *sg;
                if (ret == 0)
                        goto next_page;
  
-               hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);
  
                if (!prefaulted) {
@@@ -502,12 -504,6 +504,6 @@@ next_page
  out:
        i915_gem_object_unpin_pages(obj);
  
-       if (hit_slowpath) {
-               /* Fixup: Kill any reinstated backing storage pages */
-               if (obj->madv == __I915_MADV_PURGED)
-                       i915_gem_object_truncate(obj);
-       }
        return ret;
  }
  
@@@ -838,12 -834,13 +834,13 @@@ out
        i915_gem_object_unpin_pages(obj);
  
        if (hit_slowpath) {
-               /* Fixup: Kill any reinstated backing storage pages */
-               if (obj->madv == __I915_MADV_PURGED)
-                       i915_gem_object_truncate(obj);
-               /* and flush dirty cachelines in case the object isn't in the cpu write
-                * domain anymore. */
-               if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+               /*
+                * Fixup: Flush cpu caches in case we didn't flush the dirty
+                * cachelines in-line while writing and the object moved
+                * out of the cpu write domain while we've dropped the lock.
+                */
+               if (!needs_clflush_after &&
+                   obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        i915_gem_clflush_object(obj);
                        i915_gem_chipset_flush(dev);
                }
@@@ -1344,6 -1341,12 +1341,12 @@@ int i915_gem_fault(struct vm_area_struc
  
        trace_i915_gem_object_fault(obj, page_offset, true, write);
  
+       /* Access to snoopable pages through the GTT is incoherent. */
+       if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
        /* Now bind it into the GTT if needed */
        ret = i915_gem_object_pin(obj, 0, true, false);
        if (ret)
@@@ -1517,11 -1520,9 +1520,11 @@@ static int i915_gem_object_create_mmap_
        if (obj->base.map_list.map)
                return 0;
  
 +      dev_priv->mm.shrinker_no_lock_stealing = true;
 +
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
 -              return ret;
 +              goto out;
  
        /* Badly fragmented mmap space? The only way we can recover
         * space is by destroying unwanted objects. We can't randomly release
        i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
 -              return ret;
 +              goto out;
  
        i915_gem_shrink_all(dev_priv);
 -      return drm_gem_create_mmap_offset(&obj->base);
 +      ret = drm_gem_create_mmap_offset(&obj->base);
 +out:
 +      dev_priv->mm.shrinker_no_lock_stealing = false;
 +
 +      return ret;
  }
  
  static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@@ -1702,14 -1699,10 +1705,14 @@@ i915_gem_object_put_pages(struct drm_i9
        if (obj->pages_pin_count)
                return -EBUSY;
  
 +      /* ->put_pages might need to allocate memory for the bit17 swizzle
 +       * array, hence protect them from being reaped by removing them from gtt
 +       * lists early. */
 +      list_del(&obj->gtt_list);
 +
        ops->put_pages(obj);
        obj->pages = NULL;
  
 -      list_del(&obj->gtt_list);
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
  
@@@ -1795,7 -1788,7 +1798,7 @@@ i915_gem_object_get_pages_gtt(struct dr
         */
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        gfp = mapping_gfp_mask(mapping);
 -      gfp |= __GFP_NORETRY | __GFP_NOWARN;
 +      gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
        gfp &= ~(__GFP_IO | __GFP_WAIT);
        for_each_sg(st->sgl, sg, page_count, i) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         */
 -                      gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
 +                      gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
                        gfp |= __GFP_IO | __GFP_WAIT;
  
                        i915_gem_shrink_all(dev_priv);
                        if (IS_ERR(page))
                                goto err_pages;
  
 -                      gfp |= __GFP_NORETRY | __GFP_NOWARN;
 +                      gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
                        gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
  
@@@ -1933,30 -1926,24 +1936,24 @@@ i915_gem_object_move_to_inactive(struc
  }
  
  static int
- i915_gem_handle_seqno_wrap(struct drm_device *dev)
+ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret, i, j;
  
-       /* The hardware uses various monotonic 32-bit counters, if we
-        * detect that they will wraparound we need to idle the GPU
-        * and reset those counters.
-        */
-       ret = 0;
+       /* Carefully retire all requests without writing to the rings */
        for_each_ring(ring, dev_priv, i) {
-               for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
-                       ret |= ring->sync_seqno[j] != 0;
+               ret = intel_ring_idle(ring);
+               if (ret)
+                       return ret;
        }
-       if (ret == 0)
-               return ret;
-       ret = i915_gpu_idle(dev);
-       if (ret)
-               return ret;
        i915_gem_retire_requests(dev);
+       /* Finally reset hw state */
        for_each_ring(ring, dev_priv, i) {
+               intel_ring_init_seqno(ring, seqno);
                for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
                        ring->sync_seqno[j] = 0;
        }
        return 0;
  }
  
+ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+       if (seqno == 0)
+               return -EINVAL;
+ 
+       /* The HWS page needs to be set to a value less than what we
+        * will inject into the ring.
+        */
+       ret = i915_gem_init_seqno(dev, seqno - 1);
+       if (ret)
+               return ret;
+       /* Carefully set the last_seqno value so that wrap
+        * detection still works
+        */
+       dev_priv->next_seqno = seqno;
+       dev_priv->last_seqno = seqno - 1;
+       if (dev_priv->last_seqno == 0)
+               dev_priv->last_seqno--;
+       return 0;
+ }
+ 
  int
  i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  {
  
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_handle_seqno_wrap(dev);
+               int ret = i915_gem_init_seqno(dev, 0);
                if (ret)
                        return ret;
  
                dev_priv->next_seqno = 1;
        }
  
-       *seqno = dev_priv->next_seqno++;
+       *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
        return 0;
  }
  
@@@ -2648,7 -2661,7 +2671,7 @@@ static void i915_gem_write_fence(struc
        case 4: i965_write_fence_reg(dev, reg, obj); break;
        case 3: i915_write_fence_reg(dev, reg, obj); break;
        case 2: i830_write_fence_reg(dev, reg, obj); break;
-       default: break;
+       default: BUG();
        }
  }
  
@@@ -2823,7 -2836,7 +2846,7 @@@ static bool i915_gem_valid_gtt_space(st
  
        /* On non-LLC machines we have to be careful when putting differing
         * types of snoopable memory together to avoid the prefetcher
-        * crossing memory domains and dieing.
+        * crossing memory domains and dying.
         */
        if (HAS_LLC(dev))
                return true;
@@@ -2896,7 -2909,7 +2919,7 @@@ i915_gem_object_bind_to_gtt(struct drm_
  {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_mm_node *free_space;
 +      struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        int ret;
  
        i915_gem_object_pin_pages(obj);
  
 +      node = kzalloc(sizeof(*node), GFP_KERNEL);
 +      if (node == NULL) {
 +              i915_gem_object_unpin_pages(obj);
 +              return -ENOMEM;
 +      }
 +
   search_free:
        if (map_and_fenceable)
 -              free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
 -                                                             size, alignment, obj->cache_level,
 -                                                             0, dev_priv->mm.gtt_mappable_end,
 -                                                             false);
 +              ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
 +                                                        size, alignment, obj->cache_level,
 +                                                        0, dev_priv->mm.gtt_mappable_end);
        else
 -              free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
 -                                                    size, alignment, obj->cache_level,
 -                                                    false);
 -
 -      if (free_space != NULL) {
 -              if (map_and_fenceable)
 -                      free_space =
 -                              drm_mm_get_block_range_generic(free_space,
 -                                                             size, alignment, obj->cache_level,
 -                                                             0, dev_priv->mm.gtt_mappable_end,
 -                                                             false);
 -              else
 -                      free_space =
 -                              drm_mm_get_block_generic(free_space,
 -                                                       size, alignment, obj->cache_level,
 -                                                       false);
 -      }
 -      if (free_space == NULL) {
 +              ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
 +                                               size, alignment, obj->cache_level);
 +      if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
 -              if (ret) {
 -                      i915_gem_object_unpin_pages(obj);
 -                      return ret;
 -              }
 +              if (ret == 0)
 +                      goto search_free;
  
 -              goto search_free;
 +              i915_gem_object_unpin_pages(obj);
 +              kfree(node);
 +              return ret;
        }
 -      if (WARN_ON(!i915_gem_valid_gtt_space(dev,
 -                                            free_space,
 -                                            obj->cache_level))) {
 +      if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
                i915_gem_object_unpin_pages(obj);
 -              drm_mm_put_block(free_space);
 +              drm_mm_put_block(node);
                return -EINVAL;
        }
  
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                i915_gem_object_unpin_pages(obj);
 -              drm_mm_put_block(free_space);
 +              drm_mm_put_block(node);
                return ret;
        }
  
        list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  
 -      obj->gtt_space = free_space;
 -      obj->gtt_offset = free_space->start;
 +      obj->gtt_space = node;
 +      obj->gtt_offset = node->start;
  
        fenceable =
 -              free_space->size == fence_size &&
 -              (free_space->start & (fence_alignment - 1)) == 0;
 +              node->size == fence_size &&
 +              (node->start & (fence_alignment - 1)) == 0;
  
        mappable =
                obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@@ -3698,14 -3723,14 +3721,14 @@@ struct drm_i915_gem_object *i915_gem_al
  {
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
-       u32 mask;
+       gfp_t mask;
  
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;
  
        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-               kfree(obj);
+               i915_gem_object_free(obj);
                return NULL;
        }
  
@@@ -3777,6 -3802,7 +3800,7 @@@ void i915_gem_free_object(struct drm_ge
        obj->pages_pin_count = 0;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
+       i915_gem_object_release_stolen(obj);
  
        BUG_ON(obj->pages);
  
        i915_gem_info_remove_obj(dev_priv, obj->base.size);
  
        kfree(obj->bit_17);
-       kfree(obj);
+       i915_gem_object_free(obj);
  }
  
  int
@@@ -3883,8 -3909,10 +3907,10 @@@ void i915_gem_init_swizzling(struct drm
        I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
        if (IS_GEN6(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-       else
+       else if (IS_GEN7(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+       else
+               BUG();
  }
  
  static bool
@@@ -3919,6 -3947,8 +3945,8 @@@ i915_gem_init_hw(struct drm_device *dev
  
        i915_gem_init_swizzling(dev);
  
+       dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000;
        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;
                        goto cleanup_bsd_ring;
        }
  
-       dev_priv->next_seqno = 1;
        /*
         * XXX: There was some w/a described somewhere suggesting loading
         * contexts before PPGTT.
@@@ -3953,58 -3981,13 +3979,13 @@@ cleanup_render_ring
        return ret;
  }
  
- static bool
- intel_enable_ppgtt(struct drm_device *dev)
- {
-       if (i915_enable_ppgtt >= 0)
-               return i915_enable_ppgtt;
- #ifdef CONFIG_INTEL_IOMMU
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
- #endif
-       return true;
- }
  int i915_gem_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long gtt_size, mappable_size;
        int ret;
  
-       gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-       mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
        mutex_lock(&dev->struct_mutex);
-       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
-                * aperture accordingly when using aliasing ppgtt. */
-               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-               i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-               ret = i915_gem_init_aliasing_ppgtt(dev);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
-       } else {
-               /* Let GEM Manage all of the aperture.
-                *
-                * However, leave one page at the end still bound to the scratch
-                * page.  There are a number of places where the hardware
-                * apparently prefetches past the end of the object, and we've
-                * seen multiple hangs with the GPU head pointer stuck in a
-                * batchbuffer bound at the last page of the aperture.  One page
-                * should be enough to keep any prefetching inside of the
-                * aperture.
-                */
-               i915_gem_init_global_gtt(dev, 0, mappable_size,
-                                        gtt_size);
-       }
+       i915_gem_init_global_gtt(dev);
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
@@@ -4105,8 -4088,14 +4086,14 @@@ init_ring_lists(struct intel_ring_buffe
  void
  i915_gem_load(struct drm_device *dev)
  {
-       int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int i;
+       dev_priv->slab =
+               kmem_cache_create("i915_gem_object",
+                                 sizeof(struct drm_i915_gem_object), 0,
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
  
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@@ -4386,9 -4375,6 +4373,9 @@@ i915_gem_inactive_shrink(struct shrinke
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return 0;
  
 +              if (dev_priv->mm.shrinker_no_lock_stealing)
 +                      return 0;
 +
                unlock = false;
        }
  
index abeaafef6d7ef9986313be7ab8e0c2c5a992c065,defb888ef7f56954a0ada160abf0bbcb821f4f84..6a5af6828624e257d4b2516ade02ce79eaf4d839
@@@ -226,7 -226,7 +226,7 @@@ struct dma_buf *i915_gem_prime_export(s
  {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
  
 -      return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
 +      return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
  }
  
  static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@@ -266,12 -266,7 +266,12 @@@ struct drm_gem_object *i915_gem_prime_i
                obj = dma_buf->priv;
                /* is it from our device? */
                if (obj->base.dev == dev) {
 +                      /*
 +                       * Importing a dmabuf exported from our own GEM takes a
 +                       * reference on the GEM object itself instead of on the
 +                       * dmabuf's f_count.
 +                       */
                        drm_gem_object_reference(&obj->base);
 +                      dma_buf_put(dma_buf);
                        return &obj->base;
                }
        }
        if (IS_ERR(attach))
                return ERR_CAST(attach);
  
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       obj = i915_gem_object_alloc(dev);
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
  
        ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        if (ret) {
-               kfree(obj);
+               i915_gem_object_free(obj);
                goto fail_detach;
        }
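The reference juggling in the self-import fast path above is subtle enough to restate. A minimal sketch, assuming the caller got dma_buf from dma_buf_get() and has already verified the dmabuf was exported by i915 (prime_self_import is a hypothetical name):

	struct drm_gem_object *prime_self_import(struct drm_device *dev,
						 struct dma_buf *dma_buf)
	{
		struct drm_i915_gem_object *obj = dma_buf->priv;

		if (obj->base.dev != dev)
			return NULL;	/* foreign object, take the attach path */

		drm_gem_object_reference(&obj->base);	/* pin the GEM object */
		dma_buf_put(dma_buf);	/* balance the caller's dma_buf_get() */
		return &obj->base;
	}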
  
index d6a994a07393677fb5b4fd6edb868cf7bd460cbe,6cd3e1c1629edf9e685d05a28553b2e025abdb51..163bb52bd3b3b0b168fec9cd097655c003d5ba33
@@@ -150,17 -150,6 +150,6 @@@ i915_gem_execbuffer_relocate_entry(stru
                          reloc->write_domain);
                return ret;
        }
-       if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
-                    reloc->write_domain != target_obj->pending_write_domain)) {
-               DRM_DEBUG("Write domain conflict: "
-                         "obj %p target %d offset %d "
-                         "new %08x old %08x\n",
-                         obj, reloc->target_handle,
-                         (int) reloc->offset,
-                         reloc->write_domain,
-                         target_obj->pending_write_domain);
-               return ret;
-       }
  
        target_obj->pending_read_domains |= reloc->read_domains;
        target_obj->pending_write_domain |= reloc->write_domain;
@@@ -601,45 -590,12 +590,12 @@@ err
        return ret;
  }
  
- static int
- i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
- {
-       u32 plane, flip_mask;
-       int ret;
-       /* Check for any pending flips. As we only maintain a flip queue depth
-        * of 1, we can simply insert a WAIT for the next display flip prior
-        * to executing the batch and avoid stalling the CPU.
-        */
-       for (plane = 0; flips >> plane; plane++) {
-               if (((flips >> plane) & 1) == 0)
-                       continue;
-               if (plane)
-                       flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-               else
-                       flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-               ret = intel_ring_begin(ring, 2);
-               if (ret)
-                       return ret;
-               intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
-       }
-       return 0;
- }
  static int
  i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *objects)
  {
        struct drm_i915_gem_object *obj;
        uint32_t flush_domains = 0;
-       uint32_t flips = 0;
        int ret;
  
        list_for_each_entry(obj, objects, exec_list) {
                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        i915_gem_clflush_object(obj);
  
-               if (obj->base.pending_write_domain)
-                       flips |= atomic_read(&obj->pending_flip);
                flush_domains |= obj->base.write_domain;
        }
  
-       if (flips) {
-               ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-               if (ret)
-                       return ret;
-       }
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                i915_gem_chipset_flush(ring->dev);
  
@@@ -808,8 -755,6 +755,8 @@@ i915_gem_do_execbuffer(struct drm_devic
  
                flags |= I915_DISPATCH_SECURE;
        }
 +      if (args->flags & I915_EXEC_IS_PINNED)
 +              flags |= I915_DISPATCH_PINNED;
  
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
index 2c150dee78a70468f96a87295cd29595cfa46a68,eac2cec716521124f7e02500b5b93eac02e6f23e..a4af0f79e9724dedd11625042104accf4dd42e53
@@@ -282,7 -282,7 +282,7 @@@ void i915_gem_init_ppgtt(struct drm_dev
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       uint32_t __iomem *pd_addr;
+       gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;
  
                return;
  
  
-       pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+       pd_addr = (gtt_pte_t __iomem*)dev_priv->mm.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;
  
@@@ -367,7 -367,7 +367,7 @@@ static void i915_ggtt_clear_range(struc
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        gtt_pte_t scratch_pte;
-       gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+       gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->mm.gsm + first_entry;
        const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
        int i;
  
@@@ -432,7 -432,8 +432,8 @@@ static void gen6_ggtt_bind_object(struc
        struct scatterlist *sg = st->sgl;
        const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
        const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
-       gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+       gtt_pte_t __iomem *gtt_entries =
+               (gtt_pte_t __iomem *)dev_priv->mm.gsm + first_entry;
        int unused, i = 0;
        unsigned int len, m = 0;
        dma_addr_t addr;
@@@ -525,26 -526,103 +526,103 @@@ static void i915_gtt_color_adjust(struc
        }
  }
  
- void i915_gem_init_global_gtt(struct drm_device *dev,
-                             unsigned long start,
-                             unsigned long mappable_end,
-                             unsigned long end)
+ void i915_gem_setup_global_gtt(struct drm_device *dev,
+                              unsigned long start,
+                              unsigned long mappable_end,
+                              unsigned long end)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_mm_node *entry;
+       struct drm_i915_gem_object *obj;
+       unsigned long hole_start, hole_end;
  
-       /* Substract the guard page ... */
+       /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
  
+       /* Mark any preallocated objects as occupied */
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+               DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+                             obj->gtt_offset, obj->base.size);
+               BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+                                                    obj->gtt_offset,
+                                                    obj->base.size,
+                                                    false);
+               obj->has_global_gtt_mapping = 1;
+       }
        dev_priv->mm.gtt_start = start;
        dev_priv->mm.gtt_mappable_end = mappable_end;
        dev_priv->mm.gtt_end = end;
        dev_priv->mm.gtt_total = end - start;
        dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
  
-       /* ... but ensure that we clear the entire range. */
-       i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+       /* Clear any non-preallocated blocks */
+       drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+                            hole_start, hole_end) {
+               DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+                             hole_start, hole_end);
+               i915_ggtt_clear_range(dev,
+                                     hole_start / PAGE_SIZE,
+                                     (hole_end-hole_start) / PAGE_SIZE);
+       }
+       /* And finally clear the reserved guard page */
+       i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+ }
+ static bool
+ intel_enable_ppgtt(struct drm_device *dev)
+ {
+       if (i915_enable_ppgtt >= 0)
+               return i915_enable_ppgtt;
+ #ifdef CONFIG_INTEL_IOMMU
+       /* Disable ppgtt on SNB if VT-d is on. */
+       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+               return false;
+ #endif
+       return true;
+ }
+ void i915_gem_init_global_gtt(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long gtt_size, mappable_size;
+       int ret;
+       gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+       mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+                * aperture accordingly when using aliasing ppgtt. */
+               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+               i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+               ret = i915_gem_init_aliasing_ppgtt(dev);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return;
+               }
+       } else {
+               /* Let GEM manage all of the aperture.
+                *
+                * However, leave one page at the end still bound to the scratch
+                * page.  There are a number of places where the hardware
+                * apparently prefetches past the end of the object, and we've
+                * seen multiple hangs with the GPU head pointer stuck in a
+                * batchbuffer bound at the last page of the aperture.  One page
+                * should be enough to keep any prefetching inside of the
+                * aperture.
+                */
+               i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+       }
  }
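For scale, the aperture shrink in the PPGTT branch above amounts to one page per page-directory entry; a back-of-the-envelope sketch, assuming I915_PPGTT_PD_ENTRIES is 512 as on gen6:

	static unsigned long ppgtt_pd_reservation(void)
	{
		/* 512 PDEs * 4 KiB = 2 MiB of global GTT PTE space handed
		 * over to the aliasing PPGTT page directory. */
		return 512UL * 4096UL;
	}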
  
  static int setup_scratch_page(struct drm_device *dev)
@@@ -639,10 -717,6 +717,10 @@@ int i915_gem_gtt_init(struct drm_devic
        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
  
 +#ifdef CONFIG_INTEL_IOMMU
 +      dev_priv->mm.gtt->needs_dmar = 1;
 +#endif
 +
        /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
        gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
        dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
                goto err_out;
        }
  
-       dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
-                                          dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
-       if (!dev_priv->mm.gtt->gtt) {
+       dev_priv->mm.gsm = ioremap_wc(gtt_bus_addr,
+                                     dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+       if (!dev_priv->mm.gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                teardown_scratch_page(dev);
                ret = -ENOMEM;
@@@ -700,7 -774,7 +778,7 @@@ err_out
  void i915_gem_gtt_fini(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       iounmap(dev_priv->mm.gtt->gtt);
+       iounmap(dev_priv->mm.gsm);
        teardown_scratch_page(dev);
        if (INTEL_INFO(dev)->gen < 6)
                intel_gmch_remove();
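dev_priv->mm.gsm above is the write-combined ioremap of the GTT page table (2 MiB into BAR0 on gen6+, per the earlier comment). A hedged sketch of how one PTE is written through it, assuming 32-bit gen6-style PTEs (write_gtt_pte is hypothetical):

	static void write_gtt_pte(gtt_pte_t __iomem *gsm, unsigned int index,
				  u32 pte)
	{
		/* One PTE per GTT page; callers typically issue a posting
		 * read after a run of writes since the mapping is WC. */
		iowrite32(pte, &gsm[index]);
	}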
index 2220dec3e5d983eb9f730d95011e7eb2ebd82adc,6ba0573e7f169a6611483c118b98841ca895ff1e..6689a61b02a31c7f9ef1b57c439ab7d44befb918
@@@ -287,6 -287,10 +287,10 @@@ static void i915_hotplug_work_func(stru
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
  
+       /* Ignore HPD irqs arriving before everything is fully set up. */
+       if (!dev_priv->enable_hotplug_processing)
+               return;
        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");
  
        drm_helper_hpd_irq_event(dev);
  }
  
- /* defined intel_pm.c */
- extern spinlock_t mchdev_lock;
  static void ironlake_handle_rps_change(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@@ -524,6 -525,20 +525,20 @@@ static void gen6_queue_rps_work(struct 
        queue_work(dev_priv->wq, &dev_priv->rps.work);
  }
  
+ static void gmbus_irq_handler(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       wake_up_all(&dev_priv->gmbus_wait_queue);
+ }
+ static void dp_aux_irq_handler(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       wake_up_all(&dev_priv->gmbus_wait_queue);
+ }
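Both handlers above just wake dev_priv->gmbus_wait_queue; the point is to let GMBUS/AUX transfers sleep instead of busy-polling. A hedged sketch of the waiter side (wait_for_gmbus_irq, status and done_bit are placeholders):

	static int wait_for_gmbus_irq(struct drm_i915_private *dev_priv,
				      u32 __iomem *status, u32 done_bit)
	{
		long ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
					      readl(status) & done_bit,
					      msecs_to_jiffies(50));

		return ret ? 0 : -ETIMEDOUT;
	}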
  static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];
-       bool blc_event;
  
        atomic_inc(&dev_priv->irq_received);
  
                        I915_READ(PORT_HOTPLUG_STAT);
                }
  
-               if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
-                       blc_event = true;
+               if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+                       gmbus_irq_handler(dev);
  
                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
@@@ -618,8 -632,11 +632,11 @@@ static void ibx_irq_handler(struct drm_
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
                                 SDE_AUDIO_POWER_SHIFT);
  
+       if (pch_iir & SDE_AUX_MASK)
+               dp_aux_irq_handler(dev);
        if (pch_iir & SDE_GMBUS)
-               DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+               gmbus_irq_handler(dev);
  
        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@@ -662,10 -679,10 +679,10 @@@ static void cpt_irq_handler(struct drm_
                                 SDE_AUDIO_POWER_SHIFT_CPT);
  
        if (pch_iir & SDE_AUX_MASK_CPT)
-               DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+               dp_aux_irq_handler(dev);
  
        if (pch_iir & SDE_GMBUS_CPT)
-               DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+               gmbus_irq_handler(dev);
  
        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@@ -703,6 -720,9 +720,9 @@@ static irqreturn_t ivybridge_irq_handle
  
        de_iir = I915_READ(DEIIR);
        if (de_iir) {
+               if (de_iir & DE_AUX_CHANNEL_A_IVB)
+                       dp_aux_irq_handler(dev);
                if (de_iir & DE_GSE_IVB)
                        intel_opregion_gse_intr(dev);
  
@@@ -758,7 -778,7 +778,7 @@@ static irqreturn_t ironlake_irq_handler
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
-       u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+       u32 de_iir, gt_iir, de_ier, pm_iir;
  
        atomic_inc(&dev_priv->irq_received);
  
  
        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
-       pch_iir = I915_READ(SDEIIR);
        pm_iir = I915_READ(GEN6_PMIIR);
  
-       if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
-           (!IS_GEN6(dev) || pm_iir == 0))
+       if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
                goto done;
  
        ret = IRQ_HANDLED;
        else
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
  
+       if (de_iir & DE_AUX_CHANNEL_A)
+               dp_aux_irq_handler(dev);
        if (de_iir & DE_GSE)
                intel_opregion_gse_intr(dev);
  
  
        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
+               u32 pch_iir = I915_READ(SDEIIR);
                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
                        ibx_irq_handler(dev, pch_iir);
+               /* must clear the PCH hotplug event before clearing the CPU irq */
+               I915_WRITE(SDEIIR, pch_iir);
        }
  
        if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
        if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
                gen6_queue_rps_work(dev_priv, pm_iir);
  
-       /* should clear PCH hotplug event before clear CPU irq */
-       I915_WRITE(SDEIIR, pch_iir);
        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(DEIIR, de_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);
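The reordering above matters because the south (PCH) interrupt is cascaded into the north DEIIR. A hedged sketch of the resulting ack order (ack_display_irqs and handle_pch_irq are hypothetical stand-ins for the handler above):

	static void ack_display_irqs(struct drm_device *dev, u32 de_iir)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		if (de_iir & DE_PCH_EVENT) {
			u32 pch_iir = I915_READ(SDEIIR);

			handle_pch_irq(dev, pch_iir);
			/* Ack south first: the PCH bit re-latches into DEIIR
			 * as long as SDEIIR still has bits set. */
			I915_WRITE(SDEIIR, pch_iir);
		}
		I915_WRITE(DEIIR, de_iir);
	}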
@@@ -928,6 -952,14 +952,14 @@@ i915_error_object_create(struct drm_i91
                                                     reloc_offset);
                        memcpy_fromio(d, s, PAGE_SIZE);
                        io_mapping_unmap_atomic(s);
+               } else if (src->stolen) {
+                       unsigned long offset;
+                       offset = dev_priv->mm.stolen_base;
+                       offset += src->stolen->start;
+                       offset += i << PAGE_SHIFT;
+                       memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
                } else {
                        struct page *page;
                        void *s;
@@@ -1074,6 -1106,8 +1106,8 @@@ static void i915_gem_record_fences(stru
                        error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
                break;
  
+       default:
+               BUG();
        }
  }
  
@@@ -1087,18 -1121,6 +1121,18 @@@ i915_error_first_batchbuffer(struct drm
        if (!ring->get_seqno)
                return NULL;
  
 +      if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
 +              u32 acthd = I915_READ(ACTHD);
 +
 +              if (WARN_ON(ring->id != RCS))
 +                      return NULL;
 +
 +              obj = ring->private;
 +              if (acthd >= obj->gtt_offset &&
 +                  acthd < obj->gtt_offset + obj->base.size)
 +                      return i915_error_object_create(dev_priv, obj);
 +      }
 +
        seqno = ring->get_seqno(ring, false);
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (obj->ring != ring)
@@@ -1478,9 -1500,7 +1512,9 @@@ static void i915_pageflip_stall_check(s
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
  
 -      if (work == NULL || work->pending || !work->enable_stall_check) {
 +      if (work == NULL ||
 +          atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
 +          !work->enable_stall_check) {
                /* Either the pending flip IRQ arrived, or we're too early. Don't check */
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
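work->pending is now an atomic stage counter rather than a bool. A sketch of the predicate the stall check relies on (helper name hypothetical; the stage values are not shown in this hunk):

	static bool flip_still_pending(struct intel_unpin_work *work)
	{
		/* Nothing to stall-check once the flip-done irq has advanced
		 * pending to INTEL_FLIP_COMPLETE. */
		return atomic_read(&work->pending) < INTEL_FLIP_COMPLETE;
	}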
@@@ -1854,7 -1874,8 +1888,8 @@@ static int ironlake_irq_postinstall(str
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+                          DE_AUX_CHANNEL_A;
        u32 render_irqs;
        u32 hotplug_mask;
  
                hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                                SDE_PORTB_HOTPLUG_CPT |
                                SDE_PORTC_HOTPLUG_CPT |
-                               SDE_PORTD_HOTPLUG_CPT);
+                               SDE_PORTD_HOTPLUG_CPT |
+                               SDE_GMBUS_CPT |
+                               SDE_AUX_MASK_CPT);
        } else {
                hotplug_mask = (SDE_CRT_HOTPLUG |
                                SDE_PORTB_HOTPLUG |
                                SDE_PORTC_HOTPLUG |
                                SDE_PORTD_HOTPLUG |
+                               SDE_GMBUS |
                                SDE_AUX_MASK);
        }
  
@@@ -1924,7 -1948,8 +1962,8 @@@ static int ivybridge_irq_postinstall(st
                DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
                DE_PLANEC_FLIP_DONE_IVB |
                DE_PLANEB_FLIP_DONE_IVB |
-               DE_PLANEA_FLIP_DONE_IVB;
+               DE_PLANEA_FLIP_DONE_IVB |
+               DE_AUX_CHANNEL_A_IVB;
        u32 render_irqs;
        u32 hotplug_mask;
  
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                        SDE_PORTB_HOTPLUG_CPT |
                        SDE_PORTC_HOTPLUG_CPT |
-                       SDE_PORTD_HOTPLUG_CPT);
+                       SDE_PORTD_HOTPLUG_CPT |
+                       SDE_GMBUS_CPT |
+                       SDE_AUX_MASK_CPT);
        dev_priv->pch_irq_mask = ~hotplug_mask;
  
        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
@@@ -1970,7 -1997,6 +2011,6 @@@ static int valleyview_irq_postinstall(s
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
-       u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
        u32 render_irqs;
        u16 msid;
        msid |= (1<<14);
        pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
  
+       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       POSTING_READ(PORT_HOTPLUG_EN);
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, enable_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
        POSTING_READ(VLV_IER);
  
        i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+       i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
        i915_enable_pipestat(dev_priv, 1, pipestat_enable);
  
        I915_WRITE(VLV_IIR, 0xffffffff);
  #endif
  
        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+       return 0;
+ }
+ static void valleyview_hpd_irq_setup(struct drm_device *dev)
+ {
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
        /* Note HDMI and DP share bits */
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIB_HOTPLUG_INT_EN;
        }
  
        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-       return 0;
  }
  
  static void valleyview_irq_uninstall(struct drm_device *dev)
@@@ -2275,6 -2312,9 +2326,9 @@@ static int i915_irq_postinstall(struct 
                I915_USER_INTERRUPT;
  
        if (I915_HAS_HOTPLUG(dev)) {
+               I915_WRITE(PORT_HOTPLUG_EN, 0);
+               POSTING_READ(PORT_HOTPLUG_EN);
                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);
  
+       intel_opregion_enable_asle(dev);
+       return 0;
+ }
+ static void i915_hpd_irq_setup(struct drm_device *dev)
+ {
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 hotplug_en;
        if (I915_HAS_HOTPLUG(dev)) {
-               u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+               hotplug_en = I915_READ(PORT_HOTPLUG_EN);
  
                if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMIB_HOTPLUG_INT_EN;
  
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }
-       intel_opregion_enable_asle(dev);
-       return 0;
  }
  
  static irqreturn_t i915_irq_handler(int irq, void *arg)
@@@ -2470,7 -2516,6 +2530,6 @@@ static void i965_irq_preinstall(struct 
  static int i965_irq_postinstall(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 hotplug_en;
        u32 enable_mask;
        u32 error_mask;
  
  
        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;
+       i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
  
        /*
         * Enable some error detection, note the instruction error mask
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);
  
+       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       POSTING_READ(PORT_HOTPLUG_EN);
+       intel_opregion_enable_asle(dev);
+       return 0;
+ }
+ static void i965_hpd_irq_setup(struct drm_device *dev)
+ {
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 hotplug_en;
        /* Note HDMI and DP share hotplug bits */
        hotplug_en = 0;
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
        /* Ignore TV since it's buggy */
  
        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-       intel_opregion_enable_asle(dev);
-       return 0;
  }
  
  static irqreturn_t i965_irq_handler(int irq, void *arg)
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);
  
+               if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+                       gmbus_irq_handler(dev);
                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
                 * set while we were handling the existing iir bits, then
@@@ -2699,6 -2757,11 +2771,11 @@@ void intel_irq_init(struct drm_device *
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
  
+       setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+                   (unsigned long) dev);
+       pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                dev->driver->irq_uninstall = valleyview_irq_uninstall;
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
-       } else if (IS_IVYBRIDGE(dev)) {
+               dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+       } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                /* Share pre & uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
-       } else if (IS_HASWELL(dev)) {
-               /* Share interrupts handling with IVB */
-               dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ivybridge_enable_vblank;
-               dev->driver->disable_vblank = ivybridge_disable_vblank;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
                        dev->driver->irq_handler = i915_irq_handler;
+                       dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                } else {
                        dev->driver->irq_preinstall = i965_irq_preinstall;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_uninstall;
                        dev->driver->irq_handler = i965_irq_handler;
+                       dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
                }
                dev->driver->enable_vblank = i915_enable_vblank;
                dev->driver->disable_vblank = i915_disable_vblank;
        }
  }
+ void intel_hpd_init(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev);
+ }
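With hotplug setup split out of postinstall, enabling HPD becomes an explicit last step. A hedged sketch of the intended call order in a load path (the exact call site is an assumption):

	static int enable_hotplug_last(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		int ret;

		ret = drm_irq_install(dev);	/* postinstall no longer unmasks HPD */
		if (ret)
			return ret;

		dev_priv->enable_hotplug_processing = true;	/* see hotplug work func */
		intel_hpd_init(dev);	/* per-platform hpd_irq_setup vfunc */
		return 0;
	}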
index 186ee5c85b516592d2c83c8cac8cb2142121e993,5b3020f836b9b05167b6092035fadfb75cef9c72..3b039f4268e3fd38301eaa042b2bde373de0a05e
  #define   VGA_MSR_CGA_MODE (1<<0)
  
  #define VGA_SR_INDEX 0x3c4
+ #define SR01                  1
  #define VGA_SR_DATA 0x3c5
  
  #define VGA_AR_INDEX 0x3c0
   * the enables for writing to the corresponding low bit.
   */
  #define _3D_CHICKEN   0x02084
 +#define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB    (1 << 10)
  #define _3D_CHICKEN2  0x0208c
  /* Disables pipelining of read flushes past the SF-WIZ interface.
   * Required on all Ironlake steppings according to the B-Spec, but the
  # define MI_FLUSH_ENABLE                              (1 << 12)
  
  #define GEN6_GT_MODE  0x20d0
 -#define   GEN6_GT_MODE_HI     (1 << 9)
 +#define   GEN6_GT_MODE_HI                             (1 << 9)
 +#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE           (1 << 5)
  
  #define GFX_MODE      0x02520
  #define GFX_MODE_GEN7 0x0229c
  #define   DPLL_LOCK_VLV                       (1<<15)
  #define   DPLL_INTEGRATED_CLOCK_VLV   (1<<13)
  
- #define SRX_INDEX             0x3c4
- #define SRX_DATA              0x3c5
- #define SR01                  1
- #define SR01_SCREEN_OFF               (1<<5)
- #define PPCR                  0x61204
- #define PPCR_ON                       (1<<0)
- #define DVOB                  0x61140
- #define DVOB_ON                       (1<<31)
- #define DVOC                  0x61160
- #define DVOC_ON                       (1<<31)
- #define LVDS                  0x61180
- #define LVDS_ON                       (1<<31)
- /* Scratch pad debug 0 reg:
-  */
  #define   DPLL_FPA01_P1_POST_DIV_MASK_I830    0x001f0000
  /*
   * The i830 generation, in LVDS mode, defines P1 as the bit number set within
  #define   PFIT_SCALING_PILLAR (2 << 26)
  #define   PFIT_SCALING_LETTER (3 << 26)
  #define PFIT_PGM_RATIOS       0x61234
- #define   PFIT_VERT_SCALE_MASK                        0xfff00000
- #define   PFIT_HORIZ_SCALE_MASK                       0x0000fff0
  /* Pre-965 */
  #define               PFIT_VERT_SCALE_SHIFT           20
  #define               PFIT_VERT_SCALE_MASK            0xfff00000
  #define   PIPECONF_INTERLACED_DBL_ILK         (4 << 21) /* ilk/snb only */
  #define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
  #define   PIPECONF_CXSR_DOWNCLOCK     (1<<16)
- #define   PIPECONF_BPP_MASK   (0x000000e0)
- #define   PIPECONF_BPP_8      (0<<5)
- #define   PIPECONF_BPP_10     (1<<5)
- #define   PIPECONF_BPP_6      (2<<5)
- #define   PIPECONF_BPP_12     (3<<5)
+ #define   PIPECONF_BPC_MASK   (0x7 << 5)
+ #define   PIPECONF_8BPC               (0<<5)
+ #define   PIPECONF_10BPC      (1<<5)
+ #define   PIPECONF_6BPC               (2<<5)
+ #define   PIPECONF_12BPC      (3<<5)
  #define   PIPECONF_DITHER_EN  (1<<4)
  #define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
  #define   PIPECONF_DITHER_TYPE_SP (0<<2)
  #define   PIPE_START_VBLANK_INTERRUPT_STATUS  (1UL<<2) /* 965 or later */
  #define   PIPE_VBLANK_INTERRUPT_STATUS                (1UL<<1)
  #define   PIPE_OVERLAY_UPDATED_STATUS         (1UL<<0)
- #define   PIPE_BPC_MASK                               (7 << 5) /* Ironlake */
- #define   PIPE_8BPC                           (0 << 5)
- #define   PIPE_10BPC                          (1 << 5)
- #define   PIPE_6BPC                           (2 << 5)
- #define   PIPE_12BPC                          (3 << 5)
  
  #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
  #define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
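The renamed PIPECONF_*BPC values above form one 3-bit field at bit 5 instead of scattered defines. A small decode sketch (pipeconf_bpc is hypothetical):

	static int pipeconf_bpc(u32 pipeconf)
	{
		switch (pipeconf & PIPECONF_BPC_MASK) {
		case PIPECONF_8BPC:	return 8;
		case PIPECONF_10BPC:	return 10;
		case PIPECONF_6BPC:	return 6;
		case PIPECONF_12BPC:	return 12;
		default:		return 0;	/* reserved encodings */
		}
	}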
  #define PORTD_PULSE_DURATION_6ms        (2 << 18)
  #define PORTD_PULSE_DURATION_100ms      (3 << 18)
  #define PORTD_PULSE_DURATION_MASK     (3 << 18)
- #define PORTD_HOTPLUG_NO_DETECT         (0)
- #define PORTD_HOTPLUG_SHORT_DETECT      (1 << 16)
- #define PORTD_HOTPLUG_LONG_DETECT       (1 << 17)
+ #define PORTD_HOTPLUG_STATUS_MASK     (0x3 << 16)
+ #define  PORTD_HOTPLUG_NO_DETECT      (0 << 16)
+ #define  PORTD_HOTPLUG_SHORT_DETECT   (1 << 16)
+ #define  PORTD_HOTPLUG_LONG_DETECT    (2 << 16)
  #define PORTC_HOTPLUG_ENABLE            (1 << 12)
  #define PORTC_PULSE_DURATION_2ms        (0)
  #define PORTC_PULSE_DURATION_4_5ms      (1 << 10)
  #define PORTC_PULSE_DURATION_6ms        (2 << 10)
  #define PORTC_PULSE_DURATION_100ms      (3 << 10)
  #define PORTC_PULSE_DURATION_MASK     (3 << 10)
- #define PORTC_HOTPLUG_NO_DETECT         (0)
- #define PORTC_HOTPLUG_SHORT_DETECT      (1 << 8)
- #define PORTC_HOTPLUG_LONG_DETECT       (1 << 9)
+ #define PORTC_HOTPLUG_STATUS_MASK     (0x3 << 8)
+ #define  PORTC_HOTPLUG_NO_DETECT      (0 << 8)
+ #define  PORTC_HOTPLUG_SHORT_DETECT   (1 << 8)
+ #define  PORTC_HOTPLUG_LONG_DETECT    (2 << 8)
  #define PORTB_HOTPLUG_ENABLE            (1 << 4)
  #define PORTB_PULSE_DURATION_2ms        (0)
  #define PORTB_PULSE_DURATION_4_5ms      (1 << 2)
  #define PORTB_PULSE_DURATION_6ms        (2 << 2)
  #define PORTB_PULSE_DURATION_100ms      (3 << 2)
  #define PORTB_PULSE_DURATION_MASK     (3 << 2)
- #define PORTB_HOTPLUG_NO_DETECT         (0)
- #define PORTB_HOTPLUG_SHORT_DETECT      (1 << 0)
- #define PORTB_HOTPLUG_LONG_DETECT       (1 << 1)
+ #define PORTB_HOTPLUG_STATUS_MASK     (0x3 << 0)
+ #define  PORTB_HOTPLUG_NO_DETECT      (0 << 0)
+ #define  PORTB_HOTPLUG_SHORT_DETECT   (1 << 0)
+ #define  PORTB_HOTPLUG_LONG_DETECT    (2 << 0)
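With the widened fields above, long versus short pulse is a 2-bit encoding rather than two independent bits, so detection must compare against the mask. For example, for port B (helper name hypothetical):

	static bool portb_long_pulse(u32 hotplug_stat)
	{
		return (hotplug_stat & PORTB_HOTPLUG_STATUS_MASK) ==
			PORTB_HOTPLUG_LONG_DETECT;
	}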
  
  #define PCH_GPIOA               0xc5010
  #define PCH_GPIOB               0xc5014
  #define  TRANS_FSYNC_DELAY_HB2  (1<<27)
  #define  TRANS_FSYNC_DELAY_HB3  (2<<27)
  #define  TRANS_FSYNC_DELAY_HB4  (3<<27)
- #define  TRANS_DP_AUDIO_ONLY    (1<<26)
- #define  TRANS_DP_VIDEO_AUDIO   (0<<26)
  #define  TRANS_INTERLACE_MASK   (7<<21)
  #define  TRANS_PROGRESSIVE      (0<<21)
  #define  TRANS_INTERLACED       (3<<21)
  #define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
  #define  FDI_BC_BIFURCATION_SELECT    (1 << 12)
  #define SOUTH_CHICKEN2                0xc2004
 -#define  DPLS_EDP_PPS_FIX_DIS (1<<0)
 +#define  FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
 +#define  FDI_MPHY_IOSFSB_RESET_CTL    (1<<12)
 +#define  DPLS_EDP_PPS_FIX_DIS         (1<<0)
  
  #define _FDI_RXA_CHICKEN         0xc200c
  #define _FDI_RXB_CHICKEN         0xc2010
  #define  FDI_FS_ERRC_ENABLE           (1<<27)
  #define  FDI_FE_ERRC_ENABLE           (1<<26)
  #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
 +#define  FDI_RX_POLARITY_REVERSED_LPT (1<<16)
  #define  FDI_8BPC                       (0<<16)
  #define  FDI_10BPC                      (1<<16)
  #define  FDI_6BPC                       (2<<16)
  #define SBI_ADDR                      0xC6000
  #define SBI_DATA                      0xC6004
  #define SBI_CTL_STAT                  0xC6008
 +#define  SBI_CTL_DEST_ICLK            (0x0<<16)
 +#define  SBI_CTL_DEST_MPHY            (0x1<<16)
 +#define  SBI_CTL_OP_IORD              (0x2<<8)
 +#define  SBI_CTL_OP_IOWR              (0x3<<8)
  #define  SBI_CTL_OP_CRRD              (0x6<<8)
  #define  SBI_CTL_OP_CRWR              (0x7<<8)
  #define  SBI_RESPONSE_FAIL            (0x1<<1)
  #define   SBI_SSCDIVINTPHASE_PROPAGATE                (1<<0)
  #define  SBI_SSCCTL                           0x020c
  #define  SBI_SSCCTL6                          0x060C
 +#define   SBI_SSCCTL_PATHALT                  (1<<3)
  #define   SBI_SSCCTL_DISABLE                  (1<<0)
  #define  SBI_SSCAUXDIV6                               0x0610
  #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)               ((x)<<4)
  #define  SBI_DBUFF0                           0x2a00
 +#define   SBI_DBUFF0_ENABLE                   (1<<0)
  
  /* LPT PIXCLK_GATE */
  #define PIXCLK_GATE                   0xC6020
index 9293878ec7eb53165e259e1607a57e23519dbaa7,bc07b3f0d5e74ed6bb20ed210fa5221f9195def8..71a5ebad14fbf2c86bea360d5bf6620ac729c4d1
@@@ -143,7 -143,7 +143,7 @@@ static void intel_crt_dpms(struct drm_c
        int old_dpms;
  
        /* PCH platforms and VLV only support on/off. */
 -      if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
 +      if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
                mode = DRM_MODE_DPMS_OFF;
  
        if (mode == connector->dpms)
@@@ -198,11 -198,6 +198,11 @@@ static int intel_crt_mode_valid(struct 
        if (mode->clock > max_clock)
                return MODE_CLOCK_HIGH;
  
 +      /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
 +      if (HAS_PCH_LPT(dev) &&
 +          (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
 +              return MODE_CLOCK_HIGH;
 +
        return MODE_OK;
  }
  
@@@ -776,7 -771,7 +776,7 @@@ void intel_crt_init(struct drm_device *
  
        crt->base.disable = intel_disable_crt;
        crt->base.enable = intel_enable_crt;
-       if (IS_HASWELL(dev))
+       if (HAS_DDI(dev))
                crt->base.get_hw_state = intel_ddi_get_hw_state;
        else
                crt->base.get_hw_state = intel_crt_get_hw_state;
        crt->force_hotplug_required = 0;
  
        dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 +
 +      /*
 +       * TODO: find a proper way to discover whether we need to set the
 +       * polarity reversal bit or not, instead of relying on the BIOS.
 +       */
 +      if (HAS_PCH_LPT(dev))
 +              dev_priv->fdi_rx_polarity_reversed =
 +                   !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
  }
index 4bad0f724019561c1895517d81adf1067ef617b7,f02b3feff504a8efb0a9f3771d3f90e7067814e3..2e904a5cd6cb26803f1ac608600cb65d16e76f2c
@@@ -84,7 -84,8 +84,8 @@@ static enum port intel_ddi_get_encoder_
   * in either FDI or DP modes only, as HDMI connections will work with both
   * of those
   */
- void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+                                     bool use_fdi_mode)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg;
@@@ -114,16 -115,17 +115,17 @@@ void intel_prepare_ddi(struct drm_devic
  {
        int port;
  
-       if (IS_HASWELL(dev)) {
-               for (port = PORT_A; port < PORT_E; port++)
-                       intel_prepare_ddi_buffers(dev, port, false);
+       if (!HAS_DDI(dev))
+               return;
  
-               /* DDI E is the suggested one to work in FDI mode, so program is as such by
-                * default. It will have to be re-programmed in case a digital DP output
-                * will be detected on it
-                */
-               intel_prepare_ddi_buffers(dev, PORT_E, true);
-       }
+       for (port = PORT_A; port < PORT_E; port++)
+               intel_prepare_ddi_buffers(dev, port, false);
+       /* DDI E is the suggested one to work in FDI mode, so program it as
+        * such by default. It will have to be re-programmed if a digital DP
+        * output is detected on it.
+        */
+       intel_prepare_ddi_buffers(dev, PORT_E, true);
  }
  
  static const long hsw_ddi_buf_ctl_values[] = {
        DDI_BUF_EMP_800MV_3_5DB_HSW
  };
  
 +static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 +                                  enum port port)
 +{
 +      uint32_t reg = DDI_BUF_CTL(port);
 +      int i;
 +
 +      for (i = 0; i < 8; i++) {
 +              udelay(1);
 +              if (I915_READ(reg) & DDI_BUF_IS_IDLE)
 +                      return;
 +      }
 +      DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
 +}
  
  /* Starting with Haswell, different DDI ports can work in FDI mode for
   * connection to the PCH-located connectors. For this, it is necessary to train
@@@ -180,8 -169,6 +182,8 @@@ void hsw_fdi_link_train(struct drm_crt
        /* Enable the PCH Receiver FDI PLL */
        rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
                     ((intel_crtc->fdi_lanes - 1) << 19);
 +      if (dev_priv->fdi_rx_polarity_reversed)
 +              rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
        I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
        POSTING_READ(_FDI_RXA_CTL);
        udelay(220);
                        return;
                }
  
 +              temp = I915_READ(DDI_BUF_CTL(PORT_E));
 +              temp &= ~DDI_BUF_CTL_ENABLE;
 +              I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
 +              POSTING_READ(DDI_BUF_CTL(PORT_E));
 +
                /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
 -              I915_WRITE(DP_TP_CTL(PORT_E),
 -                         I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE);
 +              temp = I915_READ(DP_TP_CTL(PORT_E));
 +              temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
 +              temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
 +              I915_WRITE(DP_TP_CTL(PORT_E), temp);
 +              POSTING_READ(DP_TP_CTL(PORT_E));
 +
 +              intel_wait_ddi_buf_idle(dev_priv, PORT_E);
  
                rx_ctl_val &= ~FDI_RX_ENABLE;
                I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 +              POSTING_READ(_FDI_RXA_CTL);
  
                /* Reset FDI_RX_MISC pwrdn lanes */
                temp = I915_READ(_FDI_RXA_MISC);
                temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
                temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
                I915_WRITE(_FDI_RXA_MISC, temp);
 +              POSTING_READ(_FDI_RXA_MISC);
        }
  
        DRM_ERROR("FDI link training failed!\n");
@@@ -1069,7 -1044,7 +1071,7 @@@ bool intel_ddi_connector_get_hw_state(s
        if (port == PORT_A)
                cpu_transcoder = TRANSCODER_EDP;
        else
-               cpu_transcoder = pipe;
+               cpu_transcoder = (enum transcoder) pipe;
  
        tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
  
@@@ -1249,6 -1224,20 +1251,6 @@@ static void intel_ddi_pre_enable(struc
        }
  }
  
 -static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 -                                  enum port port)
 -{
 -      uint32_t reg = DDI_BUF_CTL(port);
 -      int i;
 -
 -      for (i = 0; i < 8; i++) {
 -              udelay(1);
 -              if (I915_READ(reg) & DDI_BUF_IS_IDLE)
 -                      return;
 -      }
 -      DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
 -}
 -
  static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
  {
        struct drm_encoder *encoder = &intel_encoder->base;
index a9fb046b94a140ab169cbd2eb016922fc702dd98,1464e472ce44ed28d32694a16d661d29c705c5e7..8c36a11a9a57040bc93ae9fa4dc37cec7e18f84a
@@@ -416,13 -416,11 +416,11 @@@ static const intel_limit_t intel_limits
  
  u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
  {
-       unsigned long flags;
-       u32 val = 0;
+       WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
  
-       spin_lock_irqsave(&dev_priv->dpio_lock, flags);
        if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
                DRM_ERROR("DPIO idle wait timed out\n");
-               goto out_unlock;
+               return 0;
        }
  
        I915_WRITE(DPIO_REG, reg);
                   DPIO_BYTE);
        if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
                DRM_ERROR("DPIO read wait timed out\n");
-               goto out_unlock;
+               return 0;
        }
-       val = I915_READ(DPIO_DATA);
  
- out_unlock:
-       spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
-       return val;
+       return I915_READ(DPIO_DATA);
  }
  
  static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
                             u32 val)
  {
-       unsigned long flags;
+       WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
  
-       spin_lock_irqsave(&dev_priv->dpio_lock, flags);
        if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
                DRM_ERROR("DPIO idle wait timed out\n");
-               goto out_unlock;
+               return;
        }
  
        I915_WRITE(DPIO_DATA, val);
                   DPIO_BYTE);
        if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
                DRM_ERROR("DPIO write wait timed out\n");
- out_unlock:
-        spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
  }
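dpio_lock is now a mutex the caller must hold across a whole sequence, instead of a spinlock hidden inside each accessor. A hedged caller-side sketch (dpio_rmw, reg and bit are placeholders):

	static void dpio_rmw(struct drm_i915_private *dev_priv, int reg, u32 bit)
	{
		u32 val;

		mutex_lock(&dev_priv->dpio_lock);
		val = intel_dpio_read(dev_priv, reg);
		intel_dpio_write(dev_priv, reg, val | bit);
		mutex_unlock(&dev_priv->dpio_lock);
	}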
  
  static void vlv_init_dpio(struct drm_device *dev)
        POSTING_READ(DPIO_CTL);
  }
  
- static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
- {
-       DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
-       return 1;
- }
- static const struct dmi_system_id intel_dual_link_lvds[] = {
-       {
-               .callback = intel_dual_link_lvds_callback,
-               .ident = "Apple MacBook Pro (Core i5/i7 Series)",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
-               },
-       },
-       { }     /* terminating entry */
- };
- static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
-                             unsigned int reg)
- {
-       unsigned int val;
-       /* use the module option value if specified */
-       if (i915_lvds_channel_mode > 0)
-               return i915_lvds_channel_mode == 2;
-       if (dmi_check_system(intel_dual_link_lvds))
-               return true;
-       if (dev_priv->lvds_val)
-               val = dev_priv->lvds_val;
-       else {
-               /* BIOS should set the proper LVDS register value at boot, but
-                * in reality, it doesn't set the value when the lid is closed;
-                * we need to check "the value to be set" in VBT when LVDS
-                * register is uninitialized.
-                */
-               val = I915_READ(reg);
-               if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
-                       val = dev_priv->bios_lvds_val;
-               dev_priv->lvds_val = val;
-       }
-       return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
- }
  static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
                                                int refclk)
  {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        const intel_limit_t *limit;
  
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-               if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+               if (intel_is_dual_link_lvds(dev)) {
                        /* LVDS dual channel */
                        if (refclk == 100000)
                                limit = &intel_limits_ironlake_dual_lvds_100m;
  static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        const intel_limit_t *limit;
  
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-               if (is_dual_link_lvds(dev_priv, LVDS))
+               if (intel_is_dual_link_lvds(dev))
                        /* LVDS with dual channel */
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
@@@ -686,19 -629,16 +629,16 @@@ intel_find_best_PLL(const intel_limit_
  
  {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int err = target;
  
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
-           (I915_READ(LVDS)) != 0) {
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                /*
-                * For LVDS, if the panel is on, just rely on its current
-                * settings for dual-channel.  We haven't figured out how to
-                * reliably set up different single/dual channel state, if we
-                * even can.
+                * For LVDS just rely on its current settings for dual-channel.
+                * We haven't figured out how to reliably set up different
+                * single/dual channel state, if we even can.
                 */
-               if (is_dual_link_lvds(dev_priv, LVDS))
+               if (intel_is_dual_link_lvds(dev))
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
@@@ -751,7 -691,6 +691,6 @@@ intel_g4x_find_best_PLL(const intel_lim
                        intel_clock_t *best_clock)
  {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int max_n;
        bool found;
                        lvds_reg = PCH_LVDS;
                else
                        lvds_reg = LVDS;
-               if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
-                   LVDS_CLKB_POWER_UP)
+               if (intel_is_dual_link_lvds(dev))
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
@@@ -1047,6 -985,51 +985,51 @@@ void intel_wait_for_pipe_off(struct drm
        }
  }
  
+ /**
+  * ibx_digital_port_connected - is the specified port connected?
+  * @dev_priv: i915 private structure
+  * @port: the port to test
+  *
+  * Returns true if @port is connected, false otherwise.
+  */
+ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+                               struct intel_digital_port *port)
+ {
+       u32 bit;
+       if (HAS_PCH_IBX(dev_priv->dev)) {
+               switch (port->port) {
+               case PORT_B:
+                       bit = SDE_PORTB_HOTPLUG;
+                       break;
+               case PORT_C:
+                       bit = SDE_PORTC_HOTPLUG;
+                       break;
+               case PORT_D:
+                       bit = SDE_PORTD_HOTPLUG;
+                       break;
+               default:
+                       return true;
+               }
+       } else {
+               switch (port->port) {
+               case PORT_B:
+                       bit = SDE_PORTB_HOTPLUG_CPT;
+                       break;
+               case PORT_C:
+                       bit = SDE_PORTC_HOTPLUG_CPT;
+                       break;
+               case PORT_D:
+                       bit = SDE_PORTD_HOTPLUG_CPT;
+                       break;
+               default:
+                       return true;
+               }
+       }
+       return I915_READ(SDEISR) & bit;
+ }
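A hedged usage sketch for the helper above: gate a DP detect cycle on the live SDEISR status before doing any AUX traffic (pch_dp_detect is hypothetical; the real probe is elided):

	static enum drm_connector_status
	pch_dp_detect(struct drm_i915_private *dev_priv,
		      struct intel_digital_port *port)
	{
		if (!ibx_digital_port_connected(dev_priv, port))
			return connector_status_disconnected;

		return connector_status_unknown;	/* continue with AUX probe */
	}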
  static const char *state_string(bool enabled)
  {
        return enabled ? "on" : "off";
@@@ -1125,8 -1108,8 +1108,8 @@@ static void assert_fdi_tx(struct drm_i9
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
  
-       if (IS_HASWELL(dev_priv->dev)) {
-               /* On Haswell, DDI is used instead of FDI_TX_CTL */
+       if (HAS_DDI(dev_priv->dev)) {
+               /* DDI does not have a specific FDI_TX register */
                reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
                val = I915_READ(reg);
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@@ -1170,7 -1153,7 +1153,7 @@@ static void assert_fdi_tx_pll_enabled(s
                return;
  
        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
-       if (IS_HASWELL(dev_priv->dev))
+       if (HAS_DDI(dev_priv->dev))
                return;
  
        reg = FDI_TX_CTL(pipe);
@@@ -1506,69 -1489,55 +1489,63 @@@ static void intel_disable_pll(struct dr
  
  /* SBI access */
  static void
 -intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
 +intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 +              enum intel_sbi_destination destination)
  {
-       unsigned long flags;
 +      u32 tmp;
 +
-       spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+       WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+                               100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
-               goto out_unlock;
+               return;
        }
  
 -      I915_WRITE(SBI_ADDR,
 -                      (reg << 16));
 -      I915_WRITE(SBI_DATA,
 -                      value);
 -      I915_WRITE(SBI_CTL_STAT,
 -                      SBI_BUSY |
 -                      SBI_CTL_OP_CRWR);
 +      I915_WRITE(SBI_ADDR, (reg << 16));
 +      I915_WRITE(SBI_DATA, value);
 +
 +      if (destination == SBI_ICLK)
 +              tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
 +      else
 +              tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
 +      I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
  
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
                DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
-               goto out_unlock;
+               return;
        }
- out_unlock:
-       spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
  }
  
  static u32
 -intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
 +intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 +             enum intel_sbi_destination destination)
  {
-       unsigned long flags;
 +      u32 value = 0;
+       WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
  
-       spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+                               100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
-               goto out_unlock;
+               return 0;
        }
  
 -      I915_WRITE(SBI_ADDR,
 -                      (reg << 16));
 -      I915_WRITE(SBI_CTL_STAT,
 -                      SBI_BUSY |
 -                      SBI_CTL_OP_CRRD);
 +      I915_WRITE(SBI_ADDR, (reg << 16));
 +
 +      if (destination == SBI_ICLK)
 +              value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
 +      else
 +              value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
 +      I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
  
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
                DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
-               goto out_unlock;
+               return 0;
        }
  
-       value = I915_READ(SBI_DATA);
- out_unlock:
-       spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
-       return value;
+       return I915_READ(SBI_DATA);
  }
  
  /**
@@@ -1700,8 -1669,8 +1677,8 @@@ static void ironlake_enable_pch_transco
                 * make the BPC in transcoder be consistent with
                 * that in pipeconf reg.
                 */
-               val &= ~PIPE_BPC_MASK;
-               val |= pipeconf_val & PIPE_BPC_MASK;
+               val &= ~PIPECONF_BPC_MASK;
+               val |= pipeconf_val & PIPECONF_BPC_MASK;
        }
  
        val &= ~TRANS_INTERLACE_MASK;
@@@ -1728,7 -1697,7 +1705,7 @@@ static void lpt_enable_pch_transcoder(s
        BUG_ON(dev_priv->info->gen < 5);
  
        /* FDI must be feeding us bits for PCH ports */
-       assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+       assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
  
        /* Workaround: set timing override bit. */
@@@ -1816,11 -1785,11 +1793,11 @@@ static void intel_enable_pipe(struct dr
  {
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
-       enum transcoder pch_transcoder;
+       enum pipe pch_transcoder;
        int reg;
        u32 val;
  
-       if (IS_HASWELL(dev_priv->dev))
+       if (HAS_PCH_LPT(dev_priv->dev))
                pch_transcoder = TRANSCODER_A;
        else
                pch_transcoder = pipe;
                if (pch_port) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
-                       assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
+                       assert_fdi_tx_pll_enabled(dev_priv,
+                                                 (enum pipe) cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }
@@@ -2350,43 -2320,6 +2328,6 @@@ intel_pipe_set_base(struct drm_crtc *cr
        return 0;
  }
  
- static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 dpa_ctl;
-       DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
-       dpa_ctl = I915_READ(DP_A);
-       dpa_ctl &= ~DP_PLL_FREQ_MASK;
-       if (clock < 200000) {
-               u32 temp;
-               dpa_ctl |= DP_PLL_FREQ_160MHZ;
-               /* workaround for 160MHz:
-                  1) program 0x4600c bits 15:0 = 0x8124
-                  2) program 0x46010 bit 0 = 1
-                  3) program 0x46034 bit 24 = 1
-                  4) program 0x64000 bit 14 = 1
-                  */
-               temp = I915_READ(0x4600c);
-               temp &= 0xffff0000;
-               I915_WRITE(0x4600c, temp | 0x8124);
-               temp = I915_READ(0x46010);
-               I915_WRITE(0x46010, temp | 1);
-               temp = I915_READ(0x46034);
-               I915_WRITE(0x46034, temp | (1 << 24));
-       } else {
-               dpa_ctl |= DP_PLL_FREQ_270MHZ;
-       }
-       I915_WRITE(DP_A, dpa_ctl);
-       POSTING_READ(DP_A);
-       udelay(500);
- }
  static void intel_fdi_normal_train(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
                           FDI_FE_ERRC_ENABLE);
  }
  
 -static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 flags = I915_READ(SOUTH_CHICKEN1);
 -
 -      flags |= FDI_PHASE_SYNC_OVR(pipe);
 -      I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
 -      flags |= FDI_PHASE_SYNC_EN(pipe);
 -      I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
 -      POSTING_READ(SOUTH_CHICKEN1);
 -}
 -
  static void ivb_modeset_global_resources(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -2602,6 -2547,8 +2543,6 @@@ static void gen6_fdi_link_train(struct 
        POSTING_READ(reg);
        udelay(150);
  
 -      cpt_phase_pointer_enable(dev, pipe);
 -
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@@ -2734,6 -2681,8 +2675,6 @@@ static void ivb_manual_fdi_link_train(s
        POSTING_READ(reg);
        udelay(150);
  
 -      cpt_phase_pointer_enable(dev, pipe);
 -
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@@ -2815,7 -2764,7 +2756,7 @@@ static void ironlake_fdi_pll_enable(str
        temp = I915_READ(reg);
        temp &= ~((0x7 << 19) | (0x7 << 16));
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
-       temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
  
        POSTING_READ(reg);
        POSTING_READ(reg);
        udelay(200);
  
-       /* On Haswell, the PLL configuration for ports and pipes is handled
-        * separately, as part of DDI setup */
-       if (!IS_HASWELL(dev)) {
-               /* Enable CPU FDI TX PLL, always on for Ironlake */
-               reg = FDI_TX_CTL(pipe);
-               temp = I915_READ(reg);
-               if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-                       I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+       /* Enable CPU FDI TX PLL, always on for Ironlake */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+               I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
  
-                       POSTING_READ(reg);
-                       udelay(100);
-               }
+               POSTING_READ(reg);
+               udelay(100);
        }
  }
  
@@@ -2872,6 -2817,17 +2809,6 @@@ static void ironlake_fdi_pll_disable(st
        udelay(100);
  }
  
 -static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 flags = I915_READ(SOUTH_CHICKEN1);
 -
 -      flags &= ~(FDI_PHASE_SYNC_EN(pipe));
 -      I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
 -      flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
 -      I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
 -      POSTING_READ(SOUTH_CHICKEN1);
 -}
  static void ironlake_fdi_disable(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(0x7 << 16);
-       temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
  
        POSTING_READ(reg);
        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev)) {
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
 -      } else if (HAS_PCH_CPT(dev)) {
 -              cpt_phase_pointer_disable(dev, pipe);
        }
  
        /* still set train pattern 1 */
        }
        /* BPC in FDI rx is consistent with that in PIPECONF */
        temp &= ~(0x07 << 16);
-       temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp);
  
        POSTING_READ(reg);
@@@ -2992,6 -2950,8 +2929,8 @@@ static void lpt_program_iclkip(struct d
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;
  
+       mutex_lock(&dev_priv->dpio_lock);
        /* It is necessary to ungate the pixclk gate prior to programming
         * the divisors, and gate it back when it is done.
         */
  
        /* Disable SSCCTL */
        intel_sbi_write(dev_priv, SBI_SSCCTL6,
 -                              intel_sbi_read(dev_priv, SBI_SSCCTL6) |
 -                                      SBI_SSCCTL_DISABLE);
 +                      intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
 +                              SBI_SSCCTL_DISABLE,
 +                      SBI_ICLK);
  
        /* 20MHz is a corner case which is out of range for the 7-bit divisor */
        if (crtc->mode.clock == 20000) {
                        phaseinc);
  
        /* Program SSCDIVINTPHASE6 */
 -      temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
 +      temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
 -
 -      intel_sbi_write(dev_priv,
 -                      SBI_SSCDIVINTPHASE6,
 -                      temp);
 +      intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
  
        /* Program SSCAUXDIV */
 -      temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
 +      temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
 -      intel_sbi_write(dev_priv,
 -                      SBI_SSCAUXDIV6,
 -                      temp);
 -
 +      intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
  
        /* Enable modulator and associated divider */
 -      temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
 +      temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
 -      intel_sbi_write(dev_priv,
 -                      SBI_SSCCTL6,
 -                      temp);
 +      intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  
        /* Wait for initialization time */
        udelay(24);
  
        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+       mutex_unlock(&dev_priv->dpio_lock);
  }
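
Note the new lock bracket: the whole iCLKIP sequence now runs under dpio_lock, matching the lock assertion in the SBI accessors. Reduced to its skeleton (a sketch of the pattern, not the literal patch):

	mutex_lock(&dev_priv->dpio_lock);
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp, SBI_ICLK);
	mutex_unlock(&dev_priv->dpio_lock);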
  
  /*
@@@ -3146,7 -3115,7 +3087,7 @@@ static void ironlake_pch_enable(struct 
        if (HAS_PCH_CPT(dev) &&
            (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
             intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-               u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+               u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                reg = TRANS_DP_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@@ -3623,7 -3592,7 +3564,7 @@@ static void haswell_crtc_off(struct drm
  
        /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
         * start using it. */
-       intel_crtc->cpu_transcoder = intel_crtc->pipe;
+       intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
  
        intel_ddi_put_crtc_pll(crtc);
  }
@@@ -4012,16 -3981,8 +3953,8 @@@ static int i830_get_display_clock_speed
        return 133000;
  }
  
- struct fdi_m_n {
-       u32        tu;
-       u32        gmch_m;
-       u32        gmch_n;
-       u32        link_m;
-       u32        link_n;
- };
  static void
- fdi_reduce_ratio(u32 *num, u32 *den)
+ intel_reduce_ratio(uint32_t *num, uint32_t *den)
  {
        while (*num > 0xffffff || *den > 0xffffff) {
                *num >>= 1;
        }
  }
  
- static void
- ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
-                    int link_clock, struct fdi_m_n *m_n)
+ void
+ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+                      int pixel_clock, int link_clock,
+                      struct intel_link_m_n *m_n)
  {
-       m_n->tu = 64; /* default size */
-       /* BUG_ON(pixel_clock > INT_MAX / 36); */
+       m_n->tu = 64;
        m_n->gmch_m = bits_per_pixel * pixel_clock;
        m_n->gmch_n = link_clock * nlanes * 8;
-       fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+       intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
        m_n->link_m = pixel_clock;
        m_n->link_n = link_clock;
-       fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+       intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
  }
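
intel_link_compute_m_n() expresses the ratio of pixel data rate to link rate as two 24-bit fractions. A worked example with assumed numbers (24 bpp, 4 lanes, 148500 kHz pixel clock, 270000 kHz link clock):

	struct intel_link_m_n m_n;

	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
	/* gmch_m = 24 * 148500     = 3564000
	 * gmch_n = 270000 * 4 * 8  = 8640000
	 * link_m/link_n = 148500/270000
	 * All values already fit in 24 bits, so intel_reduce_ratio()
	 * leaves them untouched; tu stays at the default 64. */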
  
  static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@@ -4118,17 -4077,6 +4049,17 @@@ static bool intel_choose_pipe_bpp_dithe
                        }
                }
  
 +              if (intel_encoder->type == INTEL_OUTPUT_EDP) {
 +                      /* Use VBT settings if we have an eDP panel */
 +                      unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 +
 +                      if (edp_bpc && edp_bpc < display_bpc) {
 +                              DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 +                              display_bpc = edp_bpc;
 +                      }
 +                      continue;
 +              }
 +
                /*
                 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
                 * through, clamp it down.  (Note: >12bpc will be caught below.)
@@@ -4289,51 -4237,6 +4220,6 @@@ static void i9xx_update_pll_dividers(st
        }
  }
  
- static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
-                             struct drm_display_mode *adjusted_mode)
- {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       u32 temp;
-       temp = I915_READ(LVDS);
-       temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-       if (pipe == 1) {
-               temp |= LVDS_PIPEB_SELECT;
-       } else {
-               temp &= ~LVDS_PIPEB_SELECT;
-       }
-       /* set the corresponding LVDS_BORDER bit */
-       temp |= dev_priv->lvds_border_bits;
-       /* Set the B0-B3 data pairs corresponding to whether we're going to
-        * set the DPLLs for dual-channel mode or not.
-        */
-       if (clock->p2 == 7)
-               temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-       else
-               temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-       /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
-        * appropriately here, but we need to look more thoroughly into how
-        * panels behave in the two modes.
-        */
-       /* set the dithering flag on LVDS as needed */
-       if (INTEL_INFO(dev)->gen >= 4) {
-               if (dev_priv->lvds_dither)
-                       temp |= LVDS_ENABLE_DITHER;
-               else
-                       temp &= ~LVDS_ENABLE_DITHER;
-       }
-       temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-               temp |= LVDS_HSYNC_POLARITY;
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-               temp |= LVDS_VSYNC_POLARITY;
-       I915_WRITE(LVDS, temp);
- }
  static void vlv_update_pll(struct drm_crtc *crtc,
                           struct drm_display_mode *mode,
                           struct drm_display_mode *adjusted_mode,
        bool is_sdvo;
        u32 temp;
  
+       mutex_lock(&dev_priv->dpio_lock);
        is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
                intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
  
                        temp |= (1 << 21);
                intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
        }
+       mutex_unlock(&dev_priv->dpio_lock);
  }
  
  static void i9xx_update_pll(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
        u32 dpll;
        bool is_sdvo;
        POSTING_READ(DPLL(pipe));
        udelay(150);
  
-       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
-        * This is an exception to the general rule that mode_set doesn't turn
-        * things on.
-        */
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-               intel_update_lvds(crtc, clock, adjusted_mode);
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->pre_pll_enable)
+                       encoder->pre_pll_enable(encoder);
  
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@@ -4555,6 -4460,7 +4443,7 @@@ static void i8xx_update_pll(struct drm_
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
        u32 dpll;
  
        POSTING_READ(DPLL(pipe));
        udelay(150);
  
-       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
-        * This is an exception to the general rule that mode_set doesn't turn
-        * things on.
-        */
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-               intel_update_lvds(crtc, clock, adjusted_mode);
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->pre_pll_enable)
+                       encoder->pre_pll_enable(encoder);
  
        I915_WRITE(DPLL(pipe), dpll);
  
@@@ -4783,10 -4686,10 +4669,10 @@@ static int i9xx_crtc_mode_set(struct dr
        }
  
        /* default to 8bpc */
-       pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+       pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
        if (is_dp) {
                if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
-                       pipeconf |= PIPECONF_BPP_6 |
+                       pipeconf |= PIPECONF_6BPC |
                                    PIPECONF_DITHER_EN |
                                    PIPECONF_DITHER_TYPE_SP;
                }
  
        if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
                if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
-                       pipeconf |= PIPECONF_BPP_6 |
+                       pipeconf |= PIPECONF_6BPC |
                                        PIPECONF_ENABLE |
                                        I965_PIPECONF_ACTIVE;
                }
        return ret;
  }
  
 -/*
 - * Initialize reference clocks when the driver loads
 - */
 -void ironlake_init_pch_refclk(struct drm_device *dev)
 +static void ironlake_init_pch_refclk(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        }
  }
  
 +/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
 +static void lpt_init_pch_refclk(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_mode_config *mode_config = &dev->mode_config;
 +      struct intel_encoder *encoder;
 +      bool has_vga = false;
 +      bool is_sdv = false;
 +      u32 tmp;
 +
 +      list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 +              switch (encoder->type) {
 +              case INTEL_OUTPUT_ANALOG:
 +                      has_vga = true;
 +                      break;
 +              }
 +      }
 +
 +      if (!has_vga)
 +              return;
 +
 +      /* XXX: Rip out SDV support once Haswell ships for real. */
 +      if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
 +              is_sdv = true;
 +
 +      tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
 +      tmp &= ~SBI_SSCCTL_DISABLE;
 +      tmp |= SBI_SSCCTL_PATHALT;
 +      intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 +
 +      udelay(24);
 +
 +      tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
 +      tmp &= ~SBI_SSCCTL_PATHALT;
 +      intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 +
 +      if (!is_sdv) {
 +              tmp = I915_READ(SOUTH_CHICKEN2);
 +              tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
 +              I915_WRITE(SOUTH_CHICKEN2, tmp);
 +
 +              if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
 +                                     FDI_MPHY_IOSFSB_RESET_STATUS, 100))
 +                      DRM_ERROR("FDI mPHY reset assert timeout\n");
 +
 +              tmp = I915_READ(SOUTH_CHICKEN2);
 +              tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
 +              I915_WRITE(SOUTH_CHICKEN2, tmp);
 +
 +              if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
 +                                      FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
 +                                     100))
 +                      DRM_ERROR("FDI mPHY reset de-assert timeout\n");
 +      }
 +
 +      tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
 +      tmp &= ~(0xFF << 24);
 +      tmp |= (0x12 << 24);
 +      intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
 +
 +      if (!is_sdv) {
 +              tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
 +              tmp &= ~(0x3 << 6);
 +              tmp |= (1 << 6) | (1 << 0);
 +              intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
 +      }
 +
 +      if (is_sdv) {
 +              tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
 +              tmp |= 0x7FFF;
 +              intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
 +      }
 +
 +      tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
 +      tmp |= (1 << 11);
 +      intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
 +
 +      tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
 +      tmp |= (1 << 11);
 +      intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
 +
 +      if (is_sdv) {
 +              tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
 +              tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
 +              intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
 +
 +              tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
 +              tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
 +              intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
 +
 +              tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
 +              tmp |= (0x3F << 8);
 +              intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
 +
 +              tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
 +              tmp |= (0x3F << 8);
 +              intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
 +      }
 +
 +      tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
 +      tmp |= (1 << 24) | (1 << 21) | (1 << 18);
 +      intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
 +
 +      tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
 +      tmp |= (1 << 24) | (1 << 21) | (1 << 18);
 +      intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
 +
 +      if (!is_sdv) {
 +              tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
 +              tmp &= ~(7 << 13);
 +              tmp |= (5 << 13);
 +              intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
 +
 +              tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
 +              tmp &= ~(7 << 13);
 +              tmp |= (5 << 13);
 +              intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
 +      }
 +
 +      tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
 +      tmp &= ~0xFF;
 +      tmp |= 0x1C;
 +      intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
 +
 +      tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
 +      tmp &= ~0xFF;
 +      tmp |= 0x1C;
 +      intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
 +
 +      tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
 +      tmp &= ~(0xFF << 16);
 +      tmp |= (0x1C << 16);
 +      intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
 +
 +      tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
 +      tmp &= ~(0xFF << 16);
 +      tmp |= (0x1C << 16);
 +      intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
 +
 +      if (!is_sdv) {
 +              tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
 +              tmp |= (1 << 27);
 +              intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
 +
 +              tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
 +              tmp |= (1 << 27);
 +              intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
 +
 +              tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
 +              tmp &= ~(0xF << 28);
 +              tmp |= (4 << 28);
 +              intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
 +
 +              tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
 +              tmp &= ~(0xF << 28);
 +              tmp |= (4 << 28);
 +              intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
 +      }
 +
 +      /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
 +      tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
 +      tmp |= SBI_DBUFF0_ENABLE;
 +      intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
 +}
 +
 +/*
 + * Initialize reference clocks when the driver loads
 + */
 +void intel_init_pch_refclk(struct drm_device *dev)
 +{
 +      if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 +              ironlake_init_pch_refclk(dev);
 +      else if (HAS_PCH_LPT(dev))
 +              lpt_init_pch_refclk(dev);
 +}
 +
  static int ironlake_get_refclk(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
@@@ -5177,19 -4907,19 +5063,19 @@@ static void ironlake_set_pipeconf(struc
  
        val = I915_READ(PIPECONF(pipe));
  
-       val &= ~PIPE_BPC_MASK;
+       val &= ~PIPECONF_BPC_MASK;
        switch (intel_crtc->bpp) {
        case 18:
-               val |= PIPE_6BPC;
+               val |= PIPECONF_6BPC;
                break;
        case 24:
-               val |= PIPE_8BPC;
+               val |= PIPECONF_8BPC;
                break;
        case 30:
-               val |= PIPE_10BPC;
+               val |= PIPECONF_10BPC;
                break;
        case 36:
-               val |= PIPE_12BPC;
+               val |= PIPECONF_12BPC;
                break;
        default:
                /* Case prevented by intel_choose_pipe_bpp_dither. */
@@@ -5380,17 -5110,6 +5266,17 @@@ static bool ironlake_check_fdi_lanes(st
        }
  }
  
 +int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
 +{
 +      /*
 +       * Account for spread spectrum to avoid
 +       * oversubscribing the link. Max center spread
 +       * is 2.5%; use 5% for safety's sake.
 +       */
 +      u32 bps = target_clock * bpp * 21 / 20;
 +      return bps / (link_bw * 8) + 1;
 +}
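
Plugging assumed numbers into the helper above (148500 kHz target clock, 24 bpp, 270000 kHz link clock):

	/* bps  = 148500 * 24 * 21 / 20 = 3742200  (5% SSC headroom)
	 * lane = 3742200 / (270000 * 8) + 1 = 1 + 1 = 2 FDI lanes */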
 +
  static void ironlake_set_m_n(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
        struct intel_encoder *intel_encoder, *edp_encoder = NULL;
-       struct fdi_m_n m_n = {0};
+       struct intel_link_m_n m_n = {0};
        int target_clock, pixel_multiplier, lane, link_bw;
        bool is_dp = false, is_cpu_edp = false;
  
        else
                target_clock = adjusted_mode->clock;
  
 -      if (!lane) {
 -              /*
 -               * Account for spread spectrum to avoid
 -               * oversubscribing the link. Max center spread
 -               * is 2.5%; use 5% for safety's sake.
 -               */
 -              u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
 -              lane = bps / (link_bw * 8) + 1;
 -      }
 +      if (!lane)
 +              lane = ironlake_get_lanes_required(target_clock, link_bw,
 +                                                 intel_crtc->bpp);
  
        intel_crtc->fdi_lanes = lane;
  
        if (pixel_multiplier > 1)
                link_bw *= pixel_multiplier;
-       ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
-                            &m_n);
+       intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
  
        I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
        I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
@@@ -5506,7 -5230,7 +5391,7 @@@ static uint32_t ironlake_compute_dpll(s
        if (is_lvds) {
                if ((intel_panel_use_ssc(dev_priv) &&
                     dev_priv->lvds_ssc_freq == 100) ||
-                   (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+                   intel_is_dual_link_lvds(dev))
                        factor = 25;
        } else if (is_sdvo && is_tv)
                factor = 20;
@@@ -5581,7 -5305,6 +5466,6 @@@ static int ironlake_crtc_mode_set(struc
        bool ok, has_reduced_clock = false;
        bool is_lvds = false, is_dp = false, is_cpu_edp = false;
        struct intel_encoder *encoder;
-       u32 temp;
        int ret;
        bool dither, fdi_config_ok;
  
        } else
                intel_put_pch_pll(intel_crtc);
  
-       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
-        * This is an exception to the general rule that mode_set doesn't turn
-        * things on.
-        */
-       if (is_lvds) {
-               temp = I915_READ(PCH_LVDS);
-               temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-               if (HAS_PCH_CPT(dev)) {
-                       temp &= ~PORT_TRANS_SEL_MASK;
-                       temp |= PORT_TRANS_SEL_CPT(pipe);
-               } else {
-                       if (pipe == 1)
-                               temp |= LVDS_PIPEB_SELECT;
-                       else
-                               temp &= ~LVDS_PIPEB_SELECT;
-               }
-               /* set the corresponding LVDS_BORDER bit */
-               temp |= dev_priv->lvds_border_bits;
-               /* Set the B0-B3 data pairs corresponding to whether we're going to
-                * set the DPLLs for dual-channel mode or not.
-                */
-               if (clock.p2 == 7)
-                       temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-               else
-                       temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-               /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
-                * appropriately here, but we need to look more thoroughly into how
-                * panels behave in the two modes.
-                */
-               temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-               if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-                       temp |= LVDS_HSYNC_POLARITY;
-               if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-                       temp |= LVDS_VSYNC_POLARITY;
-               I915_WRITE(PCH_LVDS, temp);
-       }
-       if (is_dp && !is_cpu_edp) {
+       if (is_dp && !is_cpu_edp)
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
-       } else {
-               /* For non-DP output, clear any trans DP clock recovery setting.*/
-               I915_WRITE(TRANSDATA_M1(pipe), 0);
-               I915_WRITE(TRANSDATA_N1(pipe), 0);
-               I915_WRITE(TRANSDPLINK_M1(pipe), 0);
-               I915_WRITE(TRANSDPLINK_N1(pipe), 0);
-       }
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->pre_pll_enable)
+                       encoder->pre_pll_enable(encoder);
  
        if (intel_crtc->pch_pll) {
                I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
  
        fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
  
-       if (is_cpu_edp)
-               ironlake_set_pll_edp(crtc, adjusted_mode->clock);
        ironlake_set_pipeconf(crtc, adjusted_mode, dither);
  
        intel_wait_for_vblank(dev, pipe);
@@@ -5759,20 -5437,13 +5598,13 @@@ static int haswell_crtc_mode_set(struc
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        int num_connectors = 0;
-       intel_clock_t clock, reduced_clock;
-       u32 dpll = 0, fp = 0, fp2 = 0;
-       bool ok, has_reduced_clock = false;
-       bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+       bool is_dp = false, is_cpu_edp = false;
        struct intel_encoder *encoder;
-       u32 temp;
        int ret;
        bool dither;
  
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                switch (encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       is_lvds = true;
-                       break;
                case INTEL_OUTPUT_DISPLAYPORT:
                        is_dp = true;
                        break;
        if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
                return -EINVAL;
  
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-               ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
-                                            &has_reduced_clock,
-                                            &reduced_clock);
-               if (!ok) {
-                       DRM_ERROR("Couldn't find PLL settings for mode!\n");
-                       return -EINVAL;
-               }
-       }
        /* Ensure that the cursor is valid for the new mode before changing... */
        intel_crtc_update_cursor(crtc, true);
  
        /* determine panel color depth */
        dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
                                              adjusted_mode);
-       if (is_lvds && dev_priv->lvds_dither)
-               dither = true;
  
        DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
        drm_mode_debug_printmodeline(mode);
  
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-               fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-               if (has_reduced_clock)
-                       fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-                             reduced_clock.m2;
-               dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
-                                            fp);
-               /* CPU eDP is the only output that doesn't need a PCH PLL of its
-                * own on pre-Haswell/LPT generation */
-               if (!is_cpu_edp) {
-                       struct intel_pch_pll *pll;
-                       pll = intel_get_pch_pll(intel_crtc, dpll, fp);
-                       if (pll == NULL) {
-                               DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
-                                                pipe);
-                               return -EINVAL;
-                       }
-               } else
-                       intel_put_pch_pll(intel_crtc);
-               /* The LVDS pin pair needs to be on before the DPLLs are
-                * enabled.  This is an exception to the general rule that
-                * mode_set doesn't turn things on.
-                */
-               if (is_lvds) {
-                       temp = I915_READ(PCH_LVDS);
-                       temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-                       if (HAS_PCH_CPT(dev)) {
-                               temp &= ~PORT_TRANS_SEL_MASK;
-                               temp |= PORT_TRANS_SEL_CPT(pipe);
-                       } else {
-                               if (pipe == 1)
-                                       temp |= LVDS_PIPEB_SELECT;
-                               else
-                                       temp &= ~LVDS_PIPEB_SELECT;
-                       }
-                       /* set the corresponding LVDS_BORDER bit */
-                       temp |= dev_priv->lvds_border_bits;
-                       /* Set the B0-B3 data pairs corresponding to whether
-                        * we're going to set the DPLLs for dual-channel mode or
-                        * not.
-                        */
-                       if (clock.p2 == 7)
-                               temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-                       else
-                               temp &= ~(LVDS_B0B3_POWER_UP |
-                                         LVDS_CLKB_POWER_UP);
-                       /* It would be nice to set 24 vs 18-bit mode
-                        * (LVDS_A3_POWER_UP) appropriately here, but we need to
-                        * look more thoroughly into how panels behave in the
-                        * two modes.
-                        */
-                       temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-                       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-                               temp |= LVDS_HSYNC_POLARITY;
-                       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-                               temp |= LVDS_VSYNC_POLARITY;
-                       I915_WRITE(PCH_LVDS, temp);
-               }
-       }
-       if (is_dp && !is_cpu_edp) {
+       if (is_dp && !is_cpu_edp)
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
-       } else {
-               if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-                       /* For non-DP output, clear any trans DP clock recovery
-                        * setting.*/
-                       I915_WRITE(TRANSDATA_M1(pipe), 0);
-                       I915_WRITE(TRANSDATA_N1(pipe), 0);
-                       I915_WRITE(TRANSDPLINK_M1(pipe), 0);
-                       I915_WRITE(TRANSDPLINK_N1(pipe), 0);
-               }
-       }
  
        intel_crtc->lowfreq_avail = false;
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-               if (intel_crtc->pch_pll) {
-                       I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-                       /* Wait for the clocks to stabilize. */
-                       POSTING_READ(intel_crtc->pch_pll->pll_reg);
-                       udelay(150);
-                       /* The pixel multiplier can only be updated once the
-                        * DPLL is enabled and the clocks are stable.
-                        *
-                        * So write it again.
-                        */
-                       I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-               }
-               if (intel_crtc->pch_pll) {
-                       if (is_lvds && has_reduced_clock && i915_powersave) {
-                               I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
-                               intel_crtc->lowfreq_avail = true;
-                       } else {
-                               I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
-                       }
-               }
-       }
  
        intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
  
        if (!is_dp || is_cpu_edp)
                ironlake_set_m_n(crtc, mode, adjusted_mode);
  
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               if (is_cpu_edp)
-                       ironlake_set_pll_edp(crtc, adjusted_mode->clock);
        haswell_set_pipeconf(crtc, adjusted_mode, dither);
  
        /* Set up the display plane register */
@@@ -6759,7 -6313,7 +6474,7 @@@ bool intel_get_load_detect_pipe(struct 
                return false;
        }
  
-       if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
+       if (intel_set_mode(crtc, mode, 0, 0, fb)) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);
@@@ -7086,18 -6640,11 +6801,18 @@@ static void do_intel_finish_page_flip(s
  
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
 -      if (work == NULL || !work->pending) {
 +
 +      /* Ensure we don't miss a work->pending update ... */
 +      smp_rmb();
 +
 +      if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }
  
 +      /* and that the unpin work is consistent wrt ->pending. */
 +      smp_rmb();
 +
        intel_crtc->unpin_work = NULL;
  
        if (work->event)
  
        obj = work->old_fb_obj;
  
-       atomic_clear_mask(1 << intel_crtc->plane,
-                         &obj->pending_flip.counter);
        wake_up(&dev_priv->pending_flip_queue);
  
        queue_work(dev_priv->wq, &work->work);
@@@ -7141,25 -6686,16 +6854,25 @@@ void intel_prepare_page_flip(struct drm
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
  
 +      /* NB: An MMIO update of the plane base pointer will also
 +       * generate a page-flip completion irq, i.e. every modeset
 +       * is also accompanied by a spurious intel_prepare_page_flip().
 +       */
        spin_lock_irqsave(&dev->event_lock, flags);
 -      if (intel_crtc->unpin_work) {
 -              if ((++intel_crtc->unpin_work->pending) > 1)
 -                      DRM_ERROR("Prepared flip multiple times\n");
 -      } else {
 -              DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
 -      }
 +      if (intel_crtc->unpin_work)
 +              atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
        spin_unlock_irqrestore(&dev->event_lock, flags);
  }
  
 +static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
 +{
 +      /* Ensure that the work item is consistent when activating it ... */
 +      smp_wmb();
 +      atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
 +      /* and that it is marked active as soon as the irq could fire. */
 +      smp_wmb();
 +}
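
The smp_wmb() pair here is the write side of the barriers added in do_intel_finish_page_flip() above; schematically (a sketch of the ordering contract, not patch code):

	/* queue_flip path:                 flip-done irq path:
	 *   write work fields                read work->pending (atomic)
	 *   smp_wmb()                        smp_rmb()
	 *   pending = INTEL_FLIP_PENDING     read work fields
	 *
	 * The reader can never observe the pending state without also
	 * observing the fully initialized work item. */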
 +
  static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
 +
 +      intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7235,7 -6769,6 +6948,7 @@@ static int intel_gen3_queue_flip(struc
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
  
 +      intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7282,8 -6815,6 +6995,8 @@@ static int intel_gen4_queue_flip(struc
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
 +
 +      intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7326,8 -6857,6 +7039,8 @@@ static int intel_gen6_queue_flip(struc
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
 +
 +      intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7382,8 -6911,6 +7095,8 @@@ static int intel_gen7_queue_flip(struc
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
 +
 +      intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
  
@@@ -7474,10 -7001,6 +7187,6 @@@ static int intel_crtc_page_flip(struct 
  
        work->enable_stall_check = true;
  
-       /* Block clients from rendering to the new back buffer until
-        * the flip occurs and the object is no longer visible.
-        */
-       atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        atomic_inc(&intel_crtc->unpin_work_count);
  
        ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
  
  cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
-       atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
@@@ -7904,16 -7426,21 +7612,21 @@@ intel_modeset_check_state(struct drm_de
        }
  }
  
- bool intel_set_mode(struct drm_crtc *crtc,
-                   struct drm_display_mode *mode,
-                   int x, int y, struct drm_framebuffer *fb)
+ int intel_set_mode(struct drm_crtc *crtc,
+                  struct drm_display_mode *mode,
+                  int x, int y, struct drm_framebuffer *fb)
  {
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+       struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
        struct intel_crtc *intel_crtc;
        unsigned disable_pipes, prepare_pipes, modeset_pipes;
-       bool ret = true;
+       int ret = 0;
+       saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+       if (!saved_mode)
+               return -ENOMEM;
+       saved_hwmode = saved_mode + 1;
  
        intel_modeset_affected_pipes(crtc, &modeset_pipes,
                                     &prepare_pipes, &disable_pipes);
        for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
                intel_crtc_disable(&intel_crtc->base);
  
-       saved_hwmode = crtc->hwmode;
-       saved_mode = crtc->mode;
+       *saved_hwmode = crtc->hwmode;
+       *saved_mode = crtc->mode;
  
        /* Hack: Because we don't (yet) support global modeset on multiple
         * crtcs, we don't keep track of the new mode for more than one crtc.
        if (modeset_pipes) {
                adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
                if (IS_ERR(adjusted_mode)) {
-                       return false;
+                       ret = PTR_ERR(adjusted_mode);
+                       goto out;
                }
        }
  
         * on the DPLL.
         */
        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
-               ret = !intel_crtc_mode_set(&intel_crtc->base,
-                                          mode, adjusted_mode,
-                                          x, y, fb);
-               if (!ret)
-                   goto done;
+               ret = intel_crtc_mode_set(&intel_crtc->base,
+                                         mode, adjusted_mode,
+                                         x, y, fb);
+               if (ret)
+                       goto done;
        }
  
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        /* FIXME: add subpixel order */
  done:
        drm_mode_destroy(dev, adjusted_mode);
-       if (!ret && crtc->enabled) {
-               crtc->hwmode = saved_hwmode;
-               crtc->mode = saved_mode;
+       if (ret && crtc->enabled) {
+               crtc->hwmode = *saved_hwmode;
+               crtc->mode = *saved_mode;
        } else {
                intel_modeset_check_state(dev);
        }
  
+ out:
+       kfree(saved_mode);
        return ret;
  }
  
+ void intel_crtc_restore_mode(struct drm_crtc *crtc)
+ {
+       intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+ }
  #undef for_each_intel_crtc_masked
  
  static void intel_set_config_free(struct intel_set_config *config)
@@@ -8144,6 -7679,10 +7865,6 @@@ intel_modeset_stage_output_state(struc
                        DRM_DEBUG_KMS("encoder changed, full mode switch\n");
                        config->mode_changed = true;
                }
 -
 -              /* Disable all disconnected encoders. */
 -              if (connector->base.status == connector_status_disconnected)
 -                      connector->new_encoder = NULL;
        }
        /* connector->new_encoder is now updated for all connectors. */
  
@@@ -8262,11 -7801,11 +7983,11 @@@ static int intel_crtc_set_config(struc
                        drm_mode_debug_printmodeline(set->mode);
                }
  
-               if (!intel_set_mode(set->crtc, set->mode,
-                                   set->x, set->y, set->fb)) {
-                       DRM_ERROR("failed to set mode on [CRTC:%d]\n",
-                                 set->crtc->base.id);
-                       ret = -EINVAL;
+               ret = intel_set_mode(set->crtc, set->mode,
+                                    set->x, set->y, set->fb);
+               if (ret) {
+                       DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+                                 set->crtc->base.id, ret);
                        goto fail;
                }
        } else if (config->fb_changed) {
@@@ -8283,8 -7822,8 +8004,8 @@@ fail
  
        /* Try to restore the config */
        if (config->mode_changed &&
-           !intel_set_mode(save_set.crtc, save_set.mode,
-                           save_set.x, save_set.y, save_set.fb))
+           intel_set_mode(save_set.crtc, save_set.mode,
+                          save_set.x, save_set.y, save_set.fb))
                DRM_ERROR("failed to restore config after modeset failure\n");
  
  out_config:
@@@ -8303,7 -7842,7 +8024,7 @@@ static const struct drm_crtc_funcs inte
  
  static void intel_cpu_pll_init(struct drm_device *dev)
  {
-       if (IS_HASWELL(dev))
+       if (HAS_DDI(dev))
                intel_ddi_pll_init(dev);
  }
  
@@@ -8439,11 -7978,10 +8160,10 @@@ static void intel_setup_outputs(struct 
                I915_WRITE(PFIT_CONTROL, 0);
        }
  
-       if (!(IS_HASWELL(dev) &&
-             (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+       if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
                intel_crt_init(dev);
  
-       if (IS_HASWELL(dev)) {
+       if (HAS_DDI(dev)) {
                int found;
  
                /* Haswell uses DDI functions to detect digital outputs */
                        intel_encoder_clones(encoder);
        }
  
 -      if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 -              ironlake_init_pch_refclk(dev);
 +      intel_init_pch_refclk(dev);
  
        drm_helper_move_panel_connectors_to_head(dev);
  }
@@@ -8686,7 -8225,7 +8406,7 @@@ static void intel_init_display(struct d
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        /* We always want a DPMS function */
-       if (IS_HASWELL(dev)) {
+       if (HAS_DDI(dev)) {
                dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
                } else if (IS_HASWELL(dev)) {
                        dev_priv->display.fdi_link_train = hsw_fdi_link_train;
                        dev_priv->display.write_eld = haswell_write_eld;
-               } else
-                       dev_priv->display.update_wm = NULL;
+               }
        } else if (IS_G4X(dev)) {
                dev_priv->display.write_eld = g4x_write_eld;
        }
@@@ -8982,6 -8520,9 +8701,9 @@@ void intel_modeset_init(struct drm_devi
        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);
+       /* Just in case the BIOS is doing something questionable. */
+       intel_disable_fbc(dev);
  }
  
  static void
@@@ -9163,23 -8704,6 +8885,23 @@@ static void intel_sanitize_encoder(stru
         * the crtc fixup. */
  }
  
 +static void i915_redisable_vga(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 vga_reg;
 +
 +      if (HAS_PCH_SPLIT(dev))
 +              vga_reg = CPU_VGACNTRL;
 +      else
 +              vga_reg = VGACNTRL;
 +
 +      if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 +              DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 +              I915_WRITE(vga_reg, VGA_DISP_DISABLE);
 +              POSTING_READ(vga_reg);
 +      }
 +}
 +
  /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
   * and i915 state tracking structures. */
  void intel_modeset_setup_hw_state(struct drm_device *dev,
        struct intel_encoder *encoder;
        struct intel_connector *connector;
  
-       if (IS_HASWELL(dev)) {
+       if (HAS_DDI(dev)) {
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
  
                if (tmp & TRANS_DDI_FUNC_ENABLE) {
                              crtc->active ? "enabled" : "disabled");
        }
  
-       if (IS_HASWELL(dev))
+       if (HAS_DDI(dev))
                intel_ddi_setup_hw_pll_state(dev);
  
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
        }
  
        if (force_restore) {
 -              for_each_pipe(pipe)
 +              for_each_pipe(pipe) {
-                       crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-                       intel_set_mode(&crtc->base, &crtc->base.mode,
-                                      crtc->base.x, crtc->base.y, crtc->base.fb);
+                       intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
 +              }
 +
 +              i915_redisable_vga(dev);
        } else {
                intel_modeset_update_staged_output_state(dev);
        }
@@@ -9350,6 -8869,8 +9070,8 @@@ void intel_modeset_cleanup(struct drm_d
        flush_scheduled_work();
  
        drm_mode_config_cleanup(dev);
+       intel_cleanup_overlay(dev);
  }
  
  /*
diff --combined drivers/gpu/drm/i915/intel_dp.c
index 1b63d55318a0b466cdd45f6fcfb9af9259473557,1dd89d5fe51115a6ba2715ed153d323621852ab6..5f12eb2d0fb5cca95feeb6d33fea88baf726997d
@@@ -148,15 -148,6 +148,6 @@@ intel_dp_max_link_bw(struct intel_dp *i
        return max_link_bw;
  }
  
- static int
- intel_dp_link_clock(uint8_t link_bw)
- {
-       if (link_bw == DP_LINK_BW_2_7)
-               return 270000;
-       else
-               return 162000;
- }
  /*
   * The units on the numbers in the next two are... bizarre.  Examples will
   * make it clearer; this one parallels an example in the eDP spec.
@@@ -191,7 -182,8 +182,8 @@@ intel_dp_adjust_dithering(struct intel_
                          struct drm_display_mode *mode,
                          bool adjust_mode)
  {
-       int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+       int max_link_clock =
+               drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
        int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
        int max_rate, mode_rate;
  
@@@ -330,6 -322,48 +322,48 @@@ intel_dp_check_edp(struct intel_dp *int
        }
  }
  
+ static uint32_t
+ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t ch_ctl = intel_dp->output_reg + 0x10;
+       uint32_t status;
+       bool done;
+       if (IS_HASWELL(dev)) {
+               switch (intel_dig_port->port) {
+               case PORT_A:
+                       ch_ctl = DPA_AUX_CH_CTL;
+                       break;
+               case PORT_B:
+                       ch_ctl = PCH_DPB_AUX_CH_CTL;
+                       break;
+               case PORT_C:
+                       ch_ctl = PCH_DPC_AUX_CH_CTL;
+                       break;
+               case PORT_D:
+                       ch_ctl = PCH_DPD_AUX_CH_CTL;
+                       break;
+               default:
+                       BUG();
+               }
+       }
+ #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+       if (has_aux_irq)
+               done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+       else
+               done = wait_for_atomic(C, 10) == 0;
+       if (!done)
+               DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+                         has_aux_irq);
+ #undef C
+       return status;
+ }
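
The throwaway C macro lets one condition expression feed either wait primitive: wait_event_timeout() when the AUX-done interrupt is available, a wait_for_atomic() poll otherwise; the #undef keeps the name from leaking past the function. The same idiom in isolation (a sketch with a renamed macro):

#define AUX_IDLE \
	((I915_READ_NOTRACE(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue,
					  AUX_IDLE, 10);
	else
		done = wait_for_atomic(AUX_IDLE, 10) == 0;
#undef AUX_IDLE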
  static int
  intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = output_reg + 0x10;
        uint32_t ch_data = ch_ctl + 4;
-       int i;
-       int recv_bytes;
+       int i, ret, recv_bytes;
        uint32_t status;
        uint32_t aux_clock_divider;
        int try, precharge;
+       bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+       /* dp aux is extremely sensitive to irq latency, hence request the
+        * lowest possible wakeup latency to keep the cpu from going into
+        * deep sleep states.
+        */
+       pm_qos_update_request(&dev_priv->pm_qos, 0);
  
        if (IS_HASWELL(dev)) {
                switch (intel_dig_port->port) {
         * clock divider.
         */
        if (is_cpu_edp(intel_dp)) {
-               if (IS_HASWELL(dev))
+               if (HAS_DDI(dev))
                        aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
                else if (IS_VALLEYVIEW(dev))
                        aux_clock_divider = 100;
  
        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
-               status = I915_READ(ch_ctl);
+               status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        if (try == 3) {
                WARN(1, "dp_aux_ch not started status 0x%08x\n",
                     I915_READ(ch_ctl));
-               return -EBUSY;
+               ret = -EBUSY;
+               goto out;
        }
  
        /* Must try at least 3 times according to DP spec */
                /* Send the command and wait for it to complete */
                I915_WRITE(ch_ctl,
                           DP_AUX_CH_CTL_SEND_BUSY |
+                          (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
                           DP_AUX_CH_CTL_TIME_OUT_400us |
                           (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                           (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);
-               for (;;) {
-                       status = I915_READ(ch_ctl);
-                       if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
-                               break;
-                       udelay(100);
-               }
+               status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
  
                /* Clear done status and any errors */
                I915_WRITE(ch_ctl,
  
        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto out;
        }
  
        /* Check for timeout or receive error.
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
  
        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
-               return -ETIMEDOUT;
+               ret = -ETIMEDOUT;
+               goto out;
        }
  
        /* Unload any bytes sent back from the other side */
                unpack_aux(I915_READ(ch_data + i),
                           recv + i, recv_bytes - i);
  
-       return recv_bytes;
+       ret = recv_bytes;
+ out:
+       pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+       return ret;
  }
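
Rewriting the early returns as goto out is what makes the pm_qos bracketing above reliable: every exit, success or failure, restores the default QoS request. A minimal sketch of the single-exit cleanup pattern, with printf() standing in for the two pm_qos_update_request() calls:

#include <stdio.h>

/* Every path funnels through 'out' so the paired resource (here the
 * low-latency QoS request) is always released. */
static int do_transfer(int fail_step)
{
        int ret;

        printf("qos: request lowest wakeup latency\n");

        if (fail_step == 1) {
                ret = -16;                      /* -EBUSY */
                goto out;
        }
        if (fail_step == 2) {
                ret = -5;                       /* -EIO */
                goto out;
        }
        ret = 4;                                /* bytes received */
out:
        printf("qos: restore default\n");
        return ret;
}

int main(void)
{
        printf("ok: %d\n", do_transfer(0));
        printf("busy: %d\n", do_transfer(1));
        return 0;
}
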
  
  /* Write data to the aux channel in native mode */
@@@ -722,12 -767,15 +767,15 @@@ intel_dp_mode_fixup(struct drm_encoder 
  
        for (clock = 0; clock <= max_clock; clock++) {
                for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-                       int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+                       int link_bw_clock =
+                               drm_dp_bw_code_to_link_rate(bws[clock]);
+                       int link_avail = intel_dp_max_data_rate(link_bw_clock,
+                                                               lane_count);
  
                        if (mode_rate <= link_avail) {
                                intel_dp->link_bw = bws[clock];
                                intel_dp->lane_count = lane_count;
-                               adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+                               adjusted_mode->clock = link_bw_clock;
                                DRM_DEBUG_KMS("DP link bw %02x lane "
                                                "count %d clock %d bpp %d\n",
                                       intel_dp->link_bw, intel_dp->lane_count,
        return false;
  }
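
The fixup loop above is a greedy search: walk the link-bw codes from the lowest rate up, doubling the lane count within each rate, and take the first combination whose capacity covers the mode. A standalone sketch with the 1.62/2.7 GHz rates; the 8b/10b capacity factor is an illustrative assumption, not necessarily the exact formula intel_dp_max_data_rate() applies:

#include <stdio.h>

int main(void)
{
        int rates[] = { 162000, 270000 };       /* link clock per lane, kHz */
        int mode_rate = 148500 * 3;             /* 1080p60 at 24bpp */

        for (int r = 0; r < 2; r++) {
                for (int lanes = 1; lanes <= 4; lanes <<= 1) {
                        int avail = rates[r] / 10 * 8 * lanes;  /* 8b/10b */
                        if (mode_rate <= avail) {
                                printf("picked %d kHz x %d lanes\n",
                                       rates[r], lanes);
                                return 0;
                        }
                }
        }
        printf("mode exceeds link capacity\n");
        return 0;
}
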
  
- struct intel_dp_m_n {
-       uint32_t        tu;
-       uint32_t        gmch_m;
-       uint32_t        gmch_n;
-       uint32_t        link_m;
-       uint32_t        link_n;
- };
- static void
- intel_reduce_ratio(uint32_t *num, uint32_t *den)
- {
-       while (*num > 0xffffff || *den > 0xffffff) {
-               *num >>= 1;
-               *den >>= 1;
-       }
- }
- static void
- intel_dp_compute_m_n(int bpp,
-                    int nlanes,
-                    int pixel_clock,
-                    int link_clock,
-                    struct intel_dp_m_n *m_n)
- {
-       m_n->tu = 64;
-       m_n->gmch_m = (pixel_clock * bpp) >> 3;
-       m_n->gmch_n = link_clock * nlanes;
-       intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
-       m_n->link_m = pixel_clock;
-       m_n->link_n = link_clock;
-       intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
- }
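
The deleted helpers live on as the shared intel_link_compute_m_n(): M is the pixel payload per clock, N the link capacity, and both are shifted down in lockstep until they fit the hardware's 24-bit M/N fields. A standalone sketch of that arithmetic, reusing the 0xffffff bound from the removed code:

#include <stdint.h>
#include <stdio.h>

/* Shift num and den down together until both fit in 24 bits,
 * approximately preserving the ratio (cf. intel_reduce_ratio above). */
static void reduce_ratio(uint32_t *num, uint32_t *den)
{
        while (*num > 0xffffff || *den > 0xffffff) {
                *num >>= 1;
                *den >>= 1;
        }
}

int main(void)
{
        uint32_t bpp = 24, nlanes = 4;
        uint32_t pixel_clock = 148500;          /* kHz */
        uint32_t link_clock = 270000;           /* kHz */
        uint32_t m = (pixel_clock * bpp) >> 3;  /* payload bytes per clock */
        uint32_t n = link_clock * nlanes;       /* link byte capacity */

        reduce_ratio(&m, &n);
        printf("data M/N = %u/%u\n", (unsigned)m, (unsigned)n);
        return 0;
}
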
  void
  intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int lane_count = 4;
-       struct intel_dp_m_n m_n;
+       struct intel_link_m_n m_n;
        int pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
  
         * the number of bytes_per_pixel post-LUT, which we always
         * set up for 8-bits of R/G/B, or 3 bytes total.
         */
-       intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
-                            mode->clock, adjusted_mode->clock, &m_n);
+       intel_link_compute_m_n(intel_crtc->bpp, lane_count,
+                              mode->clock, adjusted_mode->clock, &m_n);
  
        if (IS_HASWELL(dev)) {
                I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@@ -851,6 -866,32 +866,32 @@@ void intel_dp_init_link_config(struct i
        }
  }
  
+ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+ {
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 dpa_ctl;
+ 
+       DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+       dpa_ctl = I915_READ(DP_A);
+       dpa_ctl &= ~DP_PLL_FREQ_MASK;
+       if (clock < 200000) {
+               /* For a long time we've carried around an ILK-DevA w/a for the
+                * 160MHz clock. If we're really unlucky, it's still required.
+                */
+               DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+               dpa_ctl |= DP_PLL_FREQ_160MHZ;
+       } else {
+               dpa_ctl |= DP_PLL_FREQ_270MHZ;
+       }
+       I915_WRITE(DP_A, dpa_ctl);
+       POSTING_READ(DP_A);
+       udelay(500);
+ }
+ 
  static void
  intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
        } else {
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        }
+       if (is_cpu_edp(intel_dp))
+               ironlake_set_pll_edp(crtc, adjusted_mode->clock);
  }
  
  #define IDLE_ON_MASK          (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
@@@ -1543,7 -1587,7 +1587,7 @@@ intel_get_adjust_train(struct intel_dp 
  }
  
  static uint32_t
- intel_dp_signal_levels(uint8_t train_set)
+ intel_gen4_signal_levels(uint8_t train_set)
  {
        uint32_t        signal_levels = 0;
  
@@@ -1641,7 -1685,7 +1685,7 @@@ intel_gen7_edp_signal_levels(uint8_t tr
  
  /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
  static uint32_t
- intel_dp_signal_levels_hsw(uint8_t train_set)
+ intel_hsw_signal_levels(uint8_t train_set)
  {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        }
  }
  
+ /* Properly updates "DP" with the correct signal levels. */
+ static void
+ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       uint32_t signal_levels, mask;
+       uint8_t train_set = intel_dp->train_set[0];
+ 
+       if (IS_HASWELL(dev)) {
+               signal_levels = intel_hsw_signal_levels(train_set);
+               mask = DDI_BUF_EMP_MASK;
+       } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+               signal_levels = intel_gen7_edp_signal_levels(train_set);
+               mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+       } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+               signal_levels = intel_gen6_edp_signal_levels(train_set);
+               mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+       } else {
+               signal_levels = intel_gen4_signal_levels(train_set);
+               mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+       }
+       DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+       *DP = (*DP & ~mask) | signal_levels;
+ }
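
Collapsing the four per-platform ladders into intel_dp_set_signal_levels() leaves a single read-modify-write at the end: clear the platform's voltage/emphasis field, then OR in the new levels. The idiom in isolation, with made-up mask and level values:

#include <stdint.h>
#include <stdio.h>

/* Replace only the bits selected by mask, leaving the rest intact. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t levels)
{
        return (reg & ~mask) | levels;
}

int main(void)
{
        uint32_t DP = 0x80000123;       /* arbitrary register contents */
        uint32_t mask = 0x0000ff00;     /* hypothetical emphasis field */

        printf("0x%08x\n", (unsigned)set_field(DP, mask, 0x00004200));
        return 0;
}
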
  static bool
  intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t dp_reg_value,
@@@ -1791,7 -1863,7 +1863,7 @@@ intel_dp_start_link_train(struct intel_
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
  
-       if (IS_HASWELL(dev))
+       if (HAS_DDI(dev))
                intel_ddi_prepare_link_retrain(encoder);
  
        /* Write the link configuration data */
        for (;;) {
                /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
-               uint32_t    signal_levels;
-               if (IS_HASWELL(dev)) {
-                       signal_levels = intel_dp_signal_levels_hsw(
-                                                       intel_dp->train_set[0]);
-                       DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
-               } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
-                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
-                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
-               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
-                       signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
-                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
-               } else {
-                       signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
-                       DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
-               }
-               DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
-                             signal_levels);
+               intel_dp_set_signal_levels(intel_dp, &DP);
  
                /* Set training pattern 1 */
                if (!intel_dp_set_link_train(intel_dp, DP,
  void
  intel_dp_complete_link_train(struct intel_dp *intel_dp)
  {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
        cr_tries = 0;
        channel_eq = false;
        for (;;) {
-               /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
-               uint32_t    signal_levels;
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
  
                if (cr_tries > 5) {
                        break;
                }
  
-               if (IS_HASWELL(dev)) {
-                       signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
-                       DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
-               } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
-                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
-                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
-               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
-                       signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
-                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
-               } else {
-                       signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
-                       DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
-               }
+               intel_dp_set_signal_levels(intel_dp, &DP);
  
                /* channel eq pattern */
                if (!intel_dp_set_link_train(intel_dp, DP,
@@@ -1964,6 -2005,8 +2005,8 @@@ intel_dp_link_down(struct intel_dp *int
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(intel_dig_port->base.base.crtc);
        uint32_t DP = intel_dp->DP;
  
        /*
         *   intel_ddi_prepare_link_retrain will take care of redoing the link
         *   train.
         */
-       if (IS_HASWELL(dev))
+       if (HAS_DDI(dev))
                return;
  
        if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
        }
        POSTING_READ(intel_dp->output_reg);
  
-       msleep(17);
+       /* We don't really know why we're doing this */
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
  
        if (HAS_PCH_IBX(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
                /* Changes to enable or select take place the vblank
                 * after being written.
                 */
-               if (crtc == NULL) {
-                       /* We can arrive here never having been attached
-                        * to a CRTC, for instance, due to inheriting
-                        * random state from the BIOS.
-                        *
-                        * If the pipe is not running, play safe and
-                        * wait for the clocks to stabilise before
-                        * continuing.
-                        */
+               if (WARN_ON(crtc == NULL)) {
+                       /* We should never try to disable a port without a crtc
+                        * attached. For paranoia keep the code around for a
+                        * bit. */
                        POSTING_READ(intel_dp->output_reg);
                        msleep(50);
                } else
-                       intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+                       intel_wait_for_vblank(dev, intel_crtc->pipe);
        }
  
        DP &= ~DP_AUDIO_OUTPUT_ENABLE;
  static bool
  intel_dp_get_dpcd(struct intel_dp *intel_dp)
  {
+       char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+ 
        if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
                                           sizeof(intel_dp->dpcd)) == 0)
                return false; /* aux transfer failed */
  
+       hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+                          32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+       DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+ 
        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */
  
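sizeof(intel_dp->dpcd) * 3 sizes the dump buffer at three characters per byte ("xx "), with the final separator slot absorbing the terminating NUL, matching what hex_dump_to_buffer() emits for one-byte groups. The same formatting in a user-space sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t dpcd[4] = { 0x12, 0x0a, 0x84, 0x01 };
        char buf[sizeof(dpcd) * 3];     /* "xx " per byte, last " " -> NUL */
        char *p = buf;

        for (size_t i = 0; i < sizeof(dpcd); i++)
                p += sprintf(p, i + 1 < sizeof(dpcd) ? "%02x " : "%02x",
                             dpcd[i]);
        printf("DPCD: %s\n", buf);
        return 0;
}
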
@@@ -2206,6 -2251,8 +2251,8 @@@ static enum drm_connector_statu
  ironlake_dp_detect(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum drm_connector_status status;
  
        /* Can't disconnect eDP, but you can close the lid... */
                return status;
        }
  
+       if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+               return connector_status_disconnected;
        return intel_dp_detect_dpcd(intel_dp);
  }
  
@@@ -2290,13 -2340,6 +2340,6 @@@ intel_dp_get_edid_modes(struct drm_conn
        return intel_ddc_get_modes(connector, adapter);
  }
  
- /**
-  * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
-  *
-  * \return true if DP port is connected.
-  * \return false if DP port is disconnected.
-  */
  static enum drm_connector_status
  intel_dp_detect(struct drm_connector *connector, bool force)
  {
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        struct edid *edid = NULL;
-       char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
  
        intel_dp->has_audio = false;
  
        else
                status = g4x_dp_detect(intel_dp);
  
-       hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
-                          32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
-       DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
        if (status != connector_status_connected)
                return status;
  
@@@ -2445,11 -2483,8 +2483,8 @@@ intel_dp_set_property(struct drm_connec
        return -EINVAL;
  
  done:
-       if (intel_encoder->base.crtc) {
-               struct drm_crtc *crtc = intel_encoder->base.crtc;
-               intel_set_mode(crtc, &crtc->mode,
-                              crtc->x, crtc->y, crtc->fb);
-       }
+       if (intel_encoder->base.crtc)
+               intel_crtc_restore_mode(intel_encoder->base.crtc);
  
        return 0;
  }
@@@ -2569,8 -2604,8 +2604,8 @@@ intel_dp_add_properties(struct intel_d
  
        if (is_edp(intel_dp)) {
                drm_mode_create_scaling_mode_property(connector->dev);
 -              drm_connector_attach_property(
 -                      connector,
 +              drm_object_attach_property(
 +                      &connector->base,
                        connector->dev->mode_config.scaling_mode_property,
                        DRM_MODE_SCALE_ASPECT);
                intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
@@@ -2742,7 -2777,7 +2777,7 @@@ intel_dp_init_connector(struct intel_di
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
  
-       if (IS_HASWELL(dev))
+       if (HAS_DDI(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
index 8a1bd4a3ad0dc724ba4a166f7b0915d5e1795208,116580b623dd232a5c6a6c27b5f5a88c97d3c1e6..54a034c82061fdbdc87012f8438e0b89de2af38b
@@@ -153,6 -153,7 +153,7 @@@ struct intel_encoder 
        bool cloneable;
        bool connectors_active;
        void (*hot_plug)(struct intel_encoder *);
+       void (*pre_pll_enable)(struct intel_encoder *);
        void (*pre_enable)(struct intel_encoder *);
        void (*enable)(struct intel_encoder *);
        void (*disable)(struct intel_encoder *);
@@@ -401,10 -402,7 +402,10 @@@ struct intel_unpin_work 
        struct drm_i915_gem_object *old_fb_obj;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
 -      int pending;
 +      atomic_t pending;
 +#define INTEL_FLIP_INACTIVE   0
 +#define INTEL_FLIP_PENDING    1
 +#define INTEL_FLIP_COMPLETE   2
        bool enable_stall_check;
  };
  
@@@ -443,6 -441,7 +444,7 @@@ extern void intel_mark_idle(struct drm_
  extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
  extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
  extern bool intel_lvds_init(struct drm_device *dev);
+ extern bool intel_is_dual_link_lvds(struct drm_device *dev);
  extern void intel_dp_init(struct drm_device *dev, int output_reg,
                          enum port port);
  extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@@ -502,9 -501,10 +504,10 @@@ struct intel_set_config 
        bool mode_changed;
  };
  
- extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                          int x, int y, struct drm_framebuffer *old_fb);
+ extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                         int x, int y, struct drm_framebuffer *old_fb);
  extern void intel_modeset_disable(struct drm_device *dev);
+ extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
  extern void intel_crtc_load_lut(struct drm_crtc *crtc);
  extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
  extern void intel_encoder_noop(struct drm_encoder *encoder);
@@@ -546,6 -546,9 +549,9 @@@ hdmi_to_dig_port(struct intel_hdmi *int
        return container_of(intel_hdmi, struct intel_digital_port, hdmi);
  }
  
+ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+                               struct intel_digital_port *port);
  extern void intel_connector_attach_encoder(struct intel_connector *connector,
                                           struct intel_encoder *encoder);
  extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@@ -559,7 -562,6 +565,7 @@@ intel_pipe_to_cpu_transcoder(struct drm
                             enum pipe pipe);
  extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
  extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 +extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
  
  struct intel_load_detect_pipe {
        struct drm_framebuffer *release_fb;
@@@ -589,6 -591,7 +595,7 @@@ extern int intel_framebuffer_init(struc
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj);
  extern int intel_fbdev_init(struct drm_device *dev);
+ extern void intel_fbdev_initial_config(struct drm_device *dev);
  extern void intel_fbdev_fini(struct drm_device *dev);
  extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
  extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
index e6f54ffab3ba4fa1dcf25b2e2dfe9a1f745ced07,abfff29ed13a0c83edf91a926ae56037d6934c96..5a8a72c5a89d2ccf1a84dc6133304fe10d58f792
@@@ -405,7 -405,7 +405,7 @@@ void intel_update_fbc(struct drm_devic
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
 -              if (tmp_crtc->enabled &&
 +              if (to_intel_crtc(tmp_crtc)->active &&
                    !to_intel_crtc(tmp_crtc)->primary_disabled &&
                    tmp_crtc->fb) {
                        if (crtc) {
                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
-       if (intel_fb->obj->base.size > dev_priv->cfb_size) {
-               DRM_DEBUG_KMS("framebuffer too large, disabling "
-                             "compression\n");
-               dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-               goto out_disable;
-       }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
        if (in_dbg_master())
                goto out_disable;
  
+       if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+               DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
+               DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+               DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+               dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+               goto out_disable;
+       }
        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
@@@ -526,6 -528,7 +528,7 @@@ out_disable
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                intel_disable_fbc(dev);
        }
+       i915_gem_stolen_cleanup_compression(dev);
  }
  
  static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@@ -992,7 -995,7 +995,7 @@@ static struct drm_crtc *single_enabled_
        struct drm_crtc *crtc, *enabled = NULL;
  
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 -              if (crtc->enabled && crtc->fb) {
 +              if (to_intel_crtc(crtc)->active && crtc->fb) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
@@@ -1086,7 -1089,7 +1089,7 @@@ static bool g4x_compute_wm0(struct drm_
        int entries, tlb_miss;
  
        crtc = intel_get_crtc_for_plane(dev, plane);
 -      if (crtc->fb == NULL || !crtc->enabled) {
 +      if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
@@@ -1215,7 -1218,7 +1218,7 @@@ static bool vlv_compute_drain_latency(s
        int entries;
  
        crtc = intel_get_crtc_for_plane(dev, plane);
 -      if (crtc->fb == NULL || !crtc->enabled)
 +      if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
                return false;
  
        clock = crtc->mode.clock;       /* VESA DOT Clock */
@@@ -1286,7 -1289,6 +1289,7 @@@ static void valleyview_update_wm(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
 +      int ignore_plane_sr, ignore_cursor_sr;
        unsigned int enabled = 0;
  
        vlv_update_drain_latency(dev);
                            &planeb_wm, &cursorb_wm))
                enabled |= 2;
  
 -      plane_sr = cursor_sr = 0;
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
 -                           &plane_sr, &cursor_sr))
 +                           &plane_sr, &ignore_cursor_sr) &&
 +          g4x_compute_srwm(dev, ffs(enabled) - 1,
 +                           2*sr_latency_ns,
 +                           &valleyview_wm_info,
 +                           &valleyview_cursor_wm_info,
 +                           &ignore_plane_sr, &cursor_sr)) {
                I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
 -      else
 +      } else {
                I915_WRITE(FW_BLC_SELF_VLV,
                           I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
 +              plane_sr = cursor_sr = 0;
 +      }
  
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
 -                 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
 +                 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        I915_WRITE(DSPFW3,
 -                 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
 +                 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
 +                 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  }
  
  static void g4x_update_wm(struct drm_device *dev)
                            &planeb_wm, &cursorb_wm))
                enabled |= 2;
  
 -      plane_sr = cursor_sr = 0;
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
 -                           &plane_sr, &cursor_sr))
 +                           &plane_sr, &cursor_sr)) {
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
 -      else
 +      } else {
                I915_WRITE(FW_BLC_SELF,
                           I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
 +              plane_sr = cursor_sr = 0;
 +      }
  
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
 -                 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
 +                 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
 -                 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
 +                 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  }
  
@@@ -1476,7 -1470,7 +1479,7 @@@ static void i9xx_update_wm(struct drm_d
  
        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
 -      if (crtc->enabled && crtc->fb) {
 +      if (to_intel_crtc(crtc)->active && crtc->fb) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
  
        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
 -      if (crtc->enabled && crtc->fb) {
 +      if (to_intel_crtc(crtc)->active && crtc->fb) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
@@@ -1819,110 -1813,8 +1822,110 @@@ static void sandybridge_update_wm(struc
                enabled |= 2;
        }
  
 -      if ((dev_priv->num_pipe == 3) &&
 -          g4x_compute_wm0(dev, 2,
 +      /*
 +       * Calculate and update the self-refresh watermark only when one
 +       * display plane is used.
 +       *
 +       * SNB supports 3 levels of watermarks.
 +       *
 +       * WM1/WM2/WM3 watermarks have to be enabled in ascending order
 +       * and disabled in descending order.
 +       */
 +      I915_WRITE(WM3_LP_ILK, 0);
 +      I915_WRITE(WM2_LP_ILK, 0);
 +      I915_WRITE(WM1_LP_ILK, 0);
 +
 +      if (!single_plane_enabled(enabled) ||
 +          dev_priv->sprite_scaling_enabled)
 +              return;
 +      enabled = ffs(enabled) - 1;
 +
 +      /* WM1 */
 +      if (!ironlake_compute_srwm(dev, 1, enabled,
 +                                 SNB_READ_WM1_LATENCY() * 500,
 +                                 &sandybridge_display_srwm_info,
 +                                 &sandybridge_cursor_srwm_info,
 +                                 &fbc_wm, &plane_wm, &cursor_wm))
 +              return;
 +
 +      I915_WRITE(WM1_LP_ILK,
 +                 WM1_LP_SR_EN |
 +                 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 +                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 +                 (plane_wm << WM1_LP_SR_SHIFT) |
 +                 cursor_wm);
 +
 +      /* WM2 */
 +      if (!ironlake_compute_srwm(dev, 2, enabled,
 +                                 SNB_READ_WM2_LATENCY() * 500,
 +                                 &sandybridge_display_srwm_info,
 +                                 &sandybridge_cursor_srwm_info,
 +                                 &fbc_wm, &plane_wm, &cursor_wm))
 +              return;
 +
 +      I915_WRITE(WM2_LP_ILK,
 +                 WM2_LP_EN |
 +                 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 +                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 +                 (plane_wm << WM1_LP_SR_SHIFT) |
 +                 cursor_wm);
 +
 +      /* WM3 */
 +      if (!ironlake_compute_srwm(dev, 3, enabled,
 +                                 SNB_READ_WM3_LATENCY() * 500,
 +                                 &sandybridge_display_srwm_info,
 +                                 &sandybridge_cursor_srwm_info,
 +                                 &fbc_wm, &plane_wm, &cursor_wm))
 +              return;
 +
 +      I915_WRITE(WM3_LP_ILK,
 +                 WM3_LP_EN |
 +                 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 +                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 +                 (plane_wm << WM1_LP_SR_SHIFT) |
 +                 cursor_wm);
 +}
 +
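
The restructured function above encodes the rule from its comment: zero WM3..WM1 first (descending disable), then program WM1, WM2, WM3 in that order, bailing at the first level whose budget cannot be met. A compressed sketch of the ladder; compute_level() is a hypothetical stand-in for ironlake_compute_srwm():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical: returns false when a level's FIFO budget can't be met. */
static bool compute_level(int level, int *wm)
{
        if (level == 3)
                return false;           /* pretend WM3 doesn't fit */
        *wm = level * 8;
        return true;
}

int main(void)
{
        int wm;

        /* Disable in descending order... */
        for (int level = 3; level >= 1; level--)
                printf("WM%d_LP <- 0\n", level);

        /* ...then enable in ascending order, stopping on failure. */
        for (int level = 1; level <= 3; level++) {
                if (!compute_level(level, &wm))
                        break;
                printf("WM%d_LP <- enabled, plane=%d\n", level, wm);
        }
        return 0;
}
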
 +static void ivybridge_update_wm(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
 +      u32 val;
 +      int fbc_wm, plane_wm, cursor_wm;
 +      int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
 +      unsigned int enabled;
 +
 +      enabled = 0;
 +      if (g4x_compute_wm0(dev, 0,
 +                          &sandybridge_display_wm_info, latency,
 +                          &sandybridge_cursor_wm_info, latency,
 +                          &plane_wm, &cursor_wm)) {
 +              val = I915_READ(WM0_PIPEA_ILK);
 +              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 +              I915_WRITE(WM0_PIPEA_ILK, val |
 +                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 +              DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
 +                            " plane %d, cursor: %d\n",
 +                            plane_wm, cursor_wm);
 +              enabled |= 1;
 +      }
 +
 +      if (g4x_compute_wm0(dev, 1,
 +                          &sandybridge_display_wm_info, latency,
 +                          &sandybridge_cursor_wm_info, latency,
 +                          &plane_wm, &cursor_wm)) {
 +              val = I915_READ(WM0_PIPEB_ILK);
 +              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 +              I915_WRITE(WM0_PIPEB_ILK, val |
 +                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 +              DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
 +                            " plane %d, cursor: %d\n",
 +                            plane_wm, cursor_wm);
 +              enabled |= 2;
 +      }
 +
 +      if (g4x_compute_wm0(dev, 2,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
  
 -      /* WM3 */
 +      /* WM3, note we have to correct the cursor latency */
        if (!ironlake_compute_srwm(dev, 3, enabled,
                                   SNB_READ_WM3_LATENCY() * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
 -                                 &fbc_wm, &plane_wm, &cursor_wm))
 +                                 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
 +          !ironlake_compute_srwm(dev, 3, enabled,
 +                                 2 * SNB_READ_WM3_LATENCY() * 500,
 +                                 &sandybridge_display_srwm_info,
 +                                 &sandybridge_cursor_srwm_info,
 +                                 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
                return;
  
        I915_WRITE(WM3_LP_ILK,
@@@ -2044,7 -1931,7 +2047,7 @@@ sandybridge_compute_sprite_wm(struct dr
        int entries, tlb_miss;
  
        crtc = intel_get_crtc_for_plane(dev, plane);
 -      if (crtc->fb == NULL || !crtc->enabled) {
 +      if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
                *sprite_wm = display->guard_size;
                return false;
        }
@@@ -2494,9 -2381,15 +2497,9 @@@ int intel_enable_rc6(const struct drm_d
        if (i915_enable_rc6 >= 0)
                return i915_enable_rc6;
  
 -      if (INTEL_INFO(dev)->gen == 5) {
 -#ifdef CONFIG_INTEL_IOMMU
 -              /* Disable rc6 on ilk if VT-d is on. */
 -              if (intel_iommu_gfx_mapped)
 -                      return false;
 -#endif
 -              DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
 -              return INTEL_RC6_ENABLE;
 -      }
 +      /* Disable RC6 on Ironlake */
 +      if (INTEL_INFO(dev)->gen == 5)
 +              return 0;
  
        if (IS_HASWELL(dev)) {
                DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
@@@ -3562,11 -3455,6 +3565,11 @@@ static void cpt_init_clock_gating(struc
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
 +      /* This fixes weird display corruption (a few pixels shifted
 +       * downward) seen only on the LVDS of some HP laptops with IVY.
 +       */
 +      for_each_pipe(pipe)
 +              I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
        /* WADP0ClockGatingDisable */
        for_each_pipe(pipe) {
                I915_WRITE(TRANS_CHICKEN1(pipe),
@@@ -3586,15 -3474,6 +3589,15 @@@ static void gen6_init_clock_gating(stru
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
  
 +      /* WaDisableHiZPlanesWhenMSAAEnabled */
 +      I915_WRITE(_3D_CHICKEN,
 +                 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
 +
 +      /* WaSetupGtModeTdRowDispatch */
 +      if (IS_SNB_GT1(dev))
 +              I915_WRITE(GEN6_GT_MODE,
 +                         _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 +
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
@@@ -4123,7 -4002,7 +4126,7 @@@ void intel_init_pm(struct drm_device *d
                } else if (IS_IVYBRIDGE(dev)) {
                        /* FIXME: detect B0+ stepping and use auto training */
                        if (SNB_READ_WM0_LATENCY()) {
 -                              dev_priv->display.update_wm = sandybridge_update_wm;
 +                              dev_priv->display.update_wm = ivybridge_update_wm;
                                dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
index ae253e04c39105502fa1b82e05889970139f123f,2bd074ad6f54f6da9b6b6eef9f699e952fbb18cb..59e02691baf39aa3a25afb5694855ae4e4f4b782
@@@ -45,7 -45,7 +45,7 @@@ struct pipe_control 
  
  static inline int ring_space(struct intel_ring_buffer *ring)
  {
 -      int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
 +      int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += ring->size;
        return space;
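
The hard-coded 8 bytes of slack become I915_RING_FREE_SPACE (64; see the new comment in intel_ringbuffer.h below), so head and tail can never share a cacheline. The free-space computation itself is the usual circular-buffer formula:

#include <stdio.h>

#define RING_FREE_SPACE 64      /* keep head and tail a cacheline apart */

/* Free bytes in a circular buffer of 'size' bytes, always leaving
 * RING_FREE_SPACE unused between tail and head. */
static int ring_space(int head, int tail, int size)
{
        int space = head - (tail + RING_FREE_SPACE);
        if (space < 0)
                space += size;
        return space;
}

int main(void)
{
        printf("%d\n", ring_space(1024, 256, 4096));    /* no wrap: 704 */
        printf("%d\n", ring_space(128, 4064, 4096));    /* wrapped: 96 */
        return 0;
}
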
@@@ -547,14 -547,9 +547,14 @@@ static int init_render_ring(struct inte
  
  static void render_ring_cleanup(struct intel_ring_buffer *ring)
  {
 +      struct drm_device *dev = ring->dev;
 +
        if (!ring->private)
                return;
  
 +      if (HAS_BROKEN_CS_TLB(dev))
 +              drm_gem_object_unreference(to_gem_object(ring->private));
 +
        cleanup_pipe_control(ring);
  }
  
@@@ -601,6 -596,13 +601,13 @@@ gen6_add_request(struct intel_ring_buff
        return 0;
  }
  
+ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+                                             u32 seqno)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+       return dev_priv->last_seqno < seqno;
+ }
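
The wrap test is a plain comparison against the newest seqno ever assigned; gen6_ring_sync() below uses it to replace the semaphore wait with no-ops, since a pre-wrap target could otherwise stall forever. The check in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A wait target above the last seqno ever assigned must predate a
 * 32-bit wrap: after wrapping, new seqnos restart low. */
static bool seqno_wrapped(uint32_t last_assigned, uint32_t target)
{
        return last_assigned < target;
}

int main(void)
{
        printf("%d\n", seqno_wrapped(0x00000010, 0xfffffff0));  /* 1 */
        printf("%d\n", seqno_wrapped(0x00000010, 0x00000008));  /* 0 */
        return 0;
}
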
  /**
   * intel_ring_sync - sync the waiter to the signaller on seqno
   *
@@@ -631,11 -633,20 +638,20 @@@ gen6_ring_sync(struct intel_ring_buffe
        if (ret)
                return ret;
  
-       intel_ring_emit(waiter,
-                       dw1 | signaller->semaphore_register[waiter->id]);
-       intel_ring_emit(waiter, seqno);
-       intel_ring_emit(waiter, 0);
-       intel_ring_emit(waiter, MI_NOOP);
+       /* If seqno wrap happened, omit the wait with no-ops */
+       if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+               intel_ring_emit(waiter,
+                               dw1 |
+                               signaller->semaphore_register[waiter->id]);
+               intel_ring_emit(waiter, seqno);
+               intel_ring_emit(waiter, 0);
+               intel_ring_emit(waiter, MI_NOOP);
+       } else {
+               intel_ring_emit(waiter, MI_NOOP);
+               intel_ring_emit(waiter, MI_NOOP);
+               intel_ring_emit(waiter, MI_NOOP);
+               intel_ring_emit(waiter, MI_NOOP);
+       }
        intel_ring_advance(waiter);
  
        return 0;
@@@ -716,6 -727,12 +732,12 @@@ ring_get_seqno(struct intel_ring_buffe
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
  }
  
+ static void
+ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ {
+       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+ }
+ 
  static u32
  pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
  {
        return pc->cpu_page[0];
  }
  
+ static void
+ pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ {
+       struct pipe_control *pc = ring->private;
+ 
+       pc->cpu_page[0] = seqno;
+ }
+ 
  static bool
  gen5_ring_get_irq(struct intel_ring_buffer *ring)
  {
@@@ -974,8 -998,6 +1003,8 @@@ i965_dispatch_execbuffer(struct intel_r
        return 0;
  }
  
 +/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
 +#define I830_BATCH_LIMIT (256*1024)
  static int
  i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len,
  {
        int ret;
  
 -      ret = intel_ring_begin(ring, 4);
 -      if (ret)
 -              return ret;
 +      if (flags & I915_DISPATCH_PINNED) {
 +              ret = intel_ring_begin(ring, 4);
 +              if (ret)
 +                      return ret;
  
 -      intel_ring_emit(ring, MI_BATCH_BUFFER);
 -      intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 -      intel_ring_emit(ring, offset + len - 8);
 -      intel_ring_emit(ring, 0);
 -      intel_ring_advance(ring);
 +              intel_ring_emit(ring, MI_BATCH_BUFFER);
 +              intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 +              intel_ring_emit(ring, offset + len - 8);
 +              intel_ring_emit(ring, MI_NOOP);
 +              intel_ring_advance(ring);
 +      } else {
 +              struct drm_i915_gem_object *obj = ring->private;
 +              u32 cs_offset = obj->gtt_offset;
 +
 +              if (len > I830_BATCH_LIMIT)
 +                      return -ENOSPC;
 +
 +              ret = intel_ring_begin(ring, 9+3);
 +              if (ret)
 +                      return ret;
 +              /* Blit the batch (which now has all relocs applied) to the stable batch
 +               * scratch bo area (so that the CS never stumbles over its tlb
 +               * invalidation bug) ... */
 +              intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
 +                              XY_SRC_COPY_BLT_WRITE_ALPHA |
 +                              XY_SRC_COPY_BLT_WRITE_RGB);
 +              intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
 +              intel_ring_emit(ring, 0);
 +              intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
 +              intel_ring_emit(ring, cs_offset);
 +              intel_ring_emit(ring, 0);
 +              intel_ring_emit(ring, 4096);
 +              intel_ring_emit(ring, offset);
 +              intel_ring_emit(ring, MI_FLUSH);
 +
 +              /* ... and execute it. */
 +              intel_ring_emit(ring, MI_BATCH_BUFFER);
 +              intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 +              intel_ring_emit(ring, cs_offset + len - 8);
 +              intel_ring_advance(ring);
 +      }
  
        return 0;
  }
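
The unpinned path blits the relocated batch into a stable scratch bo and executes from there, so the command streamer never walks the buggy TLB entries. The blit treats the batch as a 4096-byte-pitch rectangle: DIV_ROUND_UP(len, 4096) rows of 1024 pixels at 32bpp (4096 bytes per row). A quick check of that geometry:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int len = 150000;                       /* batch length, bytes */
        int rows = DIV_ROUND_UP(len, 4096);     /* height at 4 KiB pitch */
        int width = 1024;                       /* 32bpp pixels = 4096 B */

        printf("blit %d x %d covers %d bytes (batch is %d)\n",
               width, rows, rows * 4096, len);
        return 0;
}
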
@@@ -1152,7 -1142,11 +1181,11 @@@ static int intel_init_ring_buffer(struc
                        return ret;
        }
  
-       obj = i915_gem_alloc_object(dev, ring->size);
+       obj = NULL;
+       if (!HAS_LLC(dev))
+               obj = i915_gem_object_create_stolen(dev, ring->size);
+       if (obj == NULL)
+               obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;
  
+       intel_ring_init_seqno(ring, dev_priv->last_seqno);
        return 0;
  
  err_unmap:
@@@ -1266,7 -1262,7 +1301,7 @@@ static int intel_ring_wait_request(stru
                if (request->tail == -1)
                        continue;
  
 -              space = request->tail - (ring->tail + 8);
 +              space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
                if (space < 0)
                        space += ring->size;
                if (space >= n) {
@@@ -1398,11 -1394,31 +1433,31 @@@ intel_ring_alloc_seqno(struct intel_rin
        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
  }
  
+ static int __intel_ring_begin(struct intel_ring_buffer *ring,
+                             int bytes)
+ {
+       int ret;
+ 
+       if (unlikely(ring->tail + bytes > ring->effective_size)) {
+               ret = intel_wrap_ring_buffer(ring);
+               if (unlikely(ret))
+                       return ret;
+       }
+ 
+       if (unlikely(ring->space < bytes)) {
+               ret = ring_wait_for_space(ring, bytes);
+               if (unlikely(ret))
+                       return ret;
+       }
+       ring->space -= bytes;
+       return 0;
+ }
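
Note the ordering: the wrap check runs before the space check, so a request that would straddle the ring's effective end is first padded out past the wrap point, and only then is contiguous space waited for. A toy simulation of that ordering (assuming, as in the kernel, that wrapping fills the tail of the buffer with no-ops):

#include <stdio.h>

struct ring { int tail, space, size, effective_size; };

static int begin(struct ring *r, int bytes)
{
        if (r->tail + bytes > r->effective_size) {
                int pad = r->size - r->tail;    /* no-op fill to the end */
                if (r->space < pad)
                        return -1;              /* kernel would wait here */
                r->space -= pad;
                r->tail = 0;
        }
        if (r->space < bytes)
                return -1;                      /* kernel would wait here */
        r->space -= bytes;
        return 0;
}

int main(void)
{
        struct ring r = { .tail = 4000, .space = 500, .size = 4096,
                          .effective_size = 4096 - 128 };

        printf("begin(64) -> %d, tail=%d space=%d\n",
               begin(&r, 64), r.tail, r.space);
        return 0;
}
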
  int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
  {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       int n = 4*num_dwords;
        int ret;
  
        ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
        if (ret)
                return ret;
  
-       if (unlikely(ring->tail + n > ring->effective_size)) {
-               ret = intel_wrap_ring_buffer(ring);
-               if (unlikely(ret))
-                       return ret;
-       }
+       return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+ }
  
-       if (unlikely(ring->space < n)) {
-               ret = ring_wait_for_space(ring, n);
-               if (unlikely(ret))
-                       return ret;
+ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ {
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ 
+       BUG_ON(ring->outstanding_lazy_request);
+ 
+       if (INTEL_INFO(ring->dev)->gen >= 6) {
+               I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+               I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
        }
  
-       ring->space -= n;
-       return 0;
+       ring->set_seqno(ring, seqno);
  }
  
  void intel_ring_advance(struct intel_ring_buffer *ring)
@@@ -1592,6 -1609,7 +1648,7 @@@ int intel_init_render_ring_buffer(struc
                ring->irq_put = gen6_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
+               ring->set_seqno = ring_set_seqno;
                ring->sync_to = gen6_ring_sync;
                ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
                ring->add_request = pc_render_add_request;
                ring->flush = gen4_render_ring_flush;
                ring->get_seqno = pc_render_get_seqno;
+               ring->set_seqno = pc_render_set_seqno;
                ring->irq_get = gen5_ring_get_irq;
                ring->irq_put = gen5_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
                else
                        ring->flush = gen4_render_ring_flush;
                ring->get_seqno = ring_get_seqno;
+               ring->set_seqno = ring_set_seqno;
                if (IS_GEN2(dev)) {
                        ring->irq_get = i8xx_ring_get_irq;
                        ring->irq_put = i8xx_ring_put_irq;
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;
  
 +      /* Workaround batchbuffer to combat CS tlb bug. */
 +      if (HAS_BROKEN_CS_TLB(dev)) {
 +              struct drm_i915_gem_object *obj;
 +              int ret;
 +
 +              obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
 +              if (obj == NULL) {
 +                      DRM_ERROR("Failed to allocate batch bo\n");
 +                      return -ENOMEM;
 +              }
 +
 +              ret = i915_gem_object_pin(obj, 0, true, false);
 +              if (ret != 0) {
 +                      drm_gem_object_unreference(&obj->base);
 +                      DRM_ERROR("Failed to pin batch bo\n");
 +                      return ret;
 +              }
 +
 +              ring->private = obj;
 +      }
 +
        return intel_init_ring_buffer(dev, ring);
  }
  
@@@ -1683,6 -1682,7 +1742,7 @@@ int intel_render_ring_init_dri(struct d
        else
                ring->flush = gen4_render_ring_flush;
        ring->get_seqno = ring_get_seqno;
+       ring->set_seqno = ring_set_seqno;
        if (IS_GEN2(dev)) {
                ring->irq_get = i8xx_ring_get_irq;
                ring->irq_put = i8xx_ring_put_irq;
@@@ -1743,6 -1743,7 +1803,7 @@@ int intel_init_bsd_ring_buffer(struct d
                ring->flush = gen6_ring_flush;
                ring->add_request = gen6_add_request;
                ring->get_seqno = gen6_ring_get_seqno;
+               ring->set_seqno = ring_set_seqno;
                ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
                ring->irq_get = gen6_ring_get_irq;
                ring->irq_put = gen6_ring_put_irq;
                ring->flush = bsd_ring_flush;
                ring->add_request = i9xx_add_request;
                ring->get_seqno = ring_get_seqno;
+               ring->set_seqno = ring_set_seqno;
                if (IS_GEN5(dev)) {
                        ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
                        ring->irq_get = gen5_ring_get_irq;
@@@ -1787,6 -1789,7 +1849,7 @@@ int intel_init_blt_ring_buffer(struct d
        ring->flush = blt_ring_flush;
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
+       ring->set_seqno = ring_set_seqno;
        ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
        ring->irq_get = gen6_ring_get_irq;
        ring->irq_put = gen6_ring_put_irq;
index 6af87cd0572501fb8621f34732627b37d89a83ac,e7b9a6aac95524898d696ad370d81a43969d7b76..d66208c2c48b61e350e2dbf98c0d52cfbda27f96
@@@ -1,17 -1,6 +1,17 @@@
  #ifndef _INTEL_RINGBUFFER_H_
  #define _INTEL_RINGBUFFER_H_
  
 +/*
 + * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 + * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 + * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 + *
 + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 + * cacheline, the Head Pointer must not be greater than the Tail
 + * Pointer."
 + */
 +#define I915_RING_FREE_SPACE 64
 +
  struct  intel_hw_status_page {
        u32             *page_addr;
        unsigned int    gfx_addr;
@@@ -90,11 -79,12 +90,13 @@@ struct  intel_ring_buffer 
         */
        u32             (*get_seqno)(struct intel_ring_buffer *ring,
                                     bool lazy_coherency);
+       void            (*set_seqno)(struct intel_ring_buffer *ring,
+                                    u32 seqno);
        int             (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
                                               u32 offset, u32 length,
                                               unsigned flags);
  #define I915_DISPATCH_SECURE 0x1
 +#define I915_DISPATCH_PINNED 0x2
        void            (*cleanup)(struct intel_ring_buffer *ring);
        int             (*sync_to)(struct intel_ring_buffer *ring,
                                   struct intel_ring_buffer *to,
@@@ -178,6 -168,13 +180,13 @@@ intel_read_status_page(struct intel_rin
        return ring->status_page.page_addr[reg];
  }
  
+ static inline void
+ intel_write_status_page(struct intel_ring_buffer *ring,
+                       int reg, u32 value)
+ {
+       ring->status_page.page_addr[reg] = value;
+ }
+ 
  /**
   * Reads a dword out of the status page, which is written to from the command
   * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@@ -208,7 -205,7 +217,7 @@@ static inline void intel_ring_emit(stru
  }
  void intel_ring_advance(struct intel_ring_buffer *ring);
  int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
+ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
  int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
  int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
  
index c275bf0fa36db86a09d861a7e17457c2c103dd4e,ea2e79f63d2b757fd8fe03dc3d96f9973df9b6b3..153377bed66a6279c29c6abcd936e36fe060d54d
@@@ -1997,11 -1997,8 +1997,8 @@@ set_value
  
  
  done:
-       if (intel_sdvo->base.base.crtc) {
-               struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-               intel_set_mode(crtc, &crtc->mode,
-                              crtc->x, crtc->y, crtc->fb);
-       }
+       if (intel_sdvo->base.base.crtc)
+               intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
  
        return 0;
  #undef CHECK_PROPERTY
@@@ -2251,6 -2248,7 +2248,6 @@@ intel_sdvo_dvi_init(struct intel_sdvo *
                connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
                intel_sdvo->is_hdmi = true;
        }
 -      intel_sdvo->base.cloneable = true;
  
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (intel_sdvo->is_hdmi)
@@@ -2281,6 -2279,7 +2278,6 @@@ intel_sdvo_tv_init(struct intel_sdvo *i
  
        intel_sdvo->is_tv = true;
        intel_sdvo->base.needs_tv_clock = true;
 -      intel_sdvo->base.cloneable = false;
  
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
  
@@@ -2323,6 -2322,8 +2320,6 @@@ intel_sdvo_analog_init(struct intel_sdv
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
        }
  
 -      intel_sdvo->base.cloneable = true;
 -
        intel_sdvo_connector_init(intel_sdvo_connector,
                                  intel_sdvo);
        return true;
@@@ -2353,6 -2354,9 +2350,6 @@@ intel_sdvo_lvds_init(struct intel_sdvo 
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
        }
  
 -      /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
 -      intel_sdvo->base.cloneable = false;
 -
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
                goto err;
@@@ -2425,18 -2429,6 +2422,18 @@@ intel_sdvo_output_setup(struct intel_sd
        return true;
  }
  
 +static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
 +{
 +      struct drm_device *dev = intel_sdvo->base.base.dev;
 +      struct drm_connector *connector, *tmp;
 +
 +      list_for_each_entry_safe(connector, tmp,
 +                               &dev->mode_config.connector_list, head) {
 +              if (intel_attached_encoder(connector) == &intel_sdvo->base)
 +                      intel_sdvo_destroy(connector);
 +      }
 +}
 +
  static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                                          struct intel_sdvo_connector *intel_sdvo_connector,
                                          int type)
@@@ -2758,20 -2750,9 +2755,20 @@@ bool intel_sdvo_init(struct drm_device 
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
                              SDVO_NAME(intel_sdvo));
 -              goto err;
 +              /* Output_setup can leave behind connectors! */
 +              goto err_output;
        }
  
 +      /*
 +       * Cloning SDVO with anything is often impossible, since the SDVO
 +       * encoder can request a special input timing mode. And even if that's
 +       * not the case we have evidence that cloning a plain unscaled mode with
 +       * VGA doesn't really work. Furthermore the cloning flags are way too
 +       * simplistic anyway to express such constraints, so just give up on
 +       * cloning for SDVO encoders.
 +       */
 +      intel_sdvo->base.cloneable = false;
 +
        /* Only enable the hotplug irq if we need it, to work around noisy
         * hotplug lines.
         */
  
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
 -              goto err;
 +              goto err_output;
  
        if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
                                                    &intel_sdvo->pixel_clock_min,
                                                    &intel_sdvo->pixel_clock_max))
 -              goto err;
 +              goto err_output;
  
        DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
                        "clock range %dMHz - %dMHz, "
                        (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
        return true;
  
 +err_output:
 +      intel_sdvo_output_cleanup(intel_sdvo);
 +
  err:
        drm_encoder_cleanup(&intel_encoder->base);
        i2c_del_adapter(&intel_sdvo->ddc);
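The reworked error path is the usual kernel unwind idiom: each failure jumps to the label that tears down exactly what has been built so far, with err_output falling through into the pre-existing err label. A generic sketch with hypothetical helpers, not the driver's actual functions:

	static int setup_device(void)
	{
		int ret;

		ret = alloc_state();
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = query_hw();
		if (ret)
			goto err;		/* no outputs created yet */

		ret = register_outputs();	/* may register some, then fail */
		if (ret)
			goto err_output;

		ret = probe_inputs();
		if (ret)
			goto err_output;	/* outputs exist by now */

		return 0;

	err_output:
		unregister_outputs();		/* then fall through */
	err:
		free_state();
		return ret;
	}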
diff --combined include/drm/drm_mm.h
index 0f4a366f6fa6e3a872a995035c867d482368178c,cd453653f634bae4d667dec60fad2caf7a27bb33..9b991f91d81bdd1ce7b5b3bbe917b985721b5dba
@@@ -89,6 -89,29 +89,29 @@@ static inline bool drm_mm_initialized(s
  {
        return mm->hole_stack.next;
  }
+ 
+ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+ {
+       return hole_node->start + hole_node->size;
+ }
+ 
+ static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+ {
+       BUG_ON(!hole_node->hole_follows);
+       return __drm_mm_hole_node_start(hole_node);
+ }
+ 
+ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+ {
+       return list_entry(hole_node->node_list.next,
+                         struct drm_mm_node, node_list)->start;
+ }
+ 
+ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+ {
+       return __drm_mm_hole_node_end(hole_node);
+ }
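A quick worked example under assumed numbers: for a node with start = 0x1000 and size = 0x2000 whose hole_follows flag is set, drm_mm_hole_node_start() returns 0x1000 + 0x2000 = 0x3000; if the next node in node_list starts at 0x5000, drm_mm_hole_node_end() returns 0x5000, giving a hole of 0x2000 bytes. The BUG_ON() in the checked variant exists because asking for hole boundaries only makes sense on a node that actually has a hole after it.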
  #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
                                                &(mm)->head_node.node_list, \
                                                node_list)
             entry != NULL; entry = next, \
                next = entry ? list_entry(entry->node_list.next, \
                        struct drm_mm_node, node_list) : NULL) \
+ /* Note that we need to unroll list_for_each_entry so that we can set
+  * hole_start and hole_end on each iteration while still keeping the
+  * macro sane.
+  */
+ #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+       for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+            &entry->hole_stack != &(mm)->hole_stack ? \
+            hole_start = drm_mm_hole_node_start(entry), \
+            hole_end = drm_mm_hole_node_end(entry), \
+            1 : 0; \
+            entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
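A usage sketch for the new iterator, assuming an initialized struct drm_mm (hypothetical caller, not from the patch):

	static void dump_holes(struct drm_mm *mm)
	{
		struct drm_mm_node *entry;
		unsigned long hole_start, hole_end;

		/* Only nodes on hole_stack (hole_follows set) are visited;
		 * the boundaries are rebound on every iteration. */
		drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
			printk(KERN_DEBUG "hole: %#lx-%#lx (%lu bytes)\n",
			       hole_start, hole_end, hole_end - hole_start);
	}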
  /*
   * Basic range manager support (drm_mm.c)
   */
+ extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+                                              unsigned long start,
+                                              unsigned long size,
+                                              bool atomic);
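Judging from the prototype alone, a hedged sketch of reserving a fixed range (the offsets are made up, and treating a NULL return as failure is an assumption here):

	/* hypothetical: claim [0x100000, 0x108000) out of an initialized mm */
	struct drm_mm_node *node;

	node = drm_mm_create_block(&mm, 0x100000, 0x8000, false);
	if (!node)
		return -ENOMEM;	/* range unavailable or allocation failed */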
  extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                                    unsigned long size,
                                                    unsigned alignment,
@@@ -158,29 -198,12 +198,29 @@@ static inline struct drm_mm_node *drm_m
        return drm_mm_get_block_range_generic(parent, size, alignment, 0,
                                                start, end, 1);
  }
 -extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
 -                            unsigned long size, unsigned alignment);
 +
 +extern int drm_mm_insert_node(struct drm_mm *mm,
 +                            struct drm_mm_node *node,
 +                            unsigned long size,
 +                            unsigned alignment);
  extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                       struct drm_mm_node *node,
 -                                     unsigned long size, unsigned alignment,
 -                                     unsigned long start, unsigned long end);
 +                                     unsigned long size,
 +                                     unsigned alignment,
 +                                     unsigned long start,
 +                                     unsigned long end);
 +extern int drm_mm_insert_node_generic(struct drm_mm *mm,
 +                                    struct drm_mm_node *node,
 +                                    unsigned long size,
 +                                    unsigned alignment,
 +                                    unsigned long color);
 +extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
 +                                     struct drm_mm_node *node,
 +                                     unsigned long size,
 +                                     unsigned alignment,
 +                                     unsigned long color,
 +                                     unsigned long start,
 +                                     unsigned long end);
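The inline wrappers above pass 0 for the color argument (see drm_mm_get_block_range()), so a minimal sketch of the _generic form with an uncolored, caller-embedded node looks like:

	struct drm_mm_node node;
	int ret;

	memset(&node, 0, sizeof(node));
	/* 4096 bytes, no alignment constraint, color 0 (uncolored) */
	ret = drm_mm_insert_node_generic(&mm, &node, 4096, 0, 0);
	if (ret)
		return ret;

	/* ... use [node.start, node.start + node.size) ... */
	drm_mm_remove_node(&node);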
  extern void drm_mm_put_block(struct drm_mm_node *cur);
  extern void drm_mm_remove_node(struct drm_mm_node *node);
  extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);