Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
authorMaarten Lankhorst <maarten.lankhorst@linux.intel.com>
Thu, 9 May 2019 08:18:57 +0000 (10:18 +0200)
committerMaarten Lankhorst <maarten.lankhorst@linux.intel.com>
Thu, 9 May 2019 08:19:03 +0000 (10:19 +0200)
Requested for backmerging airlied's drm-legacy cleanup.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
1  2 
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c

diff --combined MAINTAINERS
index 3d199592fd2cbb637c5b8749c3c40ea5906caf13,e233b3c485460d002204394e2070c383d1baaa5c..965f3a90744fb08feb1cf932a2fd1a36bc1e83e0
@@@ -1902,14 -1902,15 +1902,15 @@@ T:   git git://git.kernel.org/pub/scm/lin
  ARM/NUVOTON NPCM ARCHITECTURE
  M:    Avi Fishman <avifishman70@gmail.com>
  M:    Tomer Maimon <tmaimon77@gmail.com>
+ M:    Tali Perry <tali.perry1@gmail.com>
  R:    Patrick Venture <venture@google.com>
  R:    Nancy Yuen <yuenn@google.com>
- R:    Brendan Higgins <brendanhiggins@google.com>
+ R:    Benjamin Fair <benjaminfair@google.com>
  L:    openbmc@lists.ozlabs.org (moderated for non-subscribers)
  S:    Supported
  F:    arch/arm/mach-npcm/
  F:    arch/arm/boot/dts/nuvoton-npcm*
- F:    include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+ F:    include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
  F:    drivers/*/*npcm*
  F:    Documentation/devicetree/bindings/*/*npcm*
  F:    Documentation/devicetree/bindings/*/*/*npcm*
@@@ -2365,7 -2366,7 +2366,7 @@@ F:      arch/arm/mm/cache-uniphier.
  F:    arch/arm64/boot/dts/socionext/uniphier*
  F:    drivers/bus/uniphier-system-bus.c
  F:    drivers/clk/uniphier/
- F:    drivers/dmaengine/uniphier-mdmac.c
+ F:    drivers/dma/uniphier-mdmac.c
  F:    drivers/gpio/gpio-uniphier.c
  F:    drivers/i2c/busses/i2c-uniphier*
  F:    drivers/irqchip/irq-uniphier-aidet.c
@@@ -4138,7 -4139,7 +4139,7 @@@ F:      drivers/cpuidle/
  F:    include/linux/cpuidle.h
  
  CRAMFS FILESYSTEM
- M:    Nicolas Pitre <nico@linaro.org>
+ M:    Nicolas Pitre <nico@fluxnic.net>
  S:    Maintained
  F:    Documentation/filesystems/cramfs.txt
  F:    fs/cramfs/
@@@ -5340,7 -5341,6 +5341,7 @@@ T:      git git://anongit.freedesktop.org/dr
  
  DRM PANEL DRIVERS
  M:    Thierry Reding <thierry.reding@gmail.com>
 +R:    Sam Ravnborg <sam@ravnborg.org>
  L:    dri-devel@lists.freedesktop.org
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
@@@ -5879,7 -5879,7 +5880,7 @@@ L:      netdev@vger.kernel.or
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-bus-mdio
  F:    Documentation/devicetree/bindings/net/mdio*
- F:    Documentation/networking/phy.txt
+ F:    Documentation/networking/phy.rst
  F:    drivers/net/phy/
  F:    drivers/of/of_mdio.c
  F:    drivers/of/of_net.c
@@@ -6454,7 -6454,6 +6455,6 @@@ L:      linux-kernel@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
  S:    Maintained
  F:    kernel/futex.c
- F:    kernel/futex_compat.c
  F:    include/asm-generic/futex.h
  F:    include/linux/futex.h
  F:    include/uapi/linux/futex.h
@@@ -7562,7 -7561,7 +7562,7 @@@ F:      include/net/mac802154.
  F:    include/net/af_ieee802154.h
  F:    include/net/cfg802154.h
  F:    include/net/ieee802154_netdev.h
- F:    Documentation/networking/ieee802154.txt
+ F:    Documentation/networking/ieee802154.rst
  
  IFE PROTOCOL
  M:    Yotam Gigi <yotam.gi@gmail.com>
@@@ -14028,7 -14027,7 +14028,7 @@@ F:   drivers/media/rc/serial_ir.
  SFC NETWORK DRIVER
  M:    Solarflare linux maintainers <linux-net-drivers@solarflare.com>
  M:    Edward Cree <ecree@solarflare.com>
- M:    Bert Kenward <bkenward@solarflare.com>
+ M:    Martin Habets <mhabets@solarflare.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/sfc/
@@@ -16555,7 -16554,7 +16555,7 @@@ F:   drivers/char/virtio_console.
  F:    include/linux/virtio_console.h
  F:    include/uapi/linux/virtio_console.h
  
- VIRTIO CORE, NET AND BLOCK DRIVERS
+ VIRTIO CORE AND NET DRIVERS
  M:    "Michael S. Tsirkin" <mst@redhat.com>
  M:    Jason Wang <jasowang@redhat.com>
  L:    virtualization@lists.linux-foundation.org
@@@ -16570,6 -16569,19 +16570,19 @@@ F: include/uapi/linux/virtio_*.
  F:    drivers/crypto/virtio/
  F:    mm/balloon_compaction.c
  
+ VIRTIO BLOCK AND SCSI DRIVERS
+ M:    "Michael S. Tsirkin" <mst@redhat.com>
+ M:    Jason Wang <jasowang@redhat.com>
+ R:    Paolo Bonzini <pbonzini@redhat.com>
+ R:    Stefan Hajnoczi <stefanha@redhat.com>
+ L:    virtualization@lists.linux-foundation.org
+ S:    Maintained
+ F:    drivers/block/virtio_blk.c
+ F:    drivers/scsi/virtio_scsi.c
+ F:    include/uapi/linux/virtio_blk.h
+ F:    include/uapi/linux/virtio_scsi.h
+ F:    drivers/vhost/scsi.c
+ 
  VIRTIO CRYPTO DRIVER
  M:    Gonglei <arei.gonglei@huawei.com>
  L:    virtualization@lists.linux-foundation.org
index 85f8792c2a5fb070f55678c847ae063e30c7e83f,9ec6356d3f0b7b02d7bd20f6b5bf085a4478cabc..b9371ec5e04f08ac5dfac416b1c33bc5ac41fcae
@@@ -2471,6 -2471,7 +2471,7 @@@ int amdgpu_device_init(struct amdgpu_de
        mutex_init(&adev->virt.vf_errors.lock);
        hash_init(adev->mn_hash);
        mutex_init(&adev->lock_reset);
+       mutex_init(&adev->virt.dpm_mutex);
  
        amdgpu_device_check_arguments(adev);
  
@@@ -3191,11 -3192,16 +3192,16 @@@ static int amdgpu_device_recover_vram(s
                        break;
  
                if (fence) {
-                       r = dma_fence_wait_timeout(fence, false, tmo);
+                       tmo = dma_fence_wait_timeout(fence, false, tmo);
                        dma_fence_put(fence);
                        fence = next;
-                       if (r <= 0)
+                       if (tmo == 0) {
+                               r = -ETIMEDOUT;
                                break;
+                       } else if (tmo < 0) {
+                               r = tmo;
+                               break;
+                       }
                } else {
                        fence = next;
                }
                tmo = dma_fence_wait_timeout(fence, false, tmo);
        dma_fence_put(fence);
  
-       if (r <= 0 || tmo <= 0) {
-               DRM_ERROR("recover vram bo from shadow failed\n");
+       if (r < 0 || tmo <= 0) {
+               DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
                return -EIO;
        }
  
@@@ -3334,6 -3340,8 +3340,6 @@@ static int amdgpu_device_pre_asic_reset
                if (!ring || !ring->sched.thread)
                        continue;
  
 -              drm_sched_stop(&ring->sched);
 -
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
                amdgpu_fence_driver_force_completion(ring);
        }
        if(job)
                drm_sched_increase_karma(&job->base);
  
 -
 -
 +      /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
        if (!amdgpu_sriov_vf(adev)) {
  
                if (!need_full_reset)
@@@ -3428,7 -3437,7 +3434,7 @@@ static int amdgpu_do_asic_reset(struct 
  
                                vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
                                if (vram_lost) {
-                                       DRM_ERROR("VRAM is lost!\n");
+                                       DRM_INFO("VRAM is lost due to GPU reset!\n");
                                        atomic_inc(&tmp_adev->vram_lost_counter);
                                }
  
        return r;
  }
  
 -static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
 -                                        struct amdgpu_job *job)
 +static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
  {
 -      int i;
 -
 -      for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 -              struct amdgpu_ring *ring = adev->rings[i];
 -
 -              if (!ring || !ring->sched.thread)
 -                      continue;
 +      if (trylock) {
 +              if (!mutex_trylock(&adev->lock_reset))
 +                      return false;
 +      } else
 +              mutex_lock(&adev->lock_reset);
  
 -              if (!adev->asic_reset_res)
 -                      drm_sched_resubmit_jobs(&ring->sched);
 -
 -              drm_sched_start(&ring->sched, !adev->asic_reset_res);
 -      }
 -
 -      if (!amdgpu_device_has_dc_support(adev)) {
 -              drm_helper_resume_force_mode(adev->ddev);
 -      }
 -
 -      adev->asic_reset_res = 0;
 -}
 -
 -static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
 -{
 -      mutex_lock(&adev->lock_reset);
        atomic_inc(&adev->gpu_reset_counter);
        adev->in_gpu_reset = 1;
        /* Block kfd: SRIOV would do it separately */
        if (!amdgpu_sriov_vf(adev))
                  amdgpu_amdkfd_pre_reset(adev);
 +
 +      return true;
  }
  
  static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
  int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
  {
 -      int r;
 +      struct list_head device_list, *device_list_handle =  NULL;
 +      bool need_full_reset, job_signaled;
        struct amdgpu_hive_info *hive = NULL;
 -      bool need_full_reset = false;
        struct amdgpu_device *tmp_adev = NULL;
 -      struct list_head device_list, *device_list_handle =  NULL;
 +      int i, r = 0;
  
 +      need_full_reset = job_signaled = false;
        INIT_LIST_HEAD(&device_list);
  
        dev_info(adev->dev, "GPU reset begin!\n");
  
 +      hive = amdgpu_get_xgmi_hive(adev, false);
 +
        /*
 -       * In case of XGMI hive disallow concurrent resets to be triggered
 -       * by different nodes. No point also since the one node already executing
 -       * reset will also reset all the other nodes in the hive.
 +       * Here we trylock to avoid chain of resets executing from
 +       * either trigger by jobs on different adevs in XGMI hive or jobs on
 +       * different schedulers for same device while this TO handler is running.
 +       * We always reset all schedulers for device and all devices for XGMI
 +       * hive so that should take care of them too.
         */
 -      hive = amdgpu_get_xgmi_hive(adev, 0);
 -      if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
 -          !mutex_trylock(&hive->reset_lock))
 +
 +      if (hive && !mutex_trylock(&hive->reset_lock)) {
 +              DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
 +                       job->base.id, hive->hive_id);
                return 0;
 +      }
  
        /* Start with adev pre asic reset first for soft reset check.*/
 -      amdgpu_device_lock_adev(adev);
 -      r = amdgpu_device_pre_asic_reset(adev,
 -                                       job,
 -                                       &need_full_reset);
 -      if (r) {
 -              /*TODO Should we stop ?*/
 -              DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
 -                        r, adev->ddev->unique);
 -              adev->asic_reset_res = r;
 +      if (!amdgpu_device_lock_adev(adev, !hive)) {
 +              DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
 +                                       job->base.id);
 +              return 0;
        }
  
        /* Build list of devices to reset */
 -      if  (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
 +      if  (adev->gmc.xgmi.num_physical_nodes > 1) {
                if (!hive) {
                        amdgpu_device_unlock_adev(adev);
                        return -ENODEV;
                device_list_handle = &device_list;
        }
  
 +      /* block all schedulers and reset given job's ring */
 +      list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 +              for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 +                      struct amdgpu_ring *ring = tmp_adev->rings[i];
 +
 +                      if (!ring || !ring->sched.thread)
 +                              continue;
 +
 +                      drm_sched_stop(&ring->sched, &job->base);
 +              }
 +      }
 +
 +
 +      /*
 +       * Must check guilty signal here since after this point all old
 +       * HW fences are force signaled.
 +       *
 +       * job->base holds a reference to parent fence
 +       */
 +      if (job && job->base.s_fence->parent &&
 +          dma_fence_is_signaled(job->base.s_fence->parent))
 +              job_signaled = true;
 +
 +      if (!amdgpu_device_ip_need_full_reset(adev))
 +              device_list_handle = &device_list;
 +
 +      if (job_signaled) {
 +              dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
 +              goto skip_hw_reset;
 +      }
 +
 +
 +      /* Guilty job will be freed after this*/
 +      r = amdgpu_device_pre_asic_reset(adev,
 +                                       job,
 +                                       &need_full_reset);
 +      if (r) {
 +              /*TODO Should we stop ?*/
 +              DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
 +                        r, adev->ddev->unique);
 +              adev->asic_reset_res = r;
 +      }
 +
  retry:        /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
  
                if (tmp_adev == adev)
                        continue;
  
 -              amdgpu_device_lock_adev(tmp_adev);
 +              amdgpu_device_lock_adev(tmp_adev, false);
                r = amdgpu_device_pre_asic_reset(tmp_adev,
                                                 NULL,
                                                 &need_full_reset);
                        goto retry;
        }
  
 +skip_hw_reset:
 +
        /* Post ASIC reset for all devs .*/
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 -              amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
 +              for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 +                      struct amdgpu_ring *ring = tmp_adev->rings[i];
 +
 +                      if (!ring || !ring->sched.thread)
 +                              continue;
 +
 +                      /* No point to resubmit jobs if we didn't HW reset*/
 +                      if (!tmp_adev->asic_reset_res && !job_signaled)
 +                              drm_sched_resubmit_jobs(&ring->sched);
 +
 +                      drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
 +              }
 +
 +              if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
 +                      drm_helper_resume_force_mode(tmp_adev->ddev);
 +              }
 +
 +              tmp_adev->asic_reset_res = 0;
  
                if (r) {
                        /* bad news, how to tell it to userspace ? */
                amdgpu_device_unlock_adev(tmp_adev);
        }
  
 -      if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
 +      if (hive)
                mutex_unlock(&hive->reset_lock);
  
        if (r)
@@@ -3695,6 -3657,7 +3701,7 @@@ static void amdgpu_device_get_min_pci_s
        struct pci_dev *pdev = adev->pdev;
        enum pci_bus_speed cur_speed;
        enum pcie_link_width cur_width;
+       u32 ret = 1;
  
        *speed = PCI_SPEED_UNKNOWN;
        *width = PCIE_LNK_WIDTH_UNKNOWN;
        while (pdev) {
                cur_speed = pcie_get_speed_cap(pdev);
                cur_width = pcie_get_width_cap(pdev);
+               ret = pcie_bandwidth_available(adev->pdev, NULL,
+                                                      NULL, &cur_width);
+               if (!ret)
+                       cur_width = PCIE_LNK_WIDTH_RESRV;
  
                if (cur_speed != PCI_SPEED_UNKNOWN) {
                        if (*speed == PCI_SPEED_UNKNOWN)
index 553415fe8eded1510932f4690c5a8f08bc211cb5,2e0cb4246cbda6c3a7a1e12506c55061449aea75..79dbeafb9a528ff859cee6eb5f1a3a097b3dbc62
@@@ -1034,7 -1034,7 +1034,7 @@@ disable_outputs(struct drm_device *dev
                        funcs->atomic_disable(crtc, old_crtc_state);
                else if (funcs->disable)
                        funcs->disable(crtc);
-               else
+               else if (funcs->dpms)
                        funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
  
                if (!(dev->irq_enabled && dev->num_crtcs))
@@@ -1277,10 -1277,9 +1277,9 @@@ void drm_atomic_helper_commit_modeset_e
                if (new_crtc_state->enable) {
                        DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
                                         crtc->base.id, crtc->name);
                        if (funcs->atomic_enable)
                                funcs->atomic_enable(crtc, old_crtc_state);
-                       else
+                       else if (funcs->commit)
                                funcs->commit(crtc);
                }
        }
@@@ -1424,7 -1423,7 +1423,7 @@@ drm_atomic_helper_wait_for_vblanks(stru
                ret = wait_event_timeout(dev->vblank[i].queue,
                                old_state->crtcs[i].last_vblank_count !=
                                        drm_crtc_vblank_count(crtc),
 -                              msecs_to_jiffies(50));
 +                              msecs_to_jiffies(100));
  
                WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
                     crtc->base.id, crtc->name);
index f1632cbf1b257535e5ddaf64115358015b2d03c0,649cfd8b42007cff821c2a39cca6dba391ca481c..852bdd87cf13ea2f84cb899121f8ed488bc30f7a
   * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   * DEALINGS IN THE SOFTWARE.
   */
 -#include <linux/kernel.h>
 -#include <linux/slab.h>
 +
  #include <linux/hdmi.h>
  #include <linux/i2c.h>
 +#include <linux/kernel.h>
  #include <linux/module.h>
 +#include <linux/slab.h>
  #include <linux/vga_switcheroo.h>
 -#include <drm/drmP.h>
 +
 +#include <drm/drm_displayid.h>
 +#include <drm/drm_drv.h>
  #include <drm/drm_edid.h>
  #include <drm/drm_encoder.h>
 -#include <drm/drm_displayid.h>
 +#include <drm/drm_print.h>
  #include <drm/drm_scdc_helper.h>
  
  #include "drm_crtc_internal.h"
@@@ -165,6 -162,25 +165,25 @@@ static const struct edid_quirk 
        /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
        { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
  
+       /* Valve Index Headset */
+       { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
+       { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
        /* HTC Vive and Vive Pro VR Headsets */
        { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
        { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
index f2b38eb7b1b6b95bd2d5060fecfaea72a8aba4be,3bd40a4a67399370b2c92b36ef3a48981dcdf0f3..5454930f6aa89f01f11ff01024007fb403fc8995
  
  #include "i915_drv.h"
  #include "i915_gem_clflush.h"
+ #include "i915_reset.h"
  #include "i915_trace.h"
+ #include "intel_atomic_plane.h"
+ #include "intel_color.h"
+ #include "intel_cdclk.h"
+ #include "intel_crt.h"
+ #include "intel_ddi.h"
+ #include "intel_dp.h"
  #include "intel_drv.h"
  #include "intel_dsi.h"
+ #include "intel_dvo.h"
+ #include "intel_fbc.h"
+ #include "intel_fbdev.h"
  #include "intel_frontbuffer.h"
- #include "intel_drv.h"
- #include "intel_dsi.h"
- #include "intel_frontbuffer.h"
- #include "i915_drv.h"
- #include "i915_gem_clflush.h"
- #include "i915_reset.h"
- #include "i915_trace.h"
+ #include "intel_hdcp.h"
+ #include "intel_hdmi.h"
+ #include "intel_lvds.h"
+ #include "intel_pipe_crc.h"
+ #include "intel_pm.h"
+ #include "intel_psr.h"
+ #include "intel_sdvo.h"
+ #include "intel_sprite.h"
+ #include "intel_tv.h"
  
  /* Primary plane formats for gen <= 3 */
  static const u32 i8xx_primary_formats[] = {
@@@ -115,8 -125,8 +125,8 @@@ static void vlv_prepare_pll(struct inte
                            const struct intel_crtc_state *pipe_config);
  static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
- static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
- static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+ static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
+ static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
  static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                                    struct intel_crtc_state *crtc_state);
  static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@@@ -467,10 -477,11 +477,11 @@@ static const struct intel_limit intel_l
  };
  
  static void
- skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
+ skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
  {
        if (enable)
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
+                          I915_READ(CLKGATE_DIS_PSL(pipe)) |
                           DUPS1_GATING_DIS | DUPS2_GATING_DIS);
        else
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@@ -5530,7 -5541,7 +5541,7 @@@ static void intel_post_plane_update(str
        /* Display WA 827 */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
-               skl_wa_clkgate(dev_priv, crtc->pipe, false);
+               skl_wa_827(dev_priv, crtc->pipe, false);
        }
  }
  
@@@ -5569,7 -5580,7 +5580,7 @@@ static void intel_pre_plane_update(stru
        /* Display WA 827 */
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config)) {
-               skl_wa_clkgate(dev_priv, crtc->pipe, true);
+               skl_wa_827(dev_priv, crtc->pipe, true);
        }
  
        /*
@@@ -6180,6 -6191,9 +6191,9 @@@ bool intel_port_is_combophy(struct drm_
        if (port == PORT_NONE)
                return false;
  
+       if (IS_ELKHARTLAKE(dev_priv))
+               return port <= PORT_C;
        if (INTEL_GEN(dev_priv) >= 11)
                return port <= PORT_B;
  
  
  bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
  {
-       if (INTEL_GEN(dev_priv) >= 11)
+       if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
                return port >= PORT_C && port <= PORT_F;
  
        return false;
@@@ -9751,7 -9765,8 +9765,8 @@@ static void haswell_get_ddi_pll(struct 
  
  static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
-                                    u64 *power_domain_mask)
+                                    u64 *power_domain_mask,
+                                    intel_wakeref_t *wakerefs)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long panel_transcoder_mask = 0;
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
+       intel_wakeref_t wf;
        u32 tmp;
  
        if (INTEL_GEN(dev_priv) >= 11)
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));
  
        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wf)
                return false;
  
-       WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+       wakerefs[power_domain] = wf;
        *power_domain_mask |= BIT_ULL(power_domain);
  
        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
  
  static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
-                                        u64 *power_domain_mask)
+                                        u64 *power_domain_mask,
+                                        intel_wakeref_t *wakerefs)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
-       enum port port;
        enum transcoder cpu_transcoder;
+       intel_wakeref_t wf;
+       enum port port;
        u32 tmp;
  
        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                        cpu_transcoder = TRANSCODER_DSI_C;
  
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-               if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+               wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+               if (!wf)
                        continue;
  
-               WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+               wakerefs[power_domain] = wf;
                *power_domain_mask |= BIT_ULL(power_domain);
  
                /*
@@@ -9935,6 -9959,7 +9959,7 @@@ static bool haswell_get_pipe_config(str
                                    struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;
        intel_crtc_init_scalers(crtc, pipe_config);
  
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (!wf)
                return false;
+       wakerefs[power_domain] = wf;
        power_domain_mask = BIT_ULL(power_domain);
  
        pipe_config->shared_dpll = NULL;
  
-       active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
+       active = hsw_get_transcoder_state(crtc, pipe_config,
+                                         &power_domain_mask, wakerefs);
  
        if (IS_GEN9_LP(dev_priv) &&
-           bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+           bxt_get_dsi_transcoder_state(crtc, pipe_config,
+                                        &power_domain_mask, wakerefs)) {
                WARN_ON(active);
                active = true;
        }
        }
  
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-       if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
-               WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+       WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+       wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+       if (wf) {
+               wakerefs[power_domain] = wf;
                power_domain_mask |= BIT_ULL(power_domain);
  
                if (INTEL_GEN(dev_priv) >= 9)
  
  out:
        for_each_power_domain(power_domain, power_domain_mask)
-               intel_display_power_put_unchecked(dev_priv, power_domain);
+               intel_display_power_put(dev_priv,
+                                       power_domain, wakerefs[power_domain]);
  
        return active;
  }
@@@ -12990,10 -13024,16 +13024,16 @@@ static int intel_modeset_checks(struct 
                return -EINVAL;
        }
  
+       /* keep the current setting */
+       if (!intel_state->cdclk.force_min_cdclk_changed)
+               intel_state->cdclk.force_min_cdclk =
+                       dev_priv->cdclk.force_min_cdclk;
        intel_state->modeset = true;
        intel_state->active_crtcs = dev_priv->active_crtcs;
        intel_state->cdclk.logical = dev_priv->cdclk.logical;
        intel_state->cdclk.actual = dev_priv->cdclk.actual;
+       intel_state->cdclk.pipe = INVALID_PIPE;
  
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (new_crtc_state->active)
         * adjusted_mode bits in the crtc directly.
         */
        if (dev_priv->display.modeset_calc_cdclk) {
+               enum pipe pipe;
                ret = dev_priv->display.modeset_calc_cdclk(state);
                if (ret < 0)
                        return ret;
                                return ret;
                }
  
+               if (is_power_of_2(intel_state->active_crtcs)) {
+                       struct drm_crtc *crtc;
+                       struct drm_crtc_state *crtc_state;
+                       pipe = ilog2(intel_state->active_crtcs);
+                       crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
+                       crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+                       if (crtc_state && needs_modeset(crtc_state))
+                               pipe = INVALID_PIPE;
+               } else {
+                       pipe = INVALID_PIPE;
+               }
                /* All pipes must be switched off while we change the cdclk. */
-               if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
-                                             &intel_state->cdclk.actual)) {
+               if (pipe != INVALID_PIPE &&
+                   intel_cdclk_needs_cd2x_update(dev_priv,
+                                                 &dev_priv->cdclk.actual,
+                                                 &intel_state->cdclk.actual)) {
+                       ret = intel_lock_all_pipes(state);
+                       if (ret < 0)
+                               return ret;
+                       intel_state->cdclk.pipe = pipe;
+               } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+                                                    &intel_state->cdclk.actual)) {
                        ret = intel_modeset_all_pipes(state);
                        if (ret < 0)
                                return ret;
+                       intel_state->cdclk.pipe = INVALID_PIPE;
                }
  
                DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
                DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
                              intel_state->cdclk.logical.voltage_level,
                              intel_state->cdclk.actual.voltage_level);
-       } else {
-               to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
        }
  
        intel_modeset_clear_plls(state);
@@@ -13085,7 -13149,7 +13149,7 @@@ static int intel_atomic_check(struct dr
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *crtc_state;
        int ret, i;
-       bool any_ms = false;
+       bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
  
        /* Catch I915_MODE_FLAG_INHERITED */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
@@@ -13210,14 -13274,14 +13274,14 @@@ static void intel_update_crtc(struct dr
        else if (new_plane_state)
                intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
  
-       intel_begin_crtc_commit(crtc, old_crtc_state);
+       intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
  
        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
        else
                i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
  
-       intel_finish_crtc_commit(crtc, old_crtc_state);
+       intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
  }
  
  static void intel_update_crtcs(struct drm_atomic_state *state)
@@@ -13445,7 -13509,10 +13509,10 @@@ static void intel_atomic_commit_tail(st
        if (intel_state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
  
-               intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
+               intel_set_cdclk_pre_plane_update(dev_priv,
+                                                &intel_state->cdclk.actual,
+                                                &dev_priv->cdclk.actual,
+                                                intel_state->cdclk.pipe);
  
                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.update_crtcs(state);
  
+       if (intel_state->modeset)
+               intel_set_cdclk_post_plane_update(dev_priv,
+                                                 &intel_state->cdclk.actual,
+                                                 &dev_priv->cdclk.actual,
+                                                 intel_state->cdclk.pipe);
        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
@@@ -13675,8 -13748,10 +13748,10 @@@ static int intel_atomic_commit(struct d
                       intel_state->min_voltage_level,
                       sizeof(intel_state->min_voltage_level));
                dev_priv->active_crtcs = intel_state->active_crtcs;
-               dev_priv->cdclk.logical = intel_state->cdclk.logical;
-               dev_priv->cdclk.actual = intel_state->cdclk.actual;
+               dev_priv->cdclk.force_min_cdclk =
+                       intel_state->cdclk.force_min_cdclk;
+               intel_cdclk_swap_state(intel_state);
        }
  
        drm_atomic_state_get(state);
@@@ -13996,39 -14071,35 +14071,35 @@@ skl_max_scale(const struct intel_crtc_s
        return max_scale;
  }
  
- static void intel_begin_crtc_commit(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_crtc_state)
+ static void intel_begin_crtc_commit(struct intel_atomic_state *state,
+                                   struct intel_crtc *crtc)
  {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *old_intel_cstate =
-               to_intel_crtc_state(old_crtc_state);
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_crtc_state->state);
-       struct intel_crtc_state *intel_cstate =
-               intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
-       bool modeset = needs_modeset(&intel_cstate->base);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       bool modeset = needs_modeset(&new_crtc_state->base);
  
        /* Perform vblank evasion around commit operation */
-       intel_pipe_update_start(intel_cstate);
+       intel_pipe_update_start(new_crtc_state);
  
        if (modeset)
                goto out;
  
-       if (intel_cstate->base.color_mgmt_changed ||
-           intel_cstate->update_pipe)
-               intel_color_commit(intel_cstate);
+       if (new_crtc_state->base.color_mgmt_changed ||
+           new_crtc_state->update_pipe)
+               intel_color_commit(new_crtc_state);
  
-       if (intel_cstate->update_pipe)
-               intel_update_pipe_config(old_intel_cstate, intel_cstate);
+       if (new_crtc_state->update_pipe)
+               intel_update_pipe_config(old_crtc_state, new_crtc_state);
        else if (INTEL_GEN(dev_priv) >= 9)
-               skl_detach_scalers(intel_cstate);
+               skl_detach_scalers(new_crtc_state);
  
  out:
        if (dev_priv->display.atomic_update_watermarks)
-               dev_priv->display.atomic_update_watermarks(old_intel_state,
-                                                          intel_cstate);
+               dev_priv->display.atomic_update_watermarks(state,
+                                                          new_crtc_state);
  }
  
  void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
        }
  }
  
- static void intel_finish_crtc_commit(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_crtc_state)
+ static void intel_finish_crtc_commit(struct intel_atomic_state *state,
+                                    struct intel_crtc *crtc)
  {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_atomic_state *old_intel_state =
-               to_intel_atomic_state(old_crtc_state->state);
+       struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_crtc_state *new_crtc_state =
-               intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
+               intel_atomic_get_new_crtc_state(state, crtc);
  
        intel_pipe_update_end(new_crtc_state);
  
        if (new_crtc_state->update_pipe &&
            !needs_modeset(&new_crtc_state->base) &&
-           old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
-               intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
+           old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+               intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
  }
  
  /**
@@@ -14526,8 -14596,9 +14596,8 @@@ static int intel_crtc_init(struct drm_i
                ret = -ENOMEM;
                goto fail;
        }
 +      __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
        intel_crtc->config = crtc_state;
 -      intel_crtc->base.state = &crtc_state->base;
 -      crtc_state->base.crtc = &intel_crtc->base;
  
        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
@@@ -16069,7 -16140,7 +16139,7 @@@ static void intel_modeset_readout_hw_st
  
                __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
 -              crtc_state->base.crtc = &crtc->base;
 +              __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
  
                crtc_state->base.active = crtc_state->base.enable =
                        dev_priv->display.get_pipe_config(crtc, crtc_state);
@@@ -16210,8 -16281,6 +16280,6 @@@ get_encoder_power_domains(struct drm_i9
        struct intel_encoder *encoder;
  
        for_each_intel_encoder(&dev_priv->drm, encoder) {
-               u64 get_domains;
-               enum intel_display_power_domain domain;
                struct intel_crtc_state *crtc_state;
  
                if (!encoder->get_power_domains)
                        continue;
  
                crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
-               get_domains = encoder->get_power_domains(encoder, crtc_state);
-               for_each_power_domain(domain, get_domains)
-                       intel_display_power_get(dev_priv, domain);
+               encoder->get_power_domains(encoder, crtc_state);
        }
  }
  
index eb156cb73dd4108e14da1198e0809359b0378ca8,dfdfa766da8f154a7093ad332001778dca8e7ca7..3772f745589dcf9683fd83b7a64b0aa4050d2537
@@@ -46,6 -46,9 +46,9 @@@
  #define LEFT_MIXER 0
  #define RIGHT_MIXER 1
  
+ /* timeout in ms waiting for frame done */
+ #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS        60
  static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
  {
        struct msm_drm_private *priv = crtc->dev->dev_private;
@@@ -425,65 -428,6 +428,6 @@@ void dpu_crtc_complete_commit(struct dr
        trace_dpu_crtc_complete_commit(DRMID(crtc));
  }
  
- static void _dpu_crtc_setup_mixer_for_encoder(
-               struct drm_crtc *crtc,
-               struct drm_encoder *enc)
- {
-       struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
-       struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
-       struct dpu_rm *rm = &dpu_kms->rm;
-       struct dpu_crtc_mixer *mixer;
-       struct dpu_hw_ctl *last_valid_ctl = NULL;
-       int i;
-       struct dpu_rm_hw_iter lm_iter, ctl_iter;
-       dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
-       dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
-       /* Set up all the mixers and ctls reserved by this encoder */
-       for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
-               mixer = &cstate->mixers[i];
-               if (!dpu_rm_get_hw(rm, &lm_iter))
-                       break;
-               mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-               /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
-               if (!dpu_rm_get_hw(rm, &ctl_iter)) {
-                       DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
-                                       mixer->hw_lm->idx - LM_0);
-                       mixer->lm_ctl = last_valid_ctl;
-               } else {
-                       mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
-                       last_valid_ctl = mixer->lm_ctl;
-               }
-               /* Shouldn't happen, mixers are always >= ctls */
-               if (!mixer->lm_ctl) {
-                       DPU_ERROR("no valid ctls found for lm %d\n",
-                                       mixer->hw_lm->idx - LM_0);
-                       return;
-               }
-               cstate->num_mixers++;
-               DPU_DEBUG("setup mixer %d: lm %d\n",
-                               i, mixer->hw_lm->idx - LM_0);
-               DPU_DEBUG("setup mixer %d: ctl %d\n",
-                               i, mixer->lm_ctl->idx - CTL_0);
-       }
- }
- static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
- {
-       struct drm_encoder *enc;
-       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-       /* Check for mixers on all encoders attached to this crtc */
-       drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
-               _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
- }
  static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
  {
@@@ -533,10 -477,7 +477,7 @@@ static void dpu_crtc_atomic_begin(struc
        dev = crtc->dev;
        smmu_state = &dpu_crtc->smmu_state;
  
-       if (!cstate->num_mixers) {
-               _dpu_crtc_setup_mixers(crtc);
-               _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
-       }
+       _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
  
        if (dpu_crtc->event) {
                WARN_ON(dpu_crtc->event);
@@@ -683,7 -624,7 +624,7 @@@ static int _dpu_crtc_wait_for_frame_don
  
        DPU_ATRACE_BEGIN("frame done completion wait");
        ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
-                       msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+                       msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
        if (!ret) {
                DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
                rc = -ETIMEDOUT;
@@@ -753,12 -694,14 +694,12 @@@ end
  
  static void dpu_crtc_reset(struct drm_crtc *crtc)
  {
 -      struct dpu_crtc_state *cstate;
 +      struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
  
        if (crtc->state)
                dpu_crtc_destroy_state(crtc, crtc->state);
  
 -      crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
 -      if (crtc->state)
 -              crtc->state->crtc = crtc;
 +      __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
  }
  
  /**
index 9254747ef65b97e2731594cb47ee9ab7d85d8456,20a9c296d0272d7ef8ad89cf6f66a81deace12c4..e590fa0bb02b1d7952af340643c0c019d3c21be8
@@@ -541,6 -541,18 +541,18 @@@ static void vop_core_clks_disable(struc
        clk_disable(vop->hclk);
  }
  
+ static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+ {
+       if (win->phy->scl && win->phy->scl->ext) {
+               VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+       }
+       VOP_WIN_SET(vop, win, enable, 0);
+ }
  static int vop_enable(struct drm_crtc *crtc)
  {
        struct vop *vop = to_vop(crtc);
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win = vop_win->data;
  
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
        }
        spin_unlock(&vop->reg_lock);
  
@@@ -735,7 -747,7 +747,7 @@@ static void vop_plane_atomic_disable(st
  
        spin_lock(&vop->reg_lock);
  
-       VOP_WIN_SET(vop, win, enable, 0);
+       vop_win_disable(vop, win);
  
        spin_unlock(&vop->reg_lock);
  }
@@@ -1210,6 -1222,17 +1222,6 @@@ static void vop_crtc_destroy(struct drm
        drm_crtc_cleanup(crtc);
  }
  
 -static void vop_crtc_reset(struct drm_crtc *crtc)
 -{
 -      if (crtc->state)
 -              __drm_atomic_helper_crtc_destroy_state(crtc->state);
 -      kfree(crtc->state);
 -
 -      crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
 -      if (crtc->state)
 -              crtc->state->crtc = crtc;
 -}
 -
  static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
  {
        struct rockchip_crtc_state *rockchip_state;
@@@ -1231,17 -1254,6 +1243,17 @@@ static void vop_crtc_destroy_state(stru
        kfree(s);
  }
  
 +static void vop_crtc_reset(struct drm_crtc *crtc)
 +{
 +      struct rockchip_crtc_state *crtc_state =
 +              kzalloc(sizeof(*crtc_state), GFP_KERNEL);
 +
 +      if (crtc->state)
 +              vop_crtc_destroy_state(crtc, crtc->state);
 +
 +      __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
 +}
 +
  #ifdef CONFIG_DRM_ANALOGIX_DP
  static struct drm_connector *vop_get_edp_connector(struct vop *vop)
  {
@@@ -1631,7 -1643,7 +1643,7 @@@ static int vop_initial(struct vop *vop
                int channel = i * 2 + 1;
  
                VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
                VOP_WIN_SET(vop, win, gate, 1);
        }