drm/i915: migrate skl planes code new file (v5)
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / display / intel_psr.c
index b3631b722de327595b8d1ca57adffdd1037b851f..bf214d0e2dec0ecca14a5167961014df34e4cd63 100644 (file)
 #include "i915_drv.h"
 #include "intel_atomic.h"
 #include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_hdmi.h"
 #include "intel_psr.h"
 #include "intel_sprite.h"
-#include "intel_hdmi.h"
+#include "skl_universal_plane.h"
 
 /**
  * DOC: Panel Self Refresh (PSR/SRD)
  * use page flips.
  */
 
-static bool psr_global_enabled(struct drm_i915_private *i915)
+static bool psr_global_enabled(struct intel_dp *intel_dp)
 {
-       switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
        case I915_PSR_DEBUG_DEFAULT:
                return i915->params.enable_psr;
        case I915_PSR_DEBUG_DISABLE:
@@ -91,9 +95,9 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
        }
 }
 
-static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
+static bool psr2_global_enabled(struct intel_dp *intel_dp)
 {
-       switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+       switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
        case I915_PSR_DEBUG_DISABLE:
        case I915_PSR_DEBUG_FORCE_PSR1:
                return false;
@@ -102,11 +106,12 @@ static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
        }
 }
 
-static void psr_irq_control(struct drm_i915_private *dev_priv)
+static void psr_irq_control(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        enum transcoder trans_shift;
-       u32 mask, val;
        i915_reg_t imr_reg;
+       u32 mask, val;
 
        /*
         * gen12+ has registers relative to transcoder and one per transcoder
@@ -115,14 +120,14 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
         */
        if (INTEL_GEN(dev_priv) >= 12) {
                trans_shift = 0;
-               imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+               imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
        } else {
-               trans_shift = dev_priv->psr.transcoder;
+               trans_shift = intel_dp->psr.transcoder;
                imr_reg = EDP_PSR_IMR;
        }
 
        mask = EDP_PSR_ERROR(trans_shift);
-       if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+       if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
                mask |= EDP_PSR_POST_EXIT(trans_shift) |
                        EDP_PSR_PRE_ENTRY(trans_shift);
 
@@ -171,30 +176,31 @@ static void psr_event_print(struct drm_i915_private *i915,
                drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
 }
 
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 {
-       enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+       enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       ktime_t time_ns =  ktime_get();
        enum transcoder trans_shift;
        i915_reg_t imr_reg;
-       ktime_t time_ns =  ktime_get();
 
        if (INTEL_GEN(dev_priv) >= 12) {
                trans_shift = 0;
-               imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+               imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
        } else {
-               trans_shift = dev_priv->psr.transcoder;
+               trans_shift = intel_dp->psr.transcoder;
                imr_reg = EDP_PSR_IMR;
        }
 
        if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
-               dev_priv->psr.last_entry_attempt = time_ns;
+               intel_dp->psr.last_entry_attempt = time_ns;
                drm_dbg_kms(&dev_priv->drm,
                            "[transcoder %s] PSR entry attempt in 2 vblanks\n",
                            transcoder_name(cpu_transcoder));
        }
 
        if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
-               dev_priv->psr.last_exit = time_ns;
+               intel_dp->psr.last_exit = time_ns;
                drm_dbg_kms(&dev_priv->drm,
                            "[transcoder %s] PSR exit completed\n",
                            transcoder_name(cpu_transcoder));
@@ -202,7 +208,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
                if (INTEL_GEN(dev_priv) >= 9) {
                        u32 val = intel_de_read(dev_priv,
                                                PSR_EVENT(cpu_transcoder));
-                       bool psr2_enabled = dev_priv->psr.psr2_enabled;
+                       bool psr2_enabled = intel_dp->psr.psr2_enabled;
 
                        intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
                                       val);
@@ -216,7 +222,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
                drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
                         transcoder_name(cpu_transcoder));
 
-               dev_priv->psr.irq_aux_error = true;
+               intel_dp->psr.irq_aux_error = true;
 
                /*
                 * If this interruption is not masked it will keep
@@ -230,7 +236,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
                val |= EDP_PSR_ERROR(trans_shift);
                intel_de_write(dev_priv, imr_reg, val);
 
-               schedule_work(&dev_priv->psr.work);
+               schedule_work(&intel_dp->psr.work);
        }
 }
 
@@ -291,12 +297,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv =
                to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
 
-       if (dev_priv->psr.dp) {
-               drm_warn(&dev_priv->drm,
-                        "More than one eDP panel found, PSR support should be extended\n");
-               return;
-       }
-
        drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
                         sizeof(intel_dp->psr_dpcd));
 
@@ -305,7 +305,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
                    intel_dp->psr_dpcd[0]);
 
-       if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+       if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
                drm_dbg_kms(&dev_priv->drm,
                            "PSR support not currently available for this panel\n");
                return;
@@ -317,12 +317,10 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
                return;
        }
 
-       dev_priv->psr.sink_support = true;
-       dev_priv->psr.sink_sync_latency =
+       intel_dp->psr.sink_support = true;
+       intel_dp->psr.sink_sync_latency =
                intel_dp_get_sink_sync_latency(intel_dp);
 
-       dev_priv->psr.dp = intel_dp;
-
        if (INTEL_GEN(dev_priv) >= 9 &&
            (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
                bool y_req = intel_dp->psr_dpcd[1] &
@@ -340,14 +338,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
                 * Y-coordinate requirement panels we would need to enable
                 * GTC first.
                 */
-               dev_priv->psr.sink_psr2_support = y_req && alpm;
+               intel_dp->psr.sink_psr2_support = y_req && alpm;
                drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
-                           dev_priv->psr.sink_psr2_support ? "" : "not ");
+                           intel_dp->psr.sink_psr2_support ? "" : "not ");
 
-               if (dev_priv->psr.sink_psr2_support) {
-                       dev_priv->psr.colorimetry_support =
+               if (intel_dp->psr.sink_psr2_support) {
+                       intel_dp->psr.colorimetry_support =
                                intel_dp_get_colorimetry_status(intel_dp);
-                       dev_priv->psr.su_x_granularity =
+                       intel_dp->psr.su_x_granularity =
                                intel_dp_get_su_x_granulartiy(intel_dp);
                }
        }
@@ -373,7 +371,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
        BUILD_BUG_ON(sizeof(aux_msg) > 20);
        for (i = 0; i < sizeof(aux_msg); i += 4)
                intel_de_write(dev_priv,
-                              EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+                              EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
                               intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -384,7 +382,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 
        /* Select only valid bits for SRD_AUX_CTL */
        aux_ctl &= psr_aux_mask;
-       intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+       intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
                       aux_ctl);
 }
 
@@ -394,14 +392,14 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
        u8 dpcd_val = DP_PSR_ENABLE;
 
        /* Enable ALPM at sink for psr2 */
-       if (dev_priv->psr.psr2_enabled) {
+       if (intel_dp->psr.psr2_enabled) {
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
                                   DP_ALPM_ENABLE |
                                   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
 
                dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
        } else {
-               if (dev_priv->psr.link_standby)
+               if (intel_dp->psr.link_standby)
                        dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
 
                if (INTEL_GEN(dev_priv) >= 8)
@@ -464,7 +462,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
         * off-by-one issue that HW has in some cases.
         */
        idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+       idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
 
        if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
                idle_frames = 0xf;
@@ -484,7 +482,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
        if (IS_HASWELL(dev_priv))
                val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
-       if (dev_priv->psr.link_standby)
+       if (intel_dp->psr.link_standby)
                val |= EDP_PSR_LINK_STANDBY;
 
        val |= intel_psr1_get_tp_time(intel_dp);
@@ -492,9 +490,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
        if (INTEL_GEN(dev_priv) >= 8)
                val |= EDP_PSR_CRC_ENABLE;
 
-       val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+       val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
                EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
-       intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+       intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
 }
 
 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -529,7 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                val |= EDP_Y_COORDINATE_ENABLE;
 
-       val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+       val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
        val |= intel_psr2_get_tp_time(intel_dp);
 
        if (INTEL_GEN(dev_priv) >= 12) {
@@ -548,29 +546,29 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
                val |= EDP_PSR2_FAST_WAKE(7);
        }
 
-       if (dev_priv->psr.psr2_sel_fetch_enabled) {
+       if (intel_dp->psr.psr2_sel_fetch_enabled) {
                /* WA 1408330847 */
-               if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+               if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
                    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
                        intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
                                     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
                                     DIS_RAM_BYPASS_PSR2_MAN_TRACK);
 
                intel_de_write(dev_priv,
-                              PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
+                              PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
                               PSR2_MAN_TRK_CTL_ENABLE);
        } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
                intel_de_write(dev_priv,
-                              PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
+                              PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
        }
 
        /*
         * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
         * recommending keep this bit unset while PSR2 is enabled.
         */
-       intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+       intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
 
-       intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+       intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
 }
 
 static bool
@@ -593,55 +591,58 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
                            drm_mode_vrefresh(&cstate->hw.adjusted_mode));
 }
 
-static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+static void psr2_program_idle_frames(struct intel_dp *intel_dp,
                                     u32 idle_frames)
 {
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 val;
 
        idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
-       val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
+       val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
        val &= ~EDP_PSR2_IDLE_FRAME_MASK;
        val |= idle_frames;
-       intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+       intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
 }
 
-static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
 {
-       psr2_program_idle_frames(dev_priv, 0);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       psr2_program_idle_frames(intel_dp, 0);
        intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 }
 
-static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
 {
-       struct intel_dp *intel_dp = dev_priv->psr.dp;
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-       psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
+       psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
 }
 
 static void tgl_dc3co_disable_work(struct work_struct *work)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
+       struct intel_dp *intel_dp =
+               container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
 
-       mutex_lock(&dev_priv->psr.lock);
+       mutex_lock(&intel_dp->psr.lock);
        /* If delayed work is pending, it is not idle */
-       if (delayed_work_pending(&dev_priv->psr.dc3co_work))
+       if (delayed_work_pending(&intel_dp->psr.dc3co_work))
                goto unlock;
 
-       tgl_psr2_disable_dc3co(dev_priv);
+       tgl_psr2_disable_dc3co(intel_dp);
 unlock:
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_unlock(&intel_dp->psr.lock);
 }
 
-static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
 {
-       if (!dev_priv->psr.dc3co_enabled)
+       if (!intel_dp->psr.dc3co_enabled)
                return;
 
-       cancel_delayed_work(&dev_priv->psr.dc3co_work);
+       cancel_delayed_work(&intel_dp->psr.dc3co_work);
        /* Before PSR2 exit disallow dc3co*/
-       tgl_psr2_disable_dc3co(dev_priv);
+       tgl_psr2_disable_dc3co(intel_dp);
 }
 
 static void
@@ -714,9 +715,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
        int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
        int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
 
-       if (!dev_priv->psr.sink_psr2_support)
+       if (!intel_dp->psr.sink_psr2_support)
                return false;
 
+       /* JSL and EHL only supports eDP 1.3 */
+       if (IS_JSL_EHL(dev_priv)) {
+               drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
+               return false;
+       }
+
        if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
                drm_dbg_kms(&dev_priv->drm,
                            "PSR2 not supported in transcoder %s\n",
@@ -724,7 +731,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
        }
 
-       if (!psr2_global_enabled(dev_priv)) {
+       if (!psr2_global_enabled(intel_dp)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
                return false;
        }
@@ -773,10 +780,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
         * only need to validate the SU block width is a multiple of
         * x granularity.
         */
-       if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+       if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
                drm_dbg_kms(&dev_priv->drm,
                            "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
-                           crtc_hdisplay, dev_priv->psr.su_x_granularity);
+                           crtc_hdisplay, intel_dp->psr.su_x_granularity);
                return false;
        }
 
@@ -805,36 +812,27 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;
        int psr_setup_time;
 
-       if (!CAN_PSR(dev_priv))
+       /*
+        * Current PSR panels dont work reliably with VRR enabled
+        * So if VRR is enabled, do not enable PSR.
+        */
+       if (crtc_state->vrr.enable)
                return;
 
-       if (intel_dp != dev_priv->psr.dp)
+       if (!CAN_PSR(intel_dp))
                return;
 
-       if (!psr_global_enabled(dev_priv)) {
+       if (!psr_global_enabled(intel_dp)) {
                drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
                return;
        }
 
-       /*
-        * HSW spec explicitly says PSR is tied to port A.
-        * BDW+ platforms have a instance of PSR registers per transcoder but
-        * for now it only supports one instance of PSR, so lets keep it
-        * hardcoded to PORT_A
-        */
-       if (dig_port->base.port != PORT_A) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "PSR condition failed: Port not supported\n");
-               return;
-       }
-
-       if (dev_priv->psr.sink_not_reliable) {
+       if (intel_dp->psr.sink_not_reliable) {
                drm_dbg_kms(&dev_priv->drm,
                            "PSR sink implementation is not reliable\n");
                return;
@@ -870,23 +868,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 static void intel_psr_activate(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       enum transcoder transcoder = intel_dp->psr.transcoder;
 
-       if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+       if (transcoder_has_psr2(dev_priv, transcoder))
                drm_WARN_ON(&dev_priv->drm,
-                           intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+                           intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
 
        drm_WARN_ON(&dev_priv->drm,
-                   intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
-       drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
-       lockdep_assert_held(&dev_priv->psr.lock);
+                   intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
+       drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+       lockdep_assert_held(&intel_dp->psr.lock);
 
        /* psr1 and psr2 are mutually exclusive.*/
-       if (dev_priv->psr.psr2_enabled)
+       if (intel_dp->psr.psr2_enabled)
                hsw_activate_psr2(intel_dp);
        else
                hsw_activate_psr1(intel_dp);
 
-       dev_priv->psr.active = true;
+       intel_dp->psr.active = true;
 }
 
 static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -902,7 +901,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_psr_setup_aux(intel_dp);
 
-       if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+       if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
                                           !IS_GEMINILAKE(dev_priv))) {
                i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
                u32 chicken = intel_de_read(dev_priv, reg);
@@ -926,10 +925,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
        if (INTEL_GEN(dev_priv) < 11)
                mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
 
-       intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+       intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
                       mask);
 
-       psr_irq_control(dev_priv);
+       psr_irq_control(intel_dp);
 
        if (crtc_state->dc3co_exitline) {
                u32 val;
@@ -947,30 +946,30 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 
        if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
                intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
-                            dev_priv->psr.psr2_sel_fetch_enabled ?
+                            intel_dp->psr.psr2_sel_fetch_enabled ?
                             IGNORE_PSR2_HW_TRACKING : 0);
 }
 
-static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
                                    const struct intel_crtc_state *crtc_state,
                                    const struct drm_connector_state *conn_state)
 {
-       struct intel_dp *intel_dp = dev_priv->psr.dp;
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_encoder *encoder = &dig_port->base;
        u32 val;
 
-       drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
+       drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
 
-       dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
-       dev_priv->psr.busy_frontbuffer_bits = 0;
-       dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-       dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
-       dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+       intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+       intel_dp->psr.busy_frontbuffer_bits = 0;
+       intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+       intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+       intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
        /* DC5/DC6 requires at least 6 idle frames */
        val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
-       dev_priv->psr.dc3co_exit_delay = val;
-       dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+       intel_dp->psr.dc3co_exit_delay = val;
+       intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
 
        /*
         * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -982,27 +981,27 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
         */
        if (INTEL_GEN(dev_priv) >= 12) {
                val = intel_de_read(dev_priv,
-                                   TRANS_PSR_IIR(dev_priv->psr.transcoder));
+                                   TRANS_PSR_IIR(intel_dp->psr.transcoder));
                val &= EDP_PSR_ERROR(0);
        } else {
                val = intel_de_read(dev_priv, EDP_PSR_IIR);
-               val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+               val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
        }
        if (val) {
-               dev_priv->psr.sink_not_reliable = true;
+               intel_dp->psr.sink_not_reliable = true;
                drm_dbg_kms(&dev_priv->drm,
                            "PSR interruption error set, not enabling PSR\n");
                return;
        }
 
        drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
-                   dev_priv->psr.psr2_enabled ? "2" : "1");
+                   intel_dp->psr.psr2_enabled ? "2" : "1");
        intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
-                                    &dev_priv->psr.vsc);
-       intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
+                                    &intel_dp->psr.vsc);
+       intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
        intel_psr_enable_sink(intel_dp);
        intel_psr_enable_source(intel_dp, crtc_state);
-       dev_priv->psr.enabled = true;
+       intel_dp->psr.enabled = true;
 
        intel_psr_activate(intel_dp);
 }
@@ -1021,7 +1020,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
-       if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
+       if (!CAN_PSR(intel_dp))
                return;
 
        if (!crtc_state->has_psr)
@@ -1029,46 +1028,47 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 
        drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
 
-       mutex_lock(&dev_priv->psr.lock);
-       intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_lock(&intel_dp->psr.lock);
+       intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+       mutex_unlock(&intel_dp->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
+static void intel_psr_exit(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 val;
 
-       if (!dev_priv->psr.active) {
-               if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+       if (!intel_dp->psr.active) {
+               if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
                        val = intel_de_read(dev_priv,
-                                           EDP_PSR2_CTL(dev_priv->psr.transcoder));
+                                           EDP_PSR2_CTL(intel_dp->psr.transcoder));
                        drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
                }
 
                val = intel_de_read(dev_priv,
-                                   EDP_PSR_CTL(dev_priv->psr.transcoder));
+                                   EDP_PSR_CTL(intel_dp->psr.transcoder));
                drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
 
                return;
        }
 
-       if (dev_priv->psr.psr2_enabled) {
-               tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+       if (intel_dp->psr.psr2_enabled) {
+               tgl_disallow_dc3co_on_psr2_exit(intel_dp);
                val = intel_de_read(dev_priv,
-                                   EDP_PSR2_CTL(dev_priv->psr.transcoder));
+                                   EDP_PSR2_CTL(intel_dp->psr.transcoder));
                drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
                val &= ~EDP_PSR2_ENABLE;
                intel_de_write(dev_priv,
-                              EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+                              EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
        } else {
                val = intel_de_read(dev_priv,
-                                   EDP_PSR_CTL(dev_priv->psr.transcoder));
+                                   EDP_PSR_CTL(intel_dp->psr.transcoder));
                drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
                val &= ~EDP_PSR_ENABLE;
                intel_de_write(dev_priv,
-                              EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+                              EDP_PSR_CTL(intel_dp->psr.transcoder), val);
        }
-       dev_priv->psr.active = false;
+       intel_dp->psr.active = false;
 }
 
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
@@ -1077,21 +1077,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
        i915_reg_t psr_status;
        u32 psr_status_mask;
 
-       lockdep_assert_held(&dev_priv->psr.lock);
+       lockdep_assert_held(&intel_dp->psr.lock);
 
-       if (!dev_priv->psr.enabled)
+       if (!intel_dp->psr.enabled)
                return;
 
        drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
-                   dev_priv->psr.psr2_enabled ? "2" : "1");
+                   intel_dp->psr.psr2_enabled ? "2" : "1");
 
-       intel_psr_exit(dev_priv);
+       intel_psr_exit(intel_dp);
 
-       if (dev_priv->psr.psr2_enabled) {
-               psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+       if (intel_dp->psr.psr2_enabled) {
+               psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
                psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
-               psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+               psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
                psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
        }
 
@@ -1101,8 +1101,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
                drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
 
        /* WA 1408330847 */
-       if (dev_priv->psr.psr2_sel_fetch_enabled &&
-           (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+       if (intel_dp->psr.psr2_sel_fetch_enabled &&
+           (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
             IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
                intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
                             DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
@@ -1110,10 +1110,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
        /* Disable PSR on Sink */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
 
-       if (dev_priv->psr.psr2_enabled)
+       if (intel_dp->psr.psr2_enabled)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
 
-       dev_priv->psr.enabled = false;
+       intel_dp->psr.enabled = false;
 }
 
 /**
@@ -1131,20 +1131,22 @@ void intel_psr_disable(struct intel_dp *intel_dp,
        if (!old_crtc_state->has_psr)
                return;
 
-       if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
+       if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
                return;
 
-       mutex_lock(&dev_priv->psr.lock);
+       mutex_lock(&intel_dp->psr.lock);
 
        intel_psr_disable_locked(intel_dp);
 
-       mutex_unlock(&dev_priv->psr.lock);
-       cancel_work_sync(&dev_priv->psr.work);
-       cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
+       mutex_unlock(&intel_dp->psr.lock);
+       cancel_work_sync(&intel_dp->psr.work);
+       cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
 }
 
-static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
        if (IS_TIGERLAKE(dev_priv))
                /*
                 * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
@@ -1158,7 +1160,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
                 * So using this workaround until this issue is root caused
                 * and a better fix is found.
                 */
-               intel_psr_exit(dev_priv);
+               intel_psr_exit(intel_dp);
        else if (INTEL_GEN(dev_priv) >= 9)
                /*
                 * Display WA #0884: skl+
@@ -1169,13 +1171,13 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
                 * but it makes more sense write to the current active
                 * pipe.
                 */
-               intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
+               intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
        else
                /*
                 * A write to CURSURFLIVE do not cause HW tracking to exit PSR
                 * on older gens so doing the manual exit instead.
                 */
-               intel_psr_exit(dev_priv);
+               intel_psr_exit(intel_dp);
 }
 
 void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
@@ -1185,7 +1187,9 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
-       u32 val;
+       const struct drm_rect *clip;
+       u32 val, offset;
+       int ret, x, y;
 
        if (!crtc_state->enable_psr2_sel_fetch)
                return;
@@ -1196,32 +1200,49 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
        if (!val || plane->id == PLANE_CURSOR)
                return;
 
-       val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1;
+       clip = &plane_state->psr2_sel_fetch_area;
+
+       val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+       val |= plane_state->uapi.dst.x1;
        intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
 
-       val = plane_state->color_plane[color_plane].y << 16;
-       val |= plane_state->color_plane[color_plane].x;
+       /* TODO: consider auxiliary surfaces */
+       x = plane_state->uapi.src.x1 >> 16;
+       y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
+       ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+       if (ret)
+               drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
+                             ret);
+       val = y << 16 | x;
        intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
                          val);
 
        /* Sizes are 0 based */
-       val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16;
+       val = (drm_rect_height(clip) - 1) << 16;
        val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
        intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
 }
 
 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
 {
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct i915_psr *psr = &dev_priv->psr;
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+       struct intel_encoder *encoder;
 
        if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
            !crtc_state->enable_psr2_sel_fetch)
                return;
 
-       intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
-                      crtc_state->psr2_man_track_ctl);
+       for_each_intel_encoder_mask_can_psr(&dev_priv->drm, encoder,
+                                           crtc_state->uapi.encoder_mask) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               if (!intel_dp->psr.enabled)
+                       continue;
+
+               intel_de_write(dev_priv,
+                              PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
+                              crtc_state->psr2_man_track_ctl);
+       }
 }
 
 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -1237,9 +1258,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
        if (clip->y1 == -1)
                goto exit;
 
+       drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+
        val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
        val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
-       val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
+       val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
 exit:
        crtc_state->psr2_man_track_ctl = val;
 }
@@ -1264,8 +1287,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
                                struct intel_crtc *crtc)
 {
        struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+       struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
        struct intel_plane_state *new_plane_state, *old_plane_state;
-       struct drm_rect pipe_clip = { .y1 = -1 };
        struct intel_plane *plane;
        bool full_update = false;
        int i, ret;
@@ -1277,13 +1300,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
        if (ret)
                return ret;
 
+       /*
+        * Calculate the minimal selective fetch area of each plane and
+        * calculate the pipe damaged area.
+        * In the next loop the plane selective fetch area will actually be set
+        * using the whole pipe damaged area.
+        */
        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
                                             new_plane_state, i) {
-               struct drm_rect temp;
+               struct drm_rect src, damaged_area = { .y1 = -1 };
+               struct drm_mode_rect *damaged_clips;
+               u32 num_clips, j;
 
                if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
                        continue;
 
+               if (!new_plane_state->uapi.visible &&
+                   !old_plane_state->uapi.visible)
+                       continue;
+
                /*
                 * TODO: Not clear how to handle planes with negative position,
                 * also planes are not updated if they have a negative X
@@ -1295,18 +1330,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
                        break;
                }
 
-               if (!new_plane_state->uapi.visible)
-                       continue;
+               num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
 
                /*
-                * For now doing a selective fetch in the whole plane area,
-                * optimizations will come in the future.
+                * If visibility or the plane position changed, mark the whole
+                * plane area as damaged as it needs a complete redraw in the
+                * new and old positions.
                 */
-               temp.y1 = new_plane_state->uapi.dst.y1;
-               temp.y2 = new_plane_state->uapi.dst.y2;
-               clip_area_update(&pipe_clip, &temp);
+               if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
+                   !drm_rect_equals(&new_plane_state->uapi.dst,
+                                    &old_plane_state->uapi.dst)) {
+                       if (old_plane_state->uapi.visible) {
+                               damaged_area.y1 = old_plane_state->uapi.dst.y1;
+                               damaged_area.y2 = old_plane_state->uapi.dst.y2;
+                               clip_area_update(&pipe_clip, &damaged_area);
+                       }
+
+                       if (new_plane_state->uapi.visible) {
+                               damaged_area.y1 = new_plane_state->uapi.dst.y1;
+                               damaged_area.y2 = new_plane_state->uapi.dst.y2;
+                               clip_area_update(&pipe_clip, &damaged_area);
+                       }
+                       continue;
+               } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
+                          (!num_clips &&
+                           new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
+                       /*
+                        * If the plane doesn't have damaged areas but the
+                        * framebuffer changed or alpha changed, mark the whole
+                        * plane area as damaged.
+                        */
+                       damaged_area.y1 = new_plane_state->uapi.dst.y1;
+                       damaged_area.y2 = new_plane_state->uapi.dst.y2;
+                       clip_area_update(&pipe_clip, &damaged_area);
+                       continue;
+               }
+
+               drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+               damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
+
+               for (j = 0; j < num_clips; j++) {
+                       struct drm_rect clip;
+
+                       clip.x1 = damaged_clips[j].x1;
+                       clip.y1 = damaged_clips[j].y1;
+                       clip.x2 = damaged_clips[j].x2;
+                       clip.y2 = damaged_clips[j].y2;
+                       if (drm_rect_intersect(&clip, &src))
+                               clip_area_update(&damaged_area, &clip);
+               }
+
+               if (damaged_area.y1 == -1)
+                       continue;
+
+               damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
+               damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+               clip_area_update(&pipe_clip, &damaged_area);
+       }
+
+       if (full_update)
+               goto skip_sel_fetch_set_loop;
+
+       /* It must be aligned to 4 lines */
+       pipe_clip.y1 -= pipe_clip.y1 % 4;
+       if (pipe_clip.y2 % 4)
+               pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
+
+       /*
+        * Now that we have the pipe damaged area, check if it intersects with
+        * every plane; if it does, set the plane selective fetch area.
+        */
+       for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+                                            new_plane_state, i) {
+               struct drm_rect *sel_fetch_area, inter;
+
+               if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
+                   !new_plane_state->uapi.visible)
+                       continue;
+
+               inter = pipe_clip;
+               if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+                       continue;
+
+               sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+               sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
+               sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
        }
 
+skip_sel_fetch_set_loop:
        psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
        return 0;
 }
@@ -1326,13 +1437,13 @@ void intel_psr_update(struct intel_dp *intel_dp,
                      const struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct i915_psr *psr = &dev_priv->psr;
+       struct intel_psr *psr = &intel_dp->psr;
        bool enable, psr2_enable;
 
-       if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+       if (!CAN_PSR(intel_dp))
                return;
 
-       mutex_lock(&dev_priv->psr.lock);
+       mutex_lock(&intel_dp->psr.lock);
 
        enable = crtc_state->has_psr;
        psr2_enable = crtc_state->has_psr2;
@@ -1340,15 +1451,15 @@ void intel_psr_update(struct intel_dp *intel_dp,
        if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
                /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
                if (crtc_state->crc_enabled && psr->enabled)
-                       psr_force_hw_tracking_exit(dev_priv);
+                       psr_force_hw_tracking_exit(intel_dp);
                else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
                        /*
                         * Activate PSR again after a force exit when enabling
                         * CRC in older gens
                         */
-                       if (!dev_priv->psr.active &&
-                           !dev_priv->psr.busy_frontbuffer_bits)
-                               schedule_work(&dev_priv->psr.work);
+                       if (!intel_dp->psr.active &&
+                           !intel_dp->psr.busy_frontbuffer_bits)
+                               schedule_work(&intel_dp->psr.work);
                }
 
                goto unlock;
@@ -1358,34 +1469,23 @@ void intel_psr_update(struct intel_dp *intel_dp,
                intel_psr_disable_locked(intel_dp);
 
        if (enable)
-               intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
+               intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
 
 unlock:
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_unlock(&intel_dp->psr.lock);
 }
 
 /**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
- * @new_crtc_state: new CRTC state
+ * psr_wait_for_idle - wait for PSR1 to idle
+ * @intel_dp: Intel DP
  * @out_value: PSR status in case of failure
  *
- * This function is expected to be called from pipe_update_start() where it is
- * not expected to race with PSR enable or disable.
- *
  * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
+ *
  */
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
-                           u32 *out_value)
+static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
 {
-       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-       if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
-               return 0;
-
-       /* FIXME: Update this for PSR2 if we need to wait for idle */
-       if (READ_ONCE(dev_priv->psr.psr2_enabled))
-               return 0;
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
        /*
         * From bspec: Panel Self Refresh (BDW+)
@@ -1393,32 +1493,68 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
         * exit training time + 1.5 ms of aux channel handshake. 50 ms is
         * defensive enough to cover everything.
         */
-
        return __intel_wait_for_register(&dev_priv->uncore,
-                                        EDP_PSR_STATUS(dev_priv->psr.transcoder),
+                                        EDP_PSR_STATUS(intel_dp->psr.transcoder),
                                         EDP_PSR_STATUS_STATE_MASK,
                                         EDP_PSR_STATUS_STATE_IDLE, 2, 50,
                                         out_value);
 }
 
-static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ */
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+       struct intel_encoder *encoder;
+
+       if (!new_crtc_state->has_psr)
+               return;
+
+       for_each_intel_encoder_mask_can_psr(&dev_priv->drm, encoder,
+                                           new_crtc_state->uapi.encoder_mask) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+               u32 psr_status;
+
+               mutex_lock(&intel_dp->psr.lock);
+               if (!intel_dp->psr.enabled ||
+                   (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) {
+                       mutex_unlock(&intel_dp->psr.lock);
+                       continue;
+               }
+
+               /* when the PSR1 is enabled */
+               if (psr_wait_for_idle(intel_dp, &psr_status))
+                       drm_err(&dev_priv->drm,
+                               "PSR idle timed out 0x%x, atomic update may fail\n",
+                               psr_status);
+               mutex_unlock(&intel_dp->psr.lock);
+       }
+}
+
+static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        i915_reg_t reg;
        u32 mask;
        int err;
 
-       if (!dev_priv->psr.enabled)
+       if (!intel_dp->psr.enabled)
                return false;
 
-       if (dev_priv->psr.psr2_enabled) {
-               reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+       if (intel_dp->psr.psr2_enabled) {
+               reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
                mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
-               reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+               reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
                mask = EDP_PSR_STATUS_STATE_MASK;
        }
 
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_unlock(&intel_dp->psr.lock);
 
        err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
        if (err)
@@ -1426,8 +1562,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
                        "Timed out waiting for PSR Idle for re-enable\n");
 
        /* After the unlocked wait, verify that PSR is still wanted! */
-       mutex_lock(&dev_priv->psr.lock);
-       return err == 0 && dev_priv->psr.enabled;
+       mutex_lock(&intel_dp->psr.lock);
+       return err == 0 && intel_dp->psr.enabled;
 }
 
 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
@@ -1493,8 +1629,9 @@ retry:
        return err;
 }
 
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
 {
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
        u32 old_mode;
        int ret;
@@ -1505,21 +1642,21 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
                return -EINVAL;
        }
 
-       ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+       ret = mutex_lock_interruptible(&intel_dp->psr.lock);
        if (ret)
                return ret;
 
-       old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
-       dev_priv->psr.debug = val;
+       old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+       intel_dp->psr.debug = val;
 
        /*
         * Do it right away if it's already enabled, otherwise it will be done
         * when enabling the source.
         */
-       if (dev_priv->psr.enabled)
-               psr_irq_control(dev_priv);
+       if (intel_dp->psr.enabled)
+               psr_irq_control(intel_dp);
 
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_unlock(&intel_dp->psr.lock);
 
        if (old_mode != mode)
                ret = intel_psr_fastset_force(dev_priv);
@@ -1527,28 +1664,28 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
        return ret;
 }
 
-static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+static void intel_psr_handle_irq(struct intel_dp *intel_dp)
 {
-       struct i915_psr *psr = &dev_priv->psr;
+       struct intel_psr *psr = &intel_dp->psr;
 
-       intel_psr_disable_locked(psr->dp);
+       intel_psr_disable_locked(intel_dp);
        psr->sink_not_reliable = true;
        /* let's make sure that sink is awaken */
-       drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }
 
 static void intel_psr_work(struct work_struct *work)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), psr.work);
+       struct intel_dp *intel_dp =
+               container_of(work, typeof(*intel_dp), psr.work);
 
-       mutex_lock(&dev_priv->psr.lock);
+       mutex_lock(&intel_dp->psr.lock);
 
-       if (!dev_priv->psr.enabled)
+       if (!intel_dp->psr.enabled)
                goto unlock;
 
-       if (READ_ONCE(dev_priv->psr.irq_aux_error))
-               intel_psr_handle_irq(dev_priv);
+       if (READ_ONCE(intel_dp->psr.irq_aux_error))
+               intel_psr_handle_irq(intel_dp);
 
        /*
         * We have to make sure PSR is ready for re-enable
@@ -1556,7 +1693,7 @@ static void intel_psr_work(struct work_struct *work)
         * PSR might take some time to get fully disabled
         * and be ready for re-enable.
         */
-       if (!__psr_wait_for_idle_locked(dev_priv))
+       if (!__psr_wait_for_idle_locked(intel_dp))
                goto unlock;
 
        /*
@@ -1564,12 +1701,12 @@ static void intel_psr_work(struct work_struct *work)
         * recheck. Since psr_flush first clears this and then reschedules we
         * won't ever miss a flush when bailing out here.
         */
-       if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
+       if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
                goto unlock;
 
-       intel_psr_activate(dev_priv->psr.dp);
+       intel_psr_activate(intel_dp);
 unlock:
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_unlock(&intel_dp->psr.lock);
 }
 
 /**
@@ -1588,27 +1725,31 @@ unlock:
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
                          unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-       if (!CAN_PSR(dev_priv))
-               return;
+       struct intel_encoder *encoder;
 
        if (origin == ORIGIN_FLIP)
                return;
 
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
+       for_each_intel_encoder_can_psr(&dev_priv->drm, encoder) {
+               unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-       dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+               mutex_lock(&intel_dp->psr.lock);
+               if (!intel_dp->psr.enabled) {
+                       mutex_unlock(&intel_dp->psr.lock);
+                       continue;
+               }
 
-       if (frontbuffer_bits)
-               intel_psr_exit(dev_priv);
+               pipe_frontbuffer_bits &=
+                       INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+               intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
 
-       mutex_unlock(&dev_priv->psr.lock);
-}
+               if (pipe_frontbuffer_bits)
+                       intel_psr_exit(intel_dp);
 
+               mutex_unlock(&intel_dp->psr.lock);
+       }
+}
 /*
  * When we will be completely rely on PSR2 S/W tracking in future,
  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
@@ -1616,15 +1757,15 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
  * accordingly in future.
  */
 static void
-tgl_dc3co_flush(struct drm_i915_private *dev_priv,
-               unsigned int frontbuffer_bits, enum fb_op_origin origin)
+tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+               enum fb_op_origin origin)
 {
-       mutex_lock(&dev_priv->psr.lock);
+       mutex_lock(&intel_dp->psr.lock);
 
-       if (!dev_priv->psr.dc3co_enabled)
+       if (!intel_dp->psr.dc3co_enabled)
                goto unlock;
 
-       if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+       if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
                goto unlock;
 
        /*
@@ -1632,15 +1773,15 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
         * when delayed work schedules that means display has been idle.
         */
        if (!(frontbuffer_bits &
-           INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+           INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
                goto unlock;
 
-       tgl_psr2_enable_dc3co(dev_priv);
-       mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
-                        dev_priv->psr.dc3co_exit_delay);
+       tgl_psr2_enable_dc3co(intel_dp);
+       mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+                        intel_dp->psr.dc3co_exit_delay);
 
 unlock:
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_unlock(&intel_dp->psr.lock);
 }
 
 /**
@@ -1659,46 +1800,72 @@ unlock:
 void intel_psr_flush(struct drm_i915_private *dev_priv,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-       if (!CAN_PSR(dev_priv))
-               return;
+       struct intel_encoder *encoder;
 
-       if (origin == ORIGIN_FLIP) {
-               tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
-               return;
-       }
+       for_each_intel_encoder_can_psr(&dev_priv->drm, encoder) {
+               unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
+               if (origin == ORIGIN_FLIP) {
+                       tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
+                       continue;
+               }
 
-       frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-       dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+               mutex_lock(&intel_dp->psr.lock);
+               if (!intel_dp->psr.enabled) {
+                       mutex_unlock(&intel_dp->psr.lock);
+                       continue;
+               }
+
+               pipe_frontbuffer_bits &=
+                       INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+               intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
 
-       /* By definition flush = invalidate + flush */
-       if (frontbuffer_bits)
-               psr_force_hw_tracking_exit(dev_priv);
+               /* By definition flush = invalidate + flush */
+               if (pipe_frontbuffer_bits)
+                       psr_force_hw_tracking_exit(intel_dp);
 
-       if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-               schedule_work(&dev_priv->psr.work);
-       mutex_unlock(&dev_priv->psr.lock);
+               if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+                       schedule_work(&intel_dp->psr.work);
+               mutex_unlock(&intel_dp->psr.lock);
+       }
 }
 
 /**
  * intel_psr_init - Init basic PSR work and mutex.
- * @dev_priv: i915 device private
+ * @intel_dp: Intel DP
  *
- * This function is  called only once at driver load to initialize basic
- * PSR stuff.
+ * This function is called after initializing the connector.
+ * (Connector initialization handles the connector capabilities.)
+ * It initializes basic PSR state for each DP encoder.
  */
-void intel_psr_init(struct drm_i915_private *dev_priv)
+void intel_psr_init(struct intel_dp *intel_dp)
 {
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
        if (!HAS_PSR(dev_priv))
                return;
 
-       if (!dev_priv->psr.sink_support)
+       if (!intel_dp->psr.sink_support)
+               return;
+
+       /*
+        * HSW spec explicitly says PSR is tied to port A.
+        * BDW+ platforms have an instance of PSR registers per transcoder,
+        * but BDW, GEN9 and GEN11 are not validated by the HW team on any
+        * transcoder other than the eDP one.
+        * For now only one instance of PSR is supported for BDW, GEN9 and
+        * GEN11, so let's keep it hardcoded to PORT_A for those platforms.
+        * GEN12, however, supports an instance of PSR registers per transcoder.
+        */
+       if (INTEL_GEN(dev_priv) < 12 && dig_port->base.port != PORT_A) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "PSR condition failed: Port not supported\n");
                return;
+       }
+
+       intel_dp->psr.source_support = true;
 
        if (IS_HASWELL(dev_priv))
                /*
@@ -1715,14 +1882,14 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
        /* Set link_standby x link_off defaults */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                /* HSW and BDW require workarounds that we don't implement. */
-               dev_priv->psr.link_standby = false;
+               intel_dp->psr.link_standby = false;
        else if (INTEL_GEN(dev_priv) < 12)
                /* For new platforms up to TGL let's respect VBT back again */
-               dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+               intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
 
-       INIT_WORK(&dev_priv->psr.work, intel_psr_work);
-       INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
-       mutex_init(&dev_priv->psr.lock);
+       INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+       INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
+       mutex_init(&intel_dp->psr.lock);
 }
 
 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
@@ -1748,7 +1915,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct drm_dp_aux *aux = &intel_dp->aux;
-       struct i915_psr *psr = &dev_priv->psr;
+       struct intel_psr *psr = &intel_dp->psr;
        u8 val;
        int r;
 
@@ -1775,7 +1942,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
 static void psr_capability_changed_check(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct i915_psr *psr = &dev_priv->psr;
+       struct intel_psr *psr = &intel_dp->psr;
        u8 val;
        int r;
 
@@ -1799,18 +1966,18 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
 void intel_psr_short_pulse(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct i915_psr *psr = &dev_priv->psr;
+       struct intel_psr *psr = &intel_dp->psr;
        u8 status, error_status;
        const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
                          DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
                          DP_PSR_LINK_CRC_ERROR;
 
-       if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+       if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
                return;
 
        mutex_lock(&psr->lock);
 
-       if (!psr->enabled || psr->dp != intel_dp)
+       if (!psr->enabled)
                goto exit;
 
        if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
@@ -1853,15 +2020,14 @@ exit:
 
 bool intel_psr_enabled(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        bool ret;
 
-       if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+       if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
                return false;
 
-       mutex_lock(&dev_priv->psr.lock);
-       ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
-       mutex_unlock(&dev_priv->psr.lock);
+       mutex_lock(&intel_dp->psr.lock);
+       ret = intel_dp->psr.enabled;
+       mutex_unlock(&intel_dp->psr.lock);
 
        return ret;
 }