/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_snps_phy.h"
#include "skl_universal_plane.h"
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented according
 * to the PSR spec in eDP 1.3. The PSR feature allows the display to go to
 * lower standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses software
 * frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
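 *
 * As an illustrative sketch of that integration (not a verbatim call trace),
 * a frontbuffer write is expected to flow roughly like this:
 *
 *   CPU renders to the frontbuffer
 *     -> intel_psr_invalidate()  (PSR exits, scanout resumes from the pipe)
 *   rendering completes and is flushed
 *     -> intel_psr_flush()       (re-enable is scheduled via psr.work)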
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and the function above is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
 * of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most of the modern systems will only
 * use page flips.
 */
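
/*
 * Illustrative timing sketch (assumed numbers, not from BSpec): during 30 fps
 * playback a flip arrives every ~33 ms, so PSR2 deep sleep is never reached;
 * with DC3CO armed, tgl_dc3co_disable_work only runs once no flip has been
 * seen for ~6 frame times (~200 ms at 30 fps, see psr.dc3co_exit_delay below),
 * at which point deep sleep is re-armed.
 */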
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		if (i915->params.enable_psr == -1)
			return connector->panel.vbt.psr.enable;
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}
static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		if (i915->params.enable_psr == 1)
			return false;
		return true;
	}
}
static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	u32 mask, val;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
	} else {
		trans_shift = intel_dp->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = intel_de_read(dev_priv, imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	intel_de_write(dev_priv, imr_reg, val);
}
static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	ktime_t time_ns = ktime_get();
	enum transcoder trans_shift;
	i915_reg_t imr_reg;

	if (DISPLAY_VER(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
	} else {
		trans_shift = intel_dp->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(dev_priv) >= 9) {
			u32 val = intel_de_read(dev_priv,
						PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = intel_dp->psr.psr2_enabled;

			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
				       val);
			psr_event_print(dev_priv, val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
		 */
		val = intel_de_read(dev_priv, imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		intel_de_write(dev_priv, imr_reg, val);

		schedule_work(&intel_dp->psr.work);
	}
}
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ssize_t r;
	u16 w;
	u8 y;

	/* If the sink doesn't have specific granularity requirements set legacy ones */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As PSR2 HW sends full lines, we do not care about x granularity */
		w = 4;
		y = 4;
		goto exit;
	}

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || w == 0)
		w = 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
	if (r != 1 || y == 0)
		y = 4;

exit:
	intel_dp->psr.su_w_granularity = w;
	intel_dp->psr.su_y_granularity = y;
}
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		intel_dp->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
			    intel_dp->psr.sink_psr2_support ? "" : "not ");

		if (intel_dp->psr.sink_psr2_support) {
			intel_dp->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			intel_dp_get_su_granularity(intel_dp);
		}
	}
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (intel_dp->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (intel_dp->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (DISPLAY_VER(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

check_tp3_sel:
	if (intel_dp_source_supports_tps3(dev_priv) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}
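
/*
 * Worked example (illustrative VBT values): a panel reporting
 * tp1_wakeup_time_us == 200 and tp2_tp3_wakeup_time_us == 50 falls into the
 * "<= 500" and "<= 100" buckets above, so the function ORs in
 * EDP_PSR_TP1_TIME_500us | EDP_PSR_TP2_TP3_TIME_100us, plus the TP1/TP2 vs
 * TP1/TP3 selection based on TPS3 support.
 */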
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}
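
/*
 * Worked example (illustrative): with a VBT idle_frames of 2 and a sink sync
 * latency of 8 frames, the result is max(max(6, 2), 8 + 1) = 9 idle frames,
 * comfortably below the 0xf register limit checked above.
 */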
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = EDP_PSR2_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	if (!IS_ALDERLAKE_P(dev_priv))
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
	val |= intel_psr2_get_tp_time(intel_dp);

	/* Wa_22012278275:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
		static const u8 map[] = {
			2, /* 5 lines */
			1, /* 6 lines */
			0, /* 7 lines */
			3, /* 8 lines */
			6, /* 9 lines */
			5, /* 10 lines */
			4, /* 11 lines */
			7, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
		u32 tmp, lines = 7;

		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;

		tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
		val |= tmp;

		tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
		val |= tmp;
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the
		 * default values from BSpec. To reach optimal power
		 * consumption, modes below 4k resolution should decrease
		 * IO_BUFFER_WAKE and FAST_WAKE, and modes above 4k resolution
		 * should increase them.
		 */
		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= TGL_EDP_PSR2_FAST_WAKE(7);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= EDP_PSR2_FAST_WAKE(7);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 tmp;

		/* Wa_1408330847 */
		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);

		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (IS_ALDERLAKE_P(dev_priv))
		return trans == TRANSCODER_A || trans == TRANSCODER_B;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
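
/*
 * Worked example (illustrative): a 60 Hz mode yields
 * DIV_ROUND_UP(1000 * 1000, 60) = 16667 us per frame, which callers use
 * e.g. to convert the 6-idle-frame DC3CO requirement into a delay.
 */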
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     int idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}

static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
}
static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum port port = dig_port->base.port;

	if (IS_ALDERLAKE_P(dev_priv))
		return pipe <= PIPE_B && port <= PORT_B;
	else
		return pipe == PIPE_A && port == PORT_A;
}
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!dev_priv->params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	/* Wa_14010254185 Wa_14010103792 */
	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
		return false;
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}
static bool psr2_granularity_check(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;

	/* PSR2 HW only sends full lines so we only need to validate the width */
	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
		return false;

	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return intel_dp->psr.su_y_granularity == 4;

	/*
	 * adl_p has 1 line granularity. For other platforms with SW tracking we
	 * can adjust the y coordinates to match sink requirement if multiple of
	 * 4.
	 */
	if (IS_ALDERLAKE_P(dev_priv))
		y_granularity = intel_dp->psr.su_y_granularity;
	else if (intel_dp->psr.su_y_granularity <= 2)
		y_granularity = 4;
	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
		y_granularity = intel_dp->psr.su_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}
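
/*
 * Worked example (illustrative): a 1920x1080 mode with su_w_granularity of 4
 * passes the width check (1920 % 4 == 0). On a non-ADLP platform using
 * selective fetch, a sink su_y_granularity of 2 is bumped to a SW
 * y_granularity of 4, which 1080 also divides evenly (1080 % 4 == 0).
 */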
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

	if ((hblank_ns - req_ns) > 100)
		return true;

	/* Not supported <13 / Wa_22012279113:adl-p */
	if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}
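
/*
 * Worked example (illustrative numbers): 4 lanes at HBR link rate means
 * port_clock == 270000, i.e. a 270 MHz symbol clock, so
 * req_ns = ((60 / 4) + 11) * 1000 / 270 ~= 96 ns. A 1080p mode with
 * hblank_total == 160 pixels at crtc_clock == 148500 kHz gives
 * hblank_ns = 1000000 * 160 / 148500 ~= 1077 ns, leaving well over the
 * required 100 ns of margin, so no SDP-prior-scanline indication is needed.
 */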
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!intel_dp->psr.sink_psr2_support)
		return false;

	/* JSL and EHL only support eDP 1.3 */
	if (IS_JSL_EHL(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG2(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
		return false;
	}

	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
		return false;
	}

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	if (!psr2_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (DISPLAY_VER(dev_priv) >= 10) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	/* Wa_2209313811 */
	if (!crtc_state->enable_psr2_sel_fetch &&
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported in this display stepping\n");
		goto unsupported;
	}

	if (!psr2_granularity_check(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		goto unsupported;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled, do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return;

	if (!CAN_PSR(intel_dp))
		return;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &crtc_state->psr_vsc);
}
void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	/*
	 * Not possible to read EDP_PSR/PSR2_CTL registers as they are
	 * constantly enabled/disabled by frontbuffer tracking and others.
	 */
	pipe_config->has_psr = true;
	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.psr2_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
		val &= EXITLINE_MASK;
		pipe_config->dc3co_exitline = val;
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder transcoder = intel_dp->psr.transcoder;

	if (transcoder_has_psr2(dev_priv, transcoder))
		drm_WARN_ON(&dev_priv->drm,
			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (intel_dp->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
}
static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
{
	switch (intel_dp->psr.pipe) {
	case PIPE_A:
		return LATENCY_REPORTING_REMOVED_PIPE_A;
	case PIPE_B:
		return LATENCY_REPORTING_REMOVED_PIPE_B;
	case PIPE_C:
		return LATENCY_REPORTING_REMOVED_PIPE_C;
	default:
		MISSING_CASE(intel_dp->psr.pipe);
		return 0;
	}
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid dependency on other drivers that might block
	 * runtime_pm besides preventing other hw tracking issues now that we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (DISPLAY_VER(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
		       mask);

	psr_irq_control(intel_dp);

	if (intel_dp->psr.dc3co_exitline) {
		u32 val;

		/*
		 * TODO: if future platforms support DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
		val &= ~EXITLINE_MASK;
		val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
		val |= EXITLINE_ENABLE;
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
	}

	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	if (intel_dp->psr.psr2_enabled) {
		if (DISPLAY_VER(dev_priv) == 9)
			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
				     PSR2_VSC_ENABLE_PROG_HEADER |
				     PSR2_ADD_VERTICAL_LINE_COUNT);

		/*
		 * Wa_16014451276:adlp
		 * All supported adlp panels have 1-based X granularity, this may
		 * cause issues if non-supported panels are used.
		 */
		if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
				     ADLP_1_BASED_X_GRANULARITY);

		/* Wa_16011168373:adl-p */
		if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
				     TRANS_SET_CONTEXT_LATENCY_MASK,
				     TRANS_SET_CONTEXT_LATENCY_VALUE(1));

		/* Wa_16012604467:adlp */
		if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);

		/* Wa_16013835468:tgl[b0+], dg1 */
		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
		    IS_DG1(dev_priv)) {
			u16 vtotal, vblank;

			vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
				 crtc_state->uapi.adjusted_mode.crtc_vdisplay;
			vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
				 crtc_state->uapi.adjusted_mode.crtc_vblank_start;
			if (vblank > vtotal)
				intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
					     wa_16013835468_bit_get(intel_dp));
		}
	}
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling in this situation causes the screen to freeze the first
	 * time that PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv,
				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = intel_de_read(dev_priv, EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
	}
	if (val) {
		intel_dp->psr.sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interrupt error set, not enabling PSR\n");
		return false;
	}

	return true;
}
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);

	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;

	if (!psr_interrupt_error_check(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");
	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;
	intel_dp->psr.paused = false;

	intel_psr_activate(intel_dp);
}
static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
			val = intel_de_read(dev_priv,
					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
	}
	intel_dp->psr.active = false;
}
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting for PSR idle state\n");
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum phy phy = intel_port_to_phy(dev_priv,
					 dp_to_dig_port(intel_dp)->base.port);

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/* Wa_1408330847 */
	if (intel_dp->psr.psr2_sel_fetch_enabled &&
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);

	if (intel_dp->psr.psr2_enabled) {
		/* Wa_16011168373:adl-p */
		if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
				     TRANS_SET_CONTEXT_LATENCY_MASK, 0);

		/* Wa_16012604467:adlp */
		if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);

		/* Wa_16013835468:tgl[b0+], dg1 */
		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
		    IS_DG1(dev_priv))
			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
				     wa_16013835468_bit_get(intel_dp), 0);
	}

	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (intel_dp->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	intel_dp->psr.enabled = false;
	intel_dp->psr.psr2_enabled = false;
	intel_dp->psr.psr2_sel_fetch_enabled = false;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
/**
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after enabling PSR.
 */
void intel_psr_pause(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled) {
		mutex_unlock(&psr->lock);
		return;
	}

	/* If we ever hit this, we will need to add refcount to pause/resume */
	drm_WARN_ON(&dev_priv->drm, psr->paused);

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);
	psr->paused = true;

	mutex_unlock(&psr->lock);

	cancel_work_sync(&psr->work);
	cancel_delayed_work_sync(&psr->dc3co_work);
}
/**
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after pausing PSR.
 */
void intel_psr_resume(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->paused)
		goto unlock;

	psr->paused = false;
	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&psr->lock);
}
static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) ? 0 : PSR2_MAN_TRK_CTL_ENABLE;
}

static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) ?
	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}

static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) ?
	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}

static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
{
	return IS_ALDERLAKE_P(dev_priv) ?
	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (intel_dp->psr.psr2_sel_fetch_enabled)
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
			       man_trk_ctl_enable_bit_get(dev_priv) |
			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
			       man_trk_ctl_single_full_frame_bit_get(dev_priv));

	/*
	 * Display WA #0884: skl+
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the current active
	 * pipe.
	 *
	 * This workaround does not exist for platforms with display 10 or
	 * newer, but testing proved that it works up to display 13; anything
	 * newer than that will need testing.
	 */
	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}

void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	const struct drm_rect *clip;
	u32 val;
	int x, y;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	if (plane->id == PLANE_CURSOR) {
		intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
				  plane_state->ctl);
		return;
	}

	clip = &plane_state->psr2_sel_fetch_area;

	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
	val |= plane_state->uapi.dst.x1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);

	x = plane_state->view.color_plane[color_plane].x;

	/*
	 * From Bspec: UV surface Start Y Position = half of Y plane Y
	 * position.
	 */
	if (!color_plane)
		y = plane_state->view.color_plane[color_plane].y + clip->y1;
	else
		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;

	val = y << 16 | x;

	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
			  val);

	/* Sizes are 0 based */
	val = (drm_rect_height(clip) - 1) << 16;
	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);

	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
			  PLANE_SEL_FETCH_CTL_ENABLE);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		lockdep_assert_held(&intel_dp->psr.lock);
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;
		break;
	}

	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
		       crtc_state->psr2_man_track_ctl);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  struct drm_rect *clip, bool full_update)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = man_trk_ctl_enable_bit_get(dev_priv);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);

	if (full_update) {
		/*
		 * Not applying Wa_14014971508:adlp as we do not support the
		 * feature that requires this workaround.
		 */
		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
		goto exit;
	}

	if (clip->y1 == -1)
		goto exit;

	if (IS_ALDERLAKE_P(dev_priv)) {
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
	} else {
		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);

		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
	}
exit:
	crtc_state->psr2_man_track_ctl = val;
}
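
/*
 * Worked example (illustrative): on non-ADLP hardware the SU region is
 * programmed in 1-based blocks of 4 lines, so an aligned clip of y1 == 8,
 * y2 == 16 becomes start address 8 / 4 + 1 = 3 and end address
 * 16 / 4 + 1 = 5. On ADLP the same clip is programmed with 1-line
 * granularity as start 8 and end 15 (y2 - 1).
 */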
static void clip_area_update(struct drm_rect *overlap_damage_area,
			     struct drm_rect *damage_area,
			     struct drm_rect *pipe_src)
{
	if (!drm_rect_intersect(damage_area, pipe_src))
		return;

	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;

	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}
static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
						struct drm_rect *pipe_clip)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const u16 y_alignment = crtc_state->su_y_granularity;

	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
	if (pipe_clip->y2 % y_alignment)
		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;

	if (IS_ALDERLAKE_P(dev_priv) && crtc_state->dsc.compression_enable)
		drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
}
/*
 * TODO: Not clear how to handle planes with negative position,
 * also planes are not updated if they have a negative X
 * position so for now doing a full update in these cases
 *
 * Plane scaling and rotation is not supported by selective fetch and both
 * properties can change without a modeset, so they need to be checked at
 * every atomic commit.
 */
static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
{
	if (plane_state->uapi.dst.y1 < 0 ||
	    plane_state->uapi.dst.x1 < 0 ||
	    plane_state->scaler_id >= 0 ||
	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}
/*
 * Check for pipe properties that are not supported by selective fetch.
 *
 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
 * enabled and going to the full update path.
 */
static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->scaler_state.scaler_id >= 0)
		return false;

	return true;
}
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	bool full_update = false;
	int i, ret;

	if (!crtc_state->enable_psr2_sel_fetch)
		return 0;

	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
		full_update = true;
		goto skip_sel_fetch_set_loop;
	}

	/*
	 * Calculate minimal selective fetch area of each plane and calculate
	 * the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be set
	 * using the whole pipe damaged area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
						      .x2 = INT_MAX, .y2 = -1 };

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
			continue;

		if (!new_plane_state->uapi.visible &&
		    !old_plane_state->uapi.visible)
			continue;

		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
			full_update = true;
			break;
		}

		/*
		 * If visibility or plane moved, mark the whole plane area as
		 * damaged as it needs a complete redraw in both the new and
		 * old framebuffers.
		 */
		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
		    !drm_rect_equals(&new_plane_state->uapi.dst,
				     &old_plane_state->uapi.dst)) {
			if (old_plane_state->uapi.visible) {
				damaged_area.y1 = old_plane_state->uapi.dst.y1;
				damaged_area.y2 = old_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area,
						 &crtc_state->pipe_src);
			}

			if (new_plane_state->uapi.visible) {
				damaged_area.y1 = new_plane_state->uapi.dst.y1;
				damaged_area.y2 = new_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area,
						 &crtc_state->pipe_src);
			}
			continue;
		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
			/* If alpha changed mark the whole plane area as damaged */
			damaged_area.y1 = new_plane_state->uapi.dst.y1;
			damaged_area.y2 = new_plane_state->uapi.dst.y2;
			clip_area_update(&pipe_clip, &damaged_area,
					 &crtc_state->pipe_src);
			continue;
		}

		src = drm_plane_state_src(&new_plane_state->uapi);
		drm_rect_fp_to_int(&src, &src);

		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
						     &new_plane_state->uapi, &damaged_area))
			continue;

		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;

		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
	}

	/*
	 * TODO: For now we are just using full update in case
	 * selective fetch area calculation fails. To optimize this we
	 * should identify cases where this happens and fix the area
	 * calculation for those.
	 */
	if (pipe_clip.y1 == -1) {
		drm_info_once(&dev_priv->drm,
			      "Selective fetch area calculation failed in pipe %c\n",
			      pipe_name(crtc->pipe));
		full_update = true;
	}

	if (full_update)
		goto skip_sel_fetch_set_loop;

	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);

	/*
	 * Now that we have the pipe damaged area check if it intersects with
	 * every plane; if it does, set the plane selective fetch area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect *sel_fetch_area, inter;
		struct intel_plane *linked = new_plane_state->planar_linked_plane;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
		    !new_plane_state->uapi.visible)
			continue;

		inter = pipe_clip;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
			full_update = true;
			break;
		}

		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
		crtc_state->update_planes |= BIT(plane->id);

		/*
		 * Sel_fetch_area is calculated for UV plane. Use
		 * same area for Y plane as well.
		 */
		if (linked) {
			struct intel_plane_state *linked_new_plane_state;
			struct drm_rect *linked_sel_fetch_area;

			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_new_plane_state))
				return PTR_ERR(linked_new_plane_state);

			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
			crtc_state->update_planes |= BIT(linked->id);
		}
	}

skip_sel_fetch_set_loop:
	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
	return 0;
}
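
/*
 * Worked example (illustrative) of the two passes above: if a plane at
 * dst y1 == 100 reports damage spanning pipe lines 200..300, the first loop
 * grows pipe_clip to 200..300; after alignment, the second loop intersects
 * that clip with the plane and programs a plane-relative sel_fetch_area of
 * y1 == 100, y2 == 200 (inter.y1/y2 minus dst.y1).
 */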
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!HAS_PSR(i915))
		return;

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     old_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;
		bool needs_to_disable = false;

		mutex_lock(&psr->lock);

		/*
		 * Reasons to disable:
		 * - PSR disabled in new state
		 * - All planes will go inactive
		 * - Changing between PSR versions
		 */
		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
		needs_to_disable |= !new_crtc_state->has_psr;
		needs_to_disable |= !new_crtc_state->active_planes;
		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;

		if (psr->enabled && needs_to_disable)
			intel_psr_disable_locked(intel_dp);

		mutex_unlock(&psr->lock);
	}
}
static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;

		mutex_lock(&psr->lock);

		if (psr->sink_not_reliable)
			goto exit;

		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);

		/* Only enable if there are active planes */
		if (!psr->enabled && crtc_state->active_planes)
			intel_psr_enable_locked(intel_dp, crtc_state);

		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(intel_dp);

exit:
		mutex_unlock(&psr->lock);
	}
}
void intel_psr_post_plane_update(const struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (!HAS_PSR(dev_priv))
		return;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
		_intel_psr_post_plane_update(state, crtc_state);
}
static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
	 * As all higher states have bit 4 of the PSR2 state set, we can just
	 * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
	 */
	return intel_de_wait_for_clear(dev_priv,
				       EDP_PSR2_STATUS(intel_dp->psr.transcoder),
				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
}
static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
	return intel_de_wait_for_clear(dev_priv,
				       EDP_PSR_STATUS(intel_dp->psr.transcoder),
				       EDP_PSR_STATUS_STATE_MASK, 50);
}
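/*
 * Worked example for the 50 ms bound above: at 60 Hz the worst case is
 * roughly 1000/60 + 6 + 1.5 ~= 24.2 ms, and even a slow 24 Hz panel stays
 * below 1000/24 + 6 + 1.5 ~= 49.2 ms, so 50 ms covers all plausible panels.
 */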
/**
 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		int ret;

		lockdep_assert_held(&intel_dp->psr.lock);

		if (!intel_dp->psr.enabled)
			continue;

		if (intel_dp->psr.psr2_enabled)
			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
		else
			ret = _psr1_ready_for_pipe_update_locked(intel_dp);

		if (ret)
			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
	}
}
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	if (intel_dp->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled;
}
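/*
 * Note on the drop-lock-and-wait pattern above (sketch, assuming the caller
 * holds psr.lock): the register poll can take up to 50 ms, so the lock is
 * released while waiting and re-taken afterwards. The caller must therefore
 * re-validate psr.enabled, which is exactly what the return value encodes:
 *
 *	if (!__psr_wait_for_idle_locked(intel_dp))
 *		goto unlock;	// PSR got disabled while we slept
 */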
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}
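/*
 * Usage note (assumed wiring): this is driven from the i915_edp_psr_debug
 * debugfs file, so userspace can request a mode from I915_PSR_DEBUG_MODE_MASK,
 * optionally ORed with I915_PSR_DEBUG_IRQ, e.g. (debugfs path may vary):
 *
 *	echo 0x1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */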
static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 val;

		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;

		val = man_trk_ctl_enable_bit_get(dev_priv) |
		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
		      man_trk_ctl_continuos_full_frame(dev_priv);
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
	} else {
		intel_psr_exit(intel_dp);
	}
}
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			_psr_invalidate_handle(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}
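/*
 * Illustrative caller pattern (assumed, simplified) from the frontbuffer
 * tracking code: CPU rendering brackets its dirtying with an invalidate and
 * a flush, which is what drives the PSR exit and re-enable above:
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	// ... CPU writes to the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 */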
/*
 * Once we completely rely on PSR2 S/W tracking, intel_psr_flush() will also
 * invalidate and flush the PSR for ORIGIN_FLIP events, so
 * tgl_dc3co_flush_locked() will need to be changed accordingly.
 */
static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		       enum fb_op_origin origin)
{
	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
	    !intel_dp->psr.active)
		return;

	/*
	 * Every frontbuffer flush/flip event pushes back the delayed work;
	 * if the delayed work actually gets to run, the display has been
	 * idle for the whole delay.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		return;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);
}
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* can we turn CFF off? */
			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
					  man_trk_ctl_partial_frame_bit_get(dev_priv) |
					  man_trk_ctl_single_full_frame_bit_get(dev_priv);

				/*
				 * Turn continuous full frame off and do a
				 * single full frame.
				 */
				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
					       val);
				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
			}
		} else {
			/*
			 * Continuous full frame is disabled, only a single
			 * full frame is required.
			 */
			psr_force_hw_tracking_exit(intel_dp);
		}
	} else {
		psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			schedule_work(&intel_dp->psr.work);
	}
}
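/*
 * Sketch of the selective fetch flush handling above (assumed
 * simplification): while invalidated, the hardware is kept in continuous
 * full frame (CFF) fetch mode; once the last busy frontbuffer bit clears,
 * a single full frame (SFF) fetch is programmed so one complete frame is
 * sent, after which plane-tracked selective updates can resume.
 */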
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If the PSR is paused by an explicit intel_psr_paused() call,
		 * we have to ensure that the PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.paused)
			goto unlock;

		if (origin == ORIGIN_FLIP ||
		    (origin == ORIGIN_CURSOR_UPDATE &&
		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
			goto unlock;
		}

		if (pipe_frontbuffer_bits == 0)
			goto unlock;

		/* By definition flush = invalidate + flush */
		_psr_flush_handle(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after the connector has been initialized
 * (connector initialization handles the connector capabilities) and
 * sets up the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of the PSR registers per
	 * transcoder, but on BDW, GEN9 and GEN11 the HW team has only
	 * validated the eDP transcoder.
	 * For now only one instance of PSR is supported on BDW, GEN9 and
	 * GEN11, so keep it hardcoded to PORT_A there. GEN12 onwards
	 * supports an instance of the PSR registers per transcoder.
	 */
	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	/* Set link_standby/link_off defaults */
	if (DISPLAY_VER(dev_priv) < 12)
		/* For new platforms up to TGL let's respect VBT back again */
		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}
static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);

	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}
bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}
/**
 * intel_psr_lock - grab PSR lock
 * @crtc_state: the crtc state
 *
 * This is initially meant to be used around the CRTC update, when
 * vblank-sensitive registers are updated and we need to grab the lock
 * beforehand to avoid vblank evasion.
 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}
/**
 * intel_psr_unlock - release PSR lock
 * @crtc_state: the crtc state
 *
 * Release the PSR lock that was held during pipe update.
 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_unlock(&intel_dp->psr.lock);
		break;
	}
}
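/*
 * Hedged usage sketch: intel_psr_lock()/intel_psr_unlock() are meant to
 * bracket the vblank-evasion critical section of a pipe update, roughly:
 *
 *	intel_psr_lock(new_crtc_state);
 *	intel_psr_wait_for_idle_locked(new_crtc_state);
 *	// ... vblank evasion and register writes ...
 *	intel_psr_unlock(new_crtc_state);
 */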