/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely, as long as
 * the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
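 *
 * As an illustration only (not the exact call chain), a CPU write to the
 * framebuffer funnels into PSR roughly like this, via the frontbuffer
 * tracking entry points:
 *
 *   intel_frontbuffer_invalidate(front, ORIGIN_CPU)
 *     -> intel_psr_invalidate()   PSR exits, stays off while dirty
 *   ... drawing finishes ...
 *   intel_frontbuffer_flush(front, ORIGIN_CPU)
 *     -> intel_psr_flush()        re-activation deferred to psr.work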
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
 * in it), DC3CO is enabled, and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and that work function executes,
 * DC3CO is disabled and PSR2 is configured to enter deep sleep, resetting
 * again in case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most of the modern systems will only
 * use page flips.
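 *
 * A rough, illustrative timeline of the scheme described above:
 *
 *   flip N          -> PSR2 leaves deep sleep, DC3CO allowed,
 *                      tgl_dc3co_disable_work scheduled ~6 frames out
 *   flip N+1 (soon) -> work rescheduled, DC3CO stays allowed
 *   6 idle frames   -> work runs: DC3CO disallowed, PSR2 deep sleep again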
 *
 * Description of PSR mask bits:
 *
 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 *
 *  When unmasked (nearly) all display register writes (e.g. even
 *  SWF) trigger a PSR exit. Some registers are excluded from this
 *  and they have a more specific mask (described below). On icl+
 *  this bit no longer exists and is effectively always set.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 *
 *  When unmasked (nearly) all pipe/plane register writes
 *  trigger a PSR exit. Some plane registers are excluded from this
 *  and they have a more specific mask (described below).
 *
 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 *
 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 *  SPR_SURF/CURBASE are not included in this and instead are
 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 *
 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this doesn't exist but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 *
 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 *
 *  When unmasked PSR is blocked as long as the vblank and/or vsync
 *  interrupt is unmasked in IMR *and* enabled in IER.
 *
 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
 *
 *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
 *  No idea as of now why there is a difference. HSW/BDW (which don't
 *  even have DMC) always generate it after link training. Go figure.
 *
 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during DC5
 *  exit+PSR exit sequence, and thus the bit does nothing until
 *  latched by the vblank that it was trying to prevent from being
 *  generated in the first place. So we should probably call this one
 *  a chicken/egg bit instead on skl+.
 *
 *  In standby mode (as opposed to link-off) this makes no difference
 *  as the timing generator keeps running the whole time generating
 *  normal periodic vblanks.
 *
 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 *  and doing so makes the behaviour match the skl+ reset value.
 *
 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 *
 *  On BDW without this bit set no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 *
 * The rest of the bits are more self-explanatory and/or
 * irrelevant for normal operation.
 */

bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
		return CAN_PSR(enc_to_intel_dp(encoder)) ||
		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
	else
		return false;
}

static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		if (i915->display.params.enable_psr == -1)
			return connector->panel.vbt.psr.enable;
		return i915->display.params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		if (i915->display.params.enable_psr == 1)
			return false;
		return true;
	}
}

static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
		EDP_PSR_ERROR(intel_dp->psr.transcoder);
}

static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}

static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}

static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
		EDP_PSR_MASK(intel_dp->psr.transcoder);
}

static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_CTL(cpu_transcoder);
	else
		return HSW_SRD_CTL;
}

static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_DEBUG(cpu_transcoder);
	else
		return HSW_SRD_DEBUG;
}

static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_PERF_CNT(cpu_transcoder);
	else
		return HSW_SRD_PERF_CNT;
}

static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
				 enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_STATUS(cpu_transcoder);
	else
		return HSW_SRD_STATUS;
}

static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IMR(cpu_transcoder);
	else
		return EDP_PSR_IMR;
}

static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IIR(cpu_transcoder);
	else
		return EDP_PSR_IIR;
}

static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_CTL(cpu_transcoder);
	else
		return HSW_SRD_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder, int i)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
	else
		return HSW_SRD_AUX_DATA(i);
}

static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	mask = psr_irq_psr_error_bit_get(intel_dp);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= psr_irq_post_exit_bit_get(intel_dp) |
			psr_irq_pre_entry_bit_get(intel_dp);

	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
		     psr_irq_mask_get(intel_dp), ~mask);
}

static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	ktime_t time_ns = ktime_get();

	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(dev_priv) >= 9) {
			u32 val;

			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);

			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
		}
	}

	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep
		 * firing so fast that it prevents the scheduled
		 * work from running.
		 * Also after a PSR error we don't want to arm PSR
		 * again, so we don't care about unmasking the interrupt
		 * or unsetting irq_aux_error.
		 */
		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;

	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");

	return val;
}

static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ssize_t r;
	u16 w;
	u8 y;

	/* If the sink doesn't have specific granularity requirements set legacy ones */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As PSR2 HW sends full lines, we do not care about x granularity */
		w = 4;
		y = 4;
		goto exit;
	}

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || w == 0)
		w = 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
	if (r != 1 || y == 0)
		y = 4;

exit:
	intel_dp->psr.su_w_granularity = w;
	intel_dp->psr.su_y_granularity = y;
}

static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 pr_dpcd = 0;

	intel_dp->psr.sink_panel_replay_support = false;
	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);

	if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
		drm_dbg_kms(&i915->drm,
			    "Panel replay is not supported by panel\n");
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "Panel replay is supported by panel\n");
	intel_dp->psr.sink_panel_replay_support = true;
}

static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&i915->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&i915->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(i915) >= 9 &&
	    intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		intel_dp->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
			    intel_dp->psr.sink_psr2_support ? "" : "not ");
	}
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	_panel_replay_init_dpcd(intel_dp);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (intel_dp->psr_dpcd[0])
		_psr_init_dpcd(intel_dp);

	if (intel_dp->psr.sink_psr2_support) {
		intel_dp->psr.colorimetry_support =
			intel_dp_get_colorimetry_status(intel_dp);
		intel_dp_get_su_granularity(intel_dp);
	}
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 aux_clock_divider, aux_ctl;
	/* write DP_SET_POWER=D0 */
	static const u8 aux_msg[] = {
		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
		[1] = (DP_SET_POWER >> 8) & 0xff,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
		       aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	if (intel_dp->psr.panel_replay_enabled)
		return;

	if (intel_dp->psr.psr2_enabled) {
		/* Enable ALPM at sink for psr2 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (intel_dp->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	if (intel_dp->psr.entry_setup_frames > 0)
		dpcd_val |= DP_PSR_FRAME_CAPTURE;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (DISPLAY_VER(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0us;

	if (dev_priv->display.params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	/*
	 * WA 0479: hsw,bdw
	 * "Do not skip both TP1 and TP2/TP3"
	 */
	if (DISPLAY_VER(dev_priv) < 9 &&
	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;

check_tp3_sel:
	if (intel_dp_source_supports_tps3(dev_priv) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP_TP1_TP3;
	else
		val |= EDP_PSR_TP_TP1_TP2;

	return val;
}

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}
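
/*
 * Worked example for psr_compute_idle_frames(), with purely illustrative
 * values: vbt.psr.idle_frames == 2 and sink_sync_latency == 3 yield
 * max(max(6, 2), 3 + 1) == 6, i.e. the HW waits for 6 undisturbed frames
 * before entering PSR.
 */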

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(dev_priv) < 20)
		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 20)
		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->display.params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
	return intel_dp->psr.io_wake_lines < 9 &&
		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
}

static int psr2_block_count(struct intel_dp *intel_dp)
{
	return psr2_block_count_lines(intel_dp) / 4;
}

static u8 frames_before_su_entry(struct intel_dp *intel_dp)
{
	u8 frames_before_su_entry;

	frames_before_su_entry = max_t(u8,
				       intel_dp->psr.sink_sync_latency + 1,
				       2);

	/* Entry setup frames must be at least 1 less than frames before SU entry */
	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;

	return frames_before_su_entry;
}
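
/*
 * Worked example for frames_before_su_entry(), with purely illustrative
 * values: sink_sync_latency == 3 gives max(3 + 1, 2) == 4 frames; if
 * entry_setup_frames is also 4, the result is bumped to 4 + 1 == 5 so the
 * entry setup frames stay strictly below the frames-before-SU value.
 */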

static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);

	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
		     TRANS_DP2_PANEL_REPLAY_ENABLE);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val = EDP_PSR2_ENABLE;
	u32 psr_val = 0;

	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));

	val |= intel_psr2_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 12) {
		if (psr2_block_count(intel_dp) > 2)
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
		else
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
	}

	/* Wa_22012278275:adl-p */
	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
		static const u8 map[] = {
			2, /* 5 lines */
			3, /* 6 lines */
			3, /* 7 lines */
			4, /* 8 lines */
			5, /* 9 lines */
			5, /* 10 lines */
			6, /* 11 lines */
			6, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
		u32 tmp;

		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);

		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (DISPLAY_VER(dev_priv) >= 20)
		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 tmp;

		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);

	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
{
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return cpu_transcoder == TRANSCODER_A;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return cpu_transcoder == TRANSCODER_EDP;
	else
		return false;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
}
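
/*
 * Worked example for intel_get_frame_time_us(): a 60 Hz mode gives
 * DIV_ROUND_UP(1000 * 1000, 60) == 16667 us per frame, so the 6 idle
 * frames required before DC5/DC6 (see intel_psr_enable_locked()) amount
 * to roughly 100 ms of dc3co_exit_delay.
 */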

static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     int idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
		     EDP_PSR2_IDLE_FRAMES_MASK,
		     EDP_PSR2_IDLE_FRAMES(idle_frames));
}

static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
}

static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum port port = dig_port->base.port;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return pipe <= PIPE_B && port <= PORT_B;
	else
		return pipe == PIPE_A && port == PORT_A;
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

static bool psr2_granularity_check(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;

	/* PSR2 HW only sends full lines, so we only need to validate the width */
	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
		return false;

	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return intel_dp->psr.su_y_granularity == 4;

	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match sink requirement if multiple of 4.
	 */
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		y_granularity = intel_dp->psr.su_y_granularity;
	else if (intel_dp->psr.su_y_granularity <= 2)
		y_granularity = 4;
	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
		y_granularity = intel_dp->psr.su_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	if (crtc_state->dsc.compression_enable &&
	    vdsc_cfg->slice_height % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}
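
/*
 * Worked example for psr2_granularity_check(), with purely illustrative
 * values: a 1920x1080 mode with su_w_granularity == 4 passes the width
 * check (1920 % 4 == 0); with selective fetch enabled on a non-adl_p
 * platform and su_y_granularity == 8 (a multiple of 4), y_granularity
 * becomes 8 and 1080 % 8 == 0, so the check succeeds and
 * crtc_state->su_y_granularity is set to 8.
 */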

static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

	if ((hblank_ns - req_ns) > 100)
		return true;

	/* Not supported <13 / Wa_22012279113:adl-p */
	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}
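
/*
 * Worked example for the hblank check above, with purely illustrative
 * values: 4 lanes at HBR2 (port_clock == 540000 kHz, i.e. a 540 MHz
 * symbol clock) give req_ns = ((60 / 4) + 11) * 1000 / 540 == 48 ns, so
 * any mode whose hblank lasts longer than ~148 ns leaves the required
 * 100 ns of slack and no SDP-prior-scanline indication is needed.
 */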

static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
	u8 max_wake_lines;

	if (DISPLAY_VER(i915) >= 12) {
		io_wake_time = 42;
		/*
		 * According to Bspec it's 42us, but based on testing
		 * it is not enough -> use 45 us.
		 */
		fast_wake_time = 45;
		max_wake_lines = 12;
	} else {
		io_wake_time = 50;
		fast_wake_time = 32;
		max_wake_lines = 8;
	}

	io_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, io_wake_time);
	fast_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, fast_wake_time);

	if (io_wake_lines > max_wake_lines ||
	    fast_wake_lines > max_wake_lines)
		return false;

	if (i915->display.params.psr_safest_params)
		io_wake_lines = fast_wake_lines = max_wake_lines;

	/* According to Bspec lower limit should be set as 7 lines. */
	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);

	return true;
}
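
/*
 * Worked example for the wake line computation above, with purely
 * illustrative values: a mode with a ~15 us line time turns
 * fast_wake_time == 45 us into 3 scanlines, which the Bspec lower limit
 * then raises to 7 lines; only results above max_wake_lines reject PSR2.
 */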

static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
					const struct drm_display_mode *adjusted_mode)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	int entry_setup_frames = 0;

	if (psr_setup_time < 0) {
		drm_dbg_kms(&i915->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return -ETIME;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		if (DISPLAY_VER(i915) >= 20) {
			/* setup entry frames can be up to 3 frames */
			entry_setup_frames = 1;
			drm_dbg_kms(&i915->drm,
				    "PSR setup entry frames %d\n",
				    entry_setup_frames);
		} else {
			drm_dbg_kms(&i915->drm,
				    "PSR condition failed: PSR setup time (%d us) too long\n",
				    psr_setup_time);
			return -ETIME;
		}
	}

	return entry_setup_frames;
}
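
/*
 * Worked example for intel_psr_entry_setup_frames(), with purely
 * illustrative values: a 1080p mode with crtc_vtotal == 1125 leaves
 * 1125 - 1080 - 1 == 44 usable vblank lines; a panel reporting 330 us of
 * PSR setup time needs ~22 lines at a ~15 us line time, which fits, so 0
 * entry setup frames are needed.
 */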

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!intel_dp->psr.sink_psr2_support)
		return false;

	/* JSL and EHL only supports eDP 1.3 */
	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG2(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
		return false;
	}

	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
		return false;
	}

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	if (!psr2_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable &&
	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (DISPLAY_VER(dev_priv) >= 10) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
		return false;
	}

	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, unable to use long enough wake times\n");
		return false;
	}

	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
	    psr2_block_count_lines(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, too short vblank time\n");
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!psr2_granularity_check(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		goto unsupported;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}

static bool _psr_compute_config(struct intel_dp *intel_dp,
				struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	int entry_setup_frames;

	/*
	 * Current PSR panels don't work reliably with VRR enabled,
	 * so if VRR is enabled, do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return false;

	if (!CAN_PSR(intel_dp))
		return false;

	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);

	if (entry_setup_frames >= 0) {
		intel_dp->psr.entry_setup_frames = entry_setup_frames;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup timing not met\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	if (CAN_PANEL_REPLAY(intel_dp))
		crtc_state->has_panel_replay = true;
	else
		crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);

	if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
		return;

	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &crtc_state->psr_vsc);
}

void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	if (intel_dp->psr.panel_replay_enabled) {
		pipe_config->has_panel_replay = true;
	} else {
		/*
		 * Not possible to read EDP_PSR/PSR2_CTL registers as they get
		 * enabled/disabled behind our back by frontbuffer tracking
		 * and others.
		 */
		pipe_config->has_psr = true;
	}

	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.psr2_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	drm_WARN_ON(&dev_priv->drm,
		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);

	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1, psr2 and panel-replay are mutually exclusive. */
	if (intel_dp->psr.panel_replay_enabled)
		dg2_activate_panel_replay(intel_dp);
	else if (intel_dp->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
}

static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
{
	switch (intel_dp->psr.pipe) {
	case PIPE_A:
		return LATENCY_REPORTING_REMOVED_PIPE_A;
	case PIPE_B:
		return LATENCY_REPORTING_REMOVED_PIPE_B;
	case PIPE_C:
		return LATENCY_REPORTING_REMOVED_PIPE_C;
	case PIPE_D:
		return LATENCY_REPORTING_REMOVED_PIPE_D;
	default:
		MISSING_CASE(intel_dp->psr.pipe);
		return 0;
	}
}

static void wm_optimization_wa(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool set_wa_bit = false;

	/* Wa_14015648006 */
	if (IS_DISPLAY_VER(dev_priv, 11, 14))
		set_wa_bit |= crtc_state->wm_level_disabled;

	/* Wa_16013835468 */
	if (DISPLAY_VER(dev_priv) == 12)
		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
			      crtc_state->hw.adjusted_mode.crtc_vdisplay;

	if (set_wa_bit)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, wa_16013835468_bit_get(intel_dp));
	else
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		hsw_psr_setup_aux(intel_dp);

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm; besides preventing other hw tracking issues, now we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD;

	/*
	 * For some unknown reason on HSW non-ULT (or at least on
	 * Dell Latitude E6540) external displays start to flicker
	 * when PSR is enabled on the eDP. SR/PC6 residency is much
	 * higher than should be possible with an external display.
	 * As a workaround leave LPSP unmasked to prevent PSR entry
	 * when external displays are active.
	 */
	if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
		mask |= EDP_PSR_DEBUG_MASK_LPSP;

	if (DISPLAY_VER(dev_priv) < 20)
		mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	/*
	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
	 * registers in order to keep the CURSURFLIVE tricks working :(
	 */
	if (IS_DISPLAY_VER(dev_priv, 9, 10))
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	/* allow PSR with sprite enabled */
	if (IS_HASWELL(dev_priv))
		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;

	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);

	psr_irq_control(intel_dp);

	/*
	 * TODO: if future platforms support DC3CO in more than one
	 * transcoder, EXITLINE will need to be unset when disabling PSR
	 */
	if (intel_dp->psr.dc3co_exitline)
		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);

	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	wm_optimization_wa(intel_dp, crtc_state);

	if (intel_dp->psr.psr2_enabled) {
		if (DISPLAY_VER(dev_priv) == 9)
			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
				     PSR2_VSC_ENABLE_PROG_HEADER |
				     PSR2_ADD_VERTICAL_LINE_COUNT);

		/*
		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity, this may
		 * cause issues if non-supported panels are used.
		 */
		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
		    IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
				     0, ADLP_1_BASED_X_GRANULARITY);

		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
		else if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
	}
}

static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * And enabling in this situation causes the screen to freeze the
	 * first time that PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
	 */
	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
	val &= psr_irq_psr_error_bit_get(intel_dp);
	if (val) {
		intel_dp->psr.sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
		return false;
	}

	return true;
}

static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);

	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;

	if (!psr_interrupt_error_check(intel_dp))
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
	else
		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
			    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;
	intel_dp->psr.paused = false;

	intel_psr_activate(intel_dp);
}

static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.panel_replay_enabled) {
		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
	} else if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);

		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
				   EDP_PSR2_ENABLE, 0);

		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
	} else {
		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
				   EDP_PSR_ENABLE, 0);

		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
	}

	intel_dp->psr.active = false;
}

static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t psr_status;
	u32 psr_status_mask;

	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	enum phy phy = intel_port_to_phy(dev_priv,
					 dp_to_dig_port(intel_dp)->base.port);

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
			    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	if (DISPLAY_VER(dev_priv) >= 11)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);

	if (intel_dp->psr.psr2_enabled) {
		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
		else if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
	}

	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (intel_dp->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	intel_dp->psr.enabled = false;
	intel_dp->psr.panel_replay_enabled = false;
	intel_dp->psr.psr2_enabled = false;
	intel_dp->psr.psr2_sel_fetch_enabled = false;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

/**
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after enabling psr.
 */
void intel_psr_pause(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled) {
		mutex_unlock(&psr->lock);
		return;
	}

	/* If we ever hit this, we will need to add refcount to pause/resume */
	drm_WARN_ON(&dev_priv->drm, psr->paused);

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);
	psr->paused = true;

	mutex_unlock(&psr->lock);

	cancel_work_sync(&psr->work);
	cancel_delayed_work_sync(&psr->dc3co_work);
}

/**
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after pausing psr.
 */
void intel_psr_resume(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->paused)
		goto unlock;

	psr->paused = false;
	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&psr->lock);
}
1874 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1876 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1877 PSR2_MAN_TRK_CTL_ENABLE;
1880 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1882 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1883 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1884 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1887 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1889 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1890 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1891 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1894 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1896 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1897 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1898 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
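/*
 * A minimal sketch of how the helpers above are meant to be combined
 * (this mirrors psr_force_hw_tracking_exit() below): the ADLP/display-14
 * and legacy variants differ only in bit layout, so callers always
 * compose the register value through the helpers instead of using the
 * bits directly:
 *
 *	u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
 *		  man_trk_ctl_partial_frame_bit_get(dev_priv) |
 *		  man_trk_ctl_single_full_frame_bit_get(dev_priv) |
 *		  man_trk_ctl_continuos_full_frame(dev_priv);
 *	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
 */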
1901 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1903 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1904 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1906 if (intel_dp->psr.psr2_sel_fetch_enabled)
1907 intel_de_write(dev_priv,
1908 PSR2_MAN_TRK_CTL(cpu_transcoder),
1909 man_trk_ctl_enable_bit_get(dev_priv) |
1910 man_trk_ctl_partial_frame_bit_get(dev_priv) |
1911 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1912 man_trk_ctl_continuos_full_frame(dev_priv));
1915 * Display WA #0884: skl+
1916 * This documented WA for bxt can be safely applied
1917 * broadly so we can force HW tracking to exit PSR
1918 * instead of disabling and re-enabling.
1919 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1920 	 * but it makes more sense to write to the currently active
1921 	 * pipe.
1923 	 * This workaround does not exist for platforms with display 10 or newer,
1924 	 * but testing proved that it works up to display 13; for anything newer
1925 	 * than that, testing will be needed.
1927 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1930 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1932 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1933 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1934 struct intel_encoder *encoder;
1936 	if (!crtc_state->enable_psr2_sel_fetch)
1937 		return;
1939 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1940 crtc_state->uapi.encoder_mask) {
1941 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1943 lockdep_assert_held(&intel_dp->psr.lock);
1944 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1945 			return;
1946 		break;
1947 	}
1949 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1950 crtc_state->psr2_man_track_ctl);
1953 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1954 struct drm_rect *clip, bool full_update)
1956 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1957 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1958 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1960 /* SF partial frame enable has to be set even on full update */
1961 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1963 	if (full_update) {
1964 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1965 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
1966 		goto exit;
1967 	}
1969 	if (clip->y1 == -1)
1970 		goto exit;
1972 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1973 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1974 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1975 	} else {
1976 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1978 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1979 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1980 	}
1981 exit:
1982 	crtc_state->psr2_man_track_ctl = val;
1983 }
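/*
 * Worked example (illustrative numbers only): for a damaged area covering
 * scanlines 64..127 (clip->y1 = 64, clip->y2 = 128), ADLP+ programs the
 * SU region addresses as raw scanlines 64 and 127, while older platforms,
 * which track the region in 4-line blocks, program 64 / 4 + 1 = 17 and
 * 128 / 4 + 1 = 33 (hence the WARN above that both edges must be 4-line
 * aligned).
 */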
1985 static void clip_area_update(struct drm_rect *overlap_damage_area,
1986 struct drm_rect *damage_area,
1987 struct drm_rect *pipe_src)
1989 	if (!drm_rect_intersect(damage_area, pipe_src))
1990 		return;
1992 if (overlap_damage_area->y1 == -1) {
1993 overlap_damage_area->y1 = damage_area->y1;
1994 		overlap_damage_area->y2 = damage_area->y2;
1995 		return;
1996 	}
1998 if (damage_area->y1 < overlap_damage_area->y1)
1999 overlap_damage_area->y1 = damage_area->y1;
2001 if (damage_area->y2 > overlap_damage_area->y2)
2002 overlap_damage_area->y2 = damage_area->y2;
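/*
 * Illustrative example: merging a damage rect of {y1 = 16, y2 = 32} into
 * a running overlap area of {y1 = 48, y2 = 64} yields {y1 = 16, y2 = 64};
 * a damage rect that does not intersect pipe_src is ignored entirely.
 */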
2005 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
2006 struct drm_rect *pipe_clip)
2008 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2009 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2010 	u16 y_alignment;
2012 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2013 if (crtc_state->dsc.compression_enable &&
2014 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2015 		y_alignment = vdsc_cfg->slice_height;
2016 	else
2017 y_alignment = crtc_state->su_y_granularity;
2019 pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
2020 if (pipe_clip->y2 % y_alignment)
2021 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
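/*
 * Worked example (illustrative numbers only): with y_alignment = 4 and an
 * incoming clip of y1 = 10, y2 = 33, the region is widened to
 * y1 = 8 (10 - 10 % 4) and y2 = 36 ((33 / 4 + 1) * 4), i.e. both edges
 * are pushed outwards to the enclosing alignment boundaries.
 */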
2025  * TODO: It is not clear how to handle planes with a negative position;
2026  * planes are also not updated if they have a negative X
2027  * position, so for now do a full update in these cases.
2029  * Plane scaling and rotation are not supported by selective fetch and both
2030  * properties can change without a modeset, so they need to be checked at every
2031  * atomic commit.
2033 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2035 if (plane_state->uapi.dst.y1 < 0 ||
2036 plane_state->uapi.dst.x1 < 0 ||
2037 plane_state->scaler_id >= 0 ||
2038 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2039 		return false;
2041 	return true;
2045  * Check for pipe properties that are not supported by selective fetch.
2047 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2048 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2049 * enabled and going to the full update path.
2051 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2053 	if (crtc_state->scaler_state.scaler_id >= 0)
2054 		return false;
2056 	return true;
2059 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2060 struct intel_crtc *crtc)
2062 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2063 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2064 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
2065 struct intel_plane_state *new_plane_state, *old_plane_state;
2066 struct intel_plane *plane;
2067 	bool full_update = false;
2068 	int i, ret;
2070 	if (!crtc_state->enable_psr2_sel_fetch)
2071 		return 0;
2073 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2074 		full_update = true;
2075 		goto skip_sel_fetch_set_loop;
2076 	}
2079 * Calculate minimal selective fetch area of each plane and calculate
2080 * the pipe damaged area.
2081 * In the next loop the plane selective fetch area will actually be set
2082 	 * using the whole pipe damaged area.
2084 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2085 new_plane_state, i) {
2086 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2087 						      .x2 = INT_MAX, .y2 = -1 };
2089 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2090 			continue;
2092 if (!new_plane_state->uapi.visible &&
2093 		    !old_plane_state->uapi.visible)
2094 			continue;
2096 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2097 			full_update = true;
2098 			break;
2099 		}
2102 		 * If visibility or the plane position changed, mark the whole plane area as
2103 		 * damaged, as it needs a complete redraw in both the new and old
2104 		 * positions.
2106 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2107 !drm_rect_equals(&new_plane_state->uapi.dst,
2108 &old_plane_state->uapi.dst)) {
2109 if (old_plane_state->uapi.visible) {
2110 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2111 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2112 clip_area_update(&pipe_clip, &damaged_area,
2113 						 &crtc_state->pipe_src);
2114 			}
2116 if (new_plane_state->uapi.visible) {
2117 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2118 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2119 clip_area_update(&pipe_clip, &damaged_area,
2120 						 &crtc_state->pipe_src);
2121 			}
2123 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2124 /* If alpha changed mark the whole plane area as damaged */
2125 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2126 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2127 clip_area_update(&pipe_clip, &damaged_area,
2128 					 &crtc_state->pipe_src);
2129 			continue;
2130 		}
2132 src = drm_plane_state_src(&new_plane_state->uapi);
2133 drm_rect_fp_to_int(&src, &src);
2135 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2136 						     &new_plane_state->uapi, &damaged_area))
2137 			continue;
2139 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2140 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2141 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2142 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2144 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2148 * TODO: For now we are just using full update in case
2149 * selective fetch area calculation fails. To optimize this we
2150 * should identify cases where this happens and fix the area
2151 * calculation for those.
2153 if (pipe_clip.y1 == -1) {
2154 drm_info_once(&dev_priv->drm,
2155 "Selective fetch area calculation failed in pipe %c\n",
2156 				      pipe_name(crtc->pipe));
2157 			full_update = true;
2158 		}
2159 	}
2160 	if (full_update)
2161 		goto skip_sel_fetch_set_loop;
2163 /* Wa_14014971492 */
2164 if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2165 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2166 	    crtc_state->splitter.enable)
2167 		pipe_clip.y1 = 0;
2169 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2170 	if (ret)
2171 		return ret;
2173 intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2176 	 * Now that we have the pipe damaged area, check if it intersects with
2177 	 * every plane; if it does, set the plane selective fetch area.
2179 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2180 new_plane_state, i) {
2181 struct drm_rect *sel_fetch_area, inter;
2182 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2184 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2185 		    !new_plane_state->uapi.visible)
2186 			continue;
2188 		inter = pipe_clip;
2189 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2190 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2191 sel_fetch_area->y1 = -1;
2192 sel_fetch_area->y2 = -1;
2194 			 * if plane sel fetch was previously enabled ->
2195 			 * disable it
2196 			 */
2197 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2198 				crtc_state->update_planes |= BIT(plane->id);
2200 			continue;
2201 		}
2203 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2204 			full_update = true;
2205 			break;
2206 		}
2208 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2209 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2210 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2211 crtc_state->update_planes |= BIT(plane->id);
2214 * Sel_fetch_area is calculated for UV plane. Use
2215 		 * same area for Y plane as well.
2216 		 */
2217 		if (linked) {
2218 struct intel_plane_state *linked_new_plane_state;
2219 struct drm_rect *linked_sel_fetch_area;
2221 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2222 if (IS_ERR(linked_new_plane_state))
2223 return PTR_ERR(linked_new_plane_state);
2225 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2226 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2227 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2228 			crtc_state->update_planes |= BIT(linked->id);
2229 		}
2230 	}
2232 skip_sel_fetch_set_loop:
2233 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2234 	return 0;
2235 }
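/*
 * Summary sketch of the flow above (illustrative):
 *
 *	pass 1: for each plane, merge old/new damage into pipe_clip
 *	align pipe_clip to the SU granularity
 *	pass 2: for each visible plane, intersect pipe_clip with the
 *		plane and store the result in psr2_sel_fetch_area
 *	psr2_man_trk_ctl_calc() then encodes pipe_clip (or a full frame
 *	on any unsupported case) into crtc_state->psr2_man_track_ctl
 */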
2237 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2238 struct intel_crtc *crtc)
2240 struct drm_i915_private *i915 = to_i915(state->base.dev);
2241 const struct intel_crtc_state *old_crtc_state =
2242 intel_atomic_get_old_crtc_state(state, crtc);
2243 const struct intel_crtc_state *new_crtc_state =
2244 intel_atomic_get_new_crtc_state(state, crtc);
2245 struct intel_encoder *encoder;
2250 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2251 old_crtc_state->uapi.encoder_mask) {
2252 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2253 struct intel_psr *psr = &intel_dp->psr;
2254 bool needs_to_disable = false;
2256 mutex_lock(&psr->lock);
2259 * Reasons to disable:
2260 * - PSR disabled in new state
2261 * - All planes will go inactive
2262 * - Changing between PSR versions
2263 * - Display WA #1136: skl, bxt
2265 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2266 needs_to_disable |= !new_crtc_state->has_psr;
2267 needs_to_disable |= !new_crtc_state->active_planes;
2268 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2269 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2270 new_crtc_state->wm_level_disabled;
2272 if (psr->enabled && needs_to_disable)
2273 intel_psr_disable_locked(intel_dp);
2274 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2275 /* Wa_14015648006 */
2276 wm_optimization_wa(intel_dp, new_crtc_state);
2278 mutex_unlock(&psr->lock);
2282 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2283 struct intel_crtc *crtc)
2285 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2286 const struct intel_crtc_state *crtc_state =
2287 intel_atomic_get_new_crtc_state(state, crtc);
2288 struct intel_encoder *encoder;
2290 	if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2291 		return;
2293 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2294 crtc_state->uapi.encoder_mask) {
2295 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2296 struct intel_psr *psr = &intel_dp->psr;
2297 bool keep_disabled = false;
2299 mutex_lock(&psr->lock);
2301 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2303 keep_disabled |= psr->sink_not_reliable;
2304 keep_disabled |= !crtc_state->active_planes;
2306 /* Display WA #1136: skl, bxt */
2307 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2308 crtc_state->wm_level_disabled;
2310 if (!psr->enabled && !keep_disabled)
2311 intel_psr_enable_locked(intel_dp, crtc_state);
2312 else if (psr->enabled && !crtc_state->wm_level_disabled)
2313 /* Wa_14015648006 */
2314 wm_optimization_wa(intel_dp, crtc_state);
2316 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2317 if (crtc_state->crc_enabled && psr->enabled)
2318 psr_force_hw_tracking_exit(intel_dp);
2321 * Clear possible busy bits in case we have
2322 		 * an invalidate -> flip -> flush sequence.
2324 intel_dp->psr.busy_frontbuffer_bits = 0;
2326 mutex_unlock(&psr->lock);
2330 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2332 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2333 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2336 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2337 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2338 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2340 return intel_de_wait_for_clear(dev_priv,
2341 EDP_PSR2_STATUS(cpu_transcoder),
2342 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
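/*
 * Illustrative note: DEEP_SLEEP is the lowest PSR2 state with that
 * status bit set, so a single intel_de_wait_for_clear() on it also
 * covers all of the deeper states, avoiding a full
 * EDP_PSR2_STATUS_STATE_MASK comparison in this hot path.
 */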
2345 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2347 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2348 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2351 * From bspec: Panel Self Refresh (BDW+)
2352 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2353 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2354 * defensive enough to cover everything.
2356 return intel_de_wait_for_clear(dev_priv,
2357 psr_status_reg(dev_priv, cpu_transcoder),
2358 EDP_PSR_STATUS_STATE_MASK, 50);
2362  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2363 * @new_crtc_state: new CRTC state
2365 * This function is expected to be called from pipe_update_start() where it is
2366 * not expected to race with PSR enable or disable.
2368 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2370 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2371 struct intel_encoder *encoder;
2373 	if (!new_crtc_state->has_psr)
2374 		return;
2376 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2377 new_crtc_state->uapi.encoder_mask) {
2378 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2381 lockdep_assert_held(&intel_dp->psr.lock);
2383 		if (!intel_dp->psr.enabled)
2384 			continue;
2386 if (intel_dp->psr.psr2_enabled)
2387 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2388 		else
2389 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2391 		if (ret)
2392 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2396 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2398 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2399 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2404 	if (!intel_dp->psr.enabled)
2405 		return false;
2407 if (intel_dp->psr.psr2_enabled) {
2408 reg = EDP_PSR2_STATUS(cpu_transcoder);
2409 mask = EDP_PSR2_STATUS_STATE_MASK;
2411 reg = psr_status_reg(dev_priv, cpu_transcoder);
2412 mask = EDP_PSR_STATUS_STATE_MASK;
2415 mutex_unlock(&intel_dp->psr.lock);
2417 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2419 drm_err(&dev_priv->drm,
2420 "Timed out waiting for PSR Idle for re-enable\n");
2422 /* After the unlocked wait, verify that PSR is still wanted! */
2423 mutex_lock(&intel_dp->psr.lock);
2424 return err == 0 && intel_dp->psr.enabled;
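/*
 * Illustrative note on the locking above: psr.lock is dropped around the
 * up-to-50 ms register wait so that frontbuffer invalidates are not
 * blocked in the meantime, which is why psr.enabled is re-checked after
 * retaking the lock and a false return means "do not re-enable".
 */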
2427 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2429 struct drm_connector_list_iter conn_iter;
2430 struct drm_modeset_acquire_ctx ctx;
2431 struct drm_atomic_state *state;
2432 struct drm_connector *conn;
2435 	state = drm_atomic_state_alloc(&dev_priv->drm);
2436 	if (!state)
2437 		return -ENOMEM;
2439 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2441 state->acquire_ctx = &ctx;
2442 to_intel_atomic_state(state)->internal = true;
2445 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2446 drm_for_each_connector_iter(conn, &conn_iter) {
2447 struct drm_connector_state *conn_state;
2448 struct drm_crtc_state *crtc_state;
2450 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2451 			continue;
2453 conn_state = drm_atomic_get_connector_state(state, conn);
2454 if (IS_ERR(conn_state)) {
2455 			err = PTR_ERR(conn_state);
2456 			break;
2457 		}
2459 		if (!conn_state->crtc)
2460 			continue;
2462 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2463 if (IS_ERR(crtc_state)) {
2464 			err = PTR_ERR(crtc_state);
2465 			break;
2466 		}
2468 /* Mark mode as changed to trigger a pipe->update() */
2469 crtc_state->mode_changed = true;
2471 drm_connector_list_iter_end(&conn_iter);
2473 retry:
2474 	err = drm_atomic_commit(state);
2476 if (err == -EDEADLK) {
2477 drm_atomic_state_clear(state);
2478 		err = drm_modeset_backoff(&ctx);
2479 		if (!err)
2480 			goto retry;
2481 	}
2483 drm_modeset_drop_locks(&ctx);
2484 drm_modeset_acquire_fini(&ctx);
2485 	drm_atomic_state_put(state);
2487 	return err;
2490 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2492 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2493 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2497 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2498 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2499 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2500 		return -EINVAL;
2501 	}
2503 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2504 	if (ret)
2505 		return ret;
2507 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2508 intel_dp->psr.debug = val;
2511 * Do it right away if it's already enabled, otherwise it will be done
2512 * when enabling the source.
2514 if (intel_dp->psr.enabled)
2515 psr_irq_control(intel_dp);
2517 mutex_unlock(&intel_dp->psr.lock);
2519 if (old_mode != mode)
2520 		ret = intel_psr_fastset_force(dev_priv);
2522 	return ret;
2525 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2527 struct intel_psr *psr = &intel_dp->psr;
2529 intel_psr_disable_locked(intel_dp);
2530 psr->sink_not_reliable = true;
2531 	/* let's make sure that sink is awake */
2532 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2535 static void intel_psr_work(struct work_struct *work)
2537 struct intel_dp *intel_dp =
2538 container_of(work, typeof(*intel_dp), psr.work);
2540 mutex_lock(&intel_dp->psr.lock);
2542 	if (!intel_dp->psr.enabled)
2543 		goto unlock;
2545 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2546 intel_psr_handle_irq(intel_dp);
2549 * We have to make sure PSR is ready for re-enable
2550 	 * otherwise it stays disabled until the next full enable/disable cycle.
2551 * PSR might take some time to get fully disabled
2552 * and be ready for re-enable.
2554 	if (!__psr_wait_for_idle_locked(intel_dp))
2555 		goto unlock;
2558 * The delayed work can race with an invalidate hence we need to
2559 * recheck. Since psr_flush first clears this and then reschedules we
2560 * won't ever miss a flush when bailing out here.
2562 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2563 		goto unlock;
2565 intel_psr_activate(intel_dp);
2566 unlock:
2567 	mutex_unlock(&intel_dp->psr.lock);
2570 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2572 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2573 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2575 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2576 		u32 val;
2578 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2579 			/* Send one update otherwise lag is observed on screen */
2580 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2581 			return;
2582 		}
2584 val = man_trk_ctl_enable_bit_get(dev_priv) |
2585 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2586 man_trk_ctl_continuos_full_frame(dev_priv);
2587 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2588 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2589 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2590 	} else {
2591 		intel_psr_exit(intel_dp);
2592 	}
2596 * intel_psr_invalidate - Invalidate PSR
2597 * @dev_priv: i915 device
2598 * @frontbuffer_bits: frontbuffer plane tracking bits
2599 * @origin: which operation caused the invalidate
2601 * Since the hardware frontbuffer tracking has gaps we need to integrate
2602 * with the software frontbuffer tracking. This function gets called every
2603 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2604 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2606  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2608 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2609 unsigned frontbuffer_bits, enum fb_op_origin origin)
2611 struct intel_encoder *encoder;
2613 	if (origin == ORIGIN_FLIP)
2614 		return;
2616 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2617 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2618 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2620 mutex_lock(&intel_dp->psr.lock);
2621 if (!intel_dp->psr.enabled) {
2622 mutex_unlock(&intel_dp->psr.lock);
2626 pipe_frontbuffer_bits &=
2627 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2628 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2630 if (pipe_frontbuffer_bits)
2631 _psr_invalidate_handle(intel_dp);
2633 mutex_unlock(&intel_dp->psr.lock);
2637  * Once we completely rely on PSR2 S/W tracking in the future,
2638  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
2639  * events as well, so tgl_dc3co_flush_locked() will need to be changed
2640  * accordingly.
2643 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2644 enum fb_op_origin origin)
2646 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2648 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2649 	    !intel_dp->psr.active)
2650 		return;
2653 	 * Every frontbuffer flush/flip event modifies the delay of the delayed work;
2654 	 * when the delayed work finally gets to run, the display has been idle.
2656 if (!(frontbuffer_bits &
2657 	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2658 		return;
2660 tgl_psr2_enable_dc3co(intel_dp);
2661 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2662 intel_dp->psr.dc3co_exit_delay);
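/*
 * Sketch of the DC3CO flow (illustrative): every qualifying flush/flip
 * re-arms dc3co_work with dc3co_exit_delay, so only when flips stop for
 * at least that long does the delayed work actually run and
 * tgl_dc3co_disable_work() drop the pipe back out of DC3CO.
 */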
2665 static void _psr_flush_handle(struct intel_dp *intel_dp)
2667 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2668 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2670 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2671 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2672 /* can we turn CFF off? */
2673 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2674 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2675 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2676 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2677 man_trk_ctl_continuos_full_frame(dev_priv);
2680 			 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2681 			 * updates. Still keep the CFF bit enabled, as we don't have a proper
2682 			 * SU configuration in case an update is sent for any reason after
2683 			 * the SFF bit gets cleared by the HW on the next vblank.
2685 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2687 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2688 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2692 			 * continuous full frame is disabled, only a single full
2693 			 * frame update is needed
2694 			 */
2695 psr_force_hw_tracking_exit(intel_dp);
2698 psr_force_hw_tracking_exit(intel_dp);
2700 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2701 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2706 * intel_psr_flush - Flush PSR
2707 * @dev_priv: i915 device
2708 * @frontbuffer_bits: frontbuffer plane tracking bits
2709 * @origin: which operation caused the flush
2711 * Since the hardware frontbuffer tracking has gaps we need to integrate
2712 * with the software frontbuffer tracking. This function gets called every
2713 * time frontbuffer rendering has completed and flushed out to memory. PSR
2714 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2716 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2718 void intel_psr_flush(struct drm_i915_private *dev_priv,
2719 unsigned frontbuffer_bits, enum fb_op_origin origin)
2721 struct intel_encoder *encoder;
2723 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2724 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2725 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2727 mutex_lock(&intel_dp->psr.lock);
2728 if (!intel_dp->psr.enabled) {
2729 mutex_unlock(&intel_dp->psr.lock);
2733 pipe_frontbuffer_bits &=
2734 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2735 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2738 		 * If PSR is paused by an explicit intel_psr_pause() call,
2739 * we have to ensure that the PSR is not activated until
2740 * intel_psr_resume() is called.
2742 		if (intel_dp->psr.paused)
2743 			goto unlock;
2745 if (origin == ORIGIN_FLIP ||
2746 (origin == ORIGIN_CURSOR_UPDATE &&
2747 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2748 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2749 			goto unlock;
2750 		}
2752 		if (pipe_frontbuffer_bits == 0)
2753 			goto unlock;
2755 /* By definition flush = invalidate + flush */
2756 _psr_flush_handle(intel_dp);
2757 unlock:
2758 		mutex_unlock(&intel_dp->psr.lock);
2763 * intel_psr_init - Init basic PSR work and mutex.
2764 * @intel_dp: Intel DP
2766  * This function is called after initializing the connector
2767  * (connector initialization takes care of reading the connector capabilities),
2768  * and it initializes the basic PSR state for each DP encoder.
2770 void intel_psr_init(struct intel_dp *intel_dp)
2772 struct intel_connector *connector = intel_dp->attached_connector;
2773 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2774 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2776 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2777 		return;
2779 if (!intel_dp_is_edp(intel_dp))
2780 intel_psr_init_dpcd(intel_dp);
2783 * HSW spec explicitly says PSR is tied to port A.
2784 	 * BDW+ platforms have an instance of the PSR registers per transcoder, but
2785 	 * BDW, GEN9 and GEN11 have not been validated by the HW team on the other
2786 	 * transcoders.
2787 	 * For now we only support one instance of PSR for BDW, GEN9 and GEN11,
2788 	 * so let's keep it hardcoded to PORT_A for them.
2789 	 * GEN12, however, supports an instance of the PSR registers per transcoder.
2791 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2792 drm_dbg_kms(&dev_priv->drm,
2793 "PSR condition failed: Port not supported\n");
2797 if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2798 		intel_dp->psr.source_panel_replay_support = true;
2799 	else
2800 intel_dp->psr.source_support = true;
2802 	/* Set link_standby vs. link_off defaults */
2803 if (DISPLAY_VER(dev_priv) < 12)
2804 /* For new platforms up to TGL let's respect VBT back again */
2805 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2807 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2808 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2809 mutex_init(&intel_dp->psr.lock);
2812 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2813 u8 *status, u8 *error_status)
2815 struct drm_dp_aux *aux = &intel_dp->aux;
2817 unsigned int offset;
2819 offset = intel_dp->psr.panel_replay_enabled ?
2820 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2822 	ret = drm_dp_dpcd_readb(aux, offset, status);
2823 	if (ret != 1)
2824 		return ret;
2826 offset = intel_dp->psr.panel_replay_enabled ?
2827 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2829 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
2830 	if (ret != 1)
2831 		return ret;
2833 	*status = *status & DP_PSR_SINK_STATE_MASK;
2835 	return 0;
2838 static void psr_alpm_check(struct intel_dp *intel_dp)
2840 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2841 struct drm_dp_aux *aux = &intel_dp->aux;
2842 struct intel_psr *psr = &intel_dp->psr;
2846 	if (!psr->psr2_enabled)
2847 		return;
2849 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2850 	if (r != 1) {
2851 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2852 		return;
2853 	}
2855 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2856 intel_psr_disable_locked(intel_dp);
2857 psr->sink_not_reliable = true;
2858 drm_dbg_kms(&dev_priv->drm,
2859 "ALPM lock timeout error, disabling PSR\n");
2861 /* Clearing error */
2862 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2866 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2868 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2869 struct intel_psr *psr = &intel_dp->psr;
2873 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2874 	if (r != 1) {
2875 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2876 		return;
2877 	}
2879 if (val & DP_PSR_CAPS_CHANGE) {
2880 intel_psr_disable_locked(intel_dp);
2881 psr->sink_not_reliable = true;
2882 drm_dbg_kms(&dev_priv->drm,
2883 "Sink PSR capability changed, disabling PSR\n");
2886 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2890 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2892 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2893 struct intel_psr *psr = &intel_dp->psr;
2894 u8 status, error_status;
2895 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2896 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2897 DP_PSR_LINK_CRC_ERROR;
2899 	if (!CAN_PSR(intel_dp))
2900 		return;
2902 mutex_lock(&psr->lock);
2907 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2908 drm_err(&dev_priv->drm,
2909 "Error reading PSR status or error status\n");
2913 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2914 intel_psr_disable_locked(intel_dp);
2915 psr->sink_not_reliable = true;
2918 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2919 drm_dbg_kms(&dev_priv->drm,
2920 "PSR sink internal error, disabling PSR\n");
2921 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2922 drm_dbg_kms(&dev_priv->drm,
2923 "PSR RFB storage error, disabling PSR\n");
2924 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2925 drm_dbg_kms(&dev_priv->drm,
2926 "PSR VSC SDP uncorrectable error, disabling PSR\n");
2927 if (error_status & DP_PSR_LINK_CRC_ERROR)
2928 drm_dbg_kms(&dev_priv->drm,
2929 "PSR Link CRC error, disabling PSR\n");
2931 if (error_status & ~errors)
2932 drm_err(&dev_priv->drm,
2933 "PSR_ERROR_STATUS unhandled errors %x\n",
2934 error_status & ~errors);
2935 /* clear status register */
2936 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2938 psr_alpm_check(intel_dp);
2939 psr_capability_changed_check(intel_dp);
2941 exit:
2942 	mutex_unlock(&psr->lock);
2945 bool intel_psr_enabled(struct intel_dp *intel_dp)
2949 	if (!CAN_PSR(intel_dp))
2950 		return false;
2952 mutex_lock(&intel_dp->psr.lock);
2953 ret = intel_dp->psr.enabled;
2954 	mutex_unlock(&intel_dp->psr.lock);
2956 	return ret;
2960 * intel_psr_lock - grab PSR lock
2961 * @crtc_state: the crtc state
2963  * This is initially meant to be used around the CRTC update, when
2964  * vblank-sensitive registers are updated and we need to grab the lock
2965  * beforehand to avoid vblank evasion.
2967 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2969 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2970 struct intel_encoder *encoder;
2972 	if (!crtc_state->has_psr)
2973 		return;
2975 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2976 crtc_state->uapi.encoder_mask) {
2977 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2979 		mutex_lock(&intel_dp->psr.lock);
2980 		break;
2981 	}
2985 * intel_psr_unlock - release PSR lock
2986 * @crtc_state: the crtc state
2988 * Release the PSR lock that was held during pipe update.
2990 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2992 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2993 struct intel_encoder *encoder;
2995 	if (!crtc_state->has_psr)
2996 		return;
2998 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2999 crtc_state->uapi.encoder_mask) {
3000 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3002 		mutex_unlock(&intel_dp->psr.lock);
3003 		break;
3004 	}
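/*
 * Illustrative pairing (mirrors the pipe update path):
 *
 *	intel_psr_lock(new_crtc_state);
 *	... vblank evasion and vblank-sensitive register writes ...
 *	intel_psr_unlock(new_crtc_state);
 */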
3008 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3010 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3011 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3012 const char *status = "unknown";
3013 u32 val, status_val;
3015 if (intel_dp->psr.psr2_enabled) {
3016 		static const char * const live_status[] = {
3017 			"IDLE",
3018 			"CAPTURE",
3019 			"CAPTURE_FS",
3020 			"SLEEP",
3021 			"BUFON_FW",
3022 			"ML_UP",
3023 			"SU_STANDBY",
3024 			"FAST_SLEEP",
3025 			"DEEP_SLEEP",
3026 			"BUF_ON",
3027 			"TG_ON"
3028 		};
3029 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3030 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3031 if (status_val < ARRAY_SIZE(live_status))
3032 status = live_status[status_val];
3034 		static const char * const live_status[] = {
3035 			"IDLE",
3036 			"SRDONACK",
3037 			"SRDENT",
3038 			"BUFOFF",
3039 			"BUFON",
3040 			"AUXACK",
3041 			"SRDOFFACK",
3042 			"SRDENT_ON",
3043 		};
3044 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3045 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3046 if (status_val < ARRAY_SIZE(live_status))
3047 status = live_status[status_val];
3050 seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3053 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3055 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3056 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3057 struct intel_psr *psr = &intel_dp->psr;
3058 	intel_wakeref_t wakeref;
3059 	const char *status;
3060 	bool enabled;
3061 	u32 val;
3063 seq_printf(m, "Sink support: PSR = %s",
3064 str_yes_no(psr->sink_support));
3066 if (psr->sink_support)
3067 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3068 seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3070 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3071 		return 0;
3073 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3074 mutex_lock(&psr->lock);
3076 if (psr->panel_replay_enabled)
3077 status = "Panel Replay Enabled";
3078 else if (psr->enabled)
3079 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3081 status = "disabled";
3082 seq_printf(m, "PSR mode: %s\n", status);
3084 if (!psr->enabled) {
3085 seq_printf(m, "PSR sink not reliable: %s\n",
3086 			   str_yes_no(psr->sink_not_reliable));
3087 		goto unlock;
3088 	}
3091 if (psr->panel_replay_enabled) {
3092 val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3093 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3094 } else if (psr->psr2_enabled) {
3095 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3096 enabled = val & EDP_PSR2_ENABLE;
3098 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3099 enabled = val & EDP_PSR_ENABLE;
3101 seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3102 str_enabled_disabled(enabled), val);
3103 psr_source_status(intel_dp, m);
3104 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3105 psr->busy_frontbuffer_bits);
3108 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3110 val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3111 seq_printf(m, "Performance counter: %u\n",
3112 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3114 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3115 seq_printf(m, "Last attempted entry at: %lld\n",
3116 psr->last_entry_attempt);
3117 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3120 if (psr->psr2_enabled) {
3121 		u32 su_frames_val[3];
3122 		int frame;
3125 		 * Read all 3 registers beforehand to minimize crossing a
3126 * frame boundary between register reads
3128 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3129 val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3130 su_frames_val[frame / 3] = val;
3133 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3135 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3138 su_blocks = su_frames_val[frame / 3] &
3139 PSR2_SU_STATUS_MASK(frame);
3140 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3141 seq_printf(m, "%d\t%d\n", frame, su_blocks);
3144 seq_printf(m, "PSR2 selective fetch: %s\n",
3145 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3148 unlock:
3149 	mutex_unlock(&psr->lock);
3150 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3152 	return 0;
3155 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3157 struct drm_i915_private *dev_priv = m->private;
3158 struct intel_dp *intel_dp = NULL;
3159 struct intel_encoder *encoder;
3161 	if (!HAS_PSR(dev_priv))
3162 		return -ENODEV;
3164 /* Find the first EDP which supports PSR */
3165 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3166 		intel_dp = enc_to_intel_dp(encoder);
3167 		break;
3168 	}
3170 	if (!intel_dp)
3171 		return -ENODEV;
3173 	return intel_psr_status(m, intel_dp);
3175 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3178 i915_edp_psr_debug_set(void *data, u64 val)
3180 struct drm_i915_private *dev_priv = data;
3181 struct intel_encoder *encoder;
3182 intel_wakeref_t wakeref;
3185 	if (!HAS_PSR(dev_priv))
3186 		return -ENODEV;
3188 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3189 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3191 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3193 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3195 // TODO: split to each transcoder's PSR debug state
3196 ret = intel_psr_debug_set(intel_dp, val);
3198 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3199 	}
3201 	return ret;
3205 i915_edp_psr_debug_get(void *data, u64 *val)
3207 struct drm_i915_private *dev_priv = data;
3208 struct intel_encoder *encoder;
3210 	if (!HAS_PSR(dev_priv))
3211 		return -ENODEV;
3213 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3214 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3216 // TODO: split to each transcoder's PSR debug state
3217 		*val = READ_ONCE(intel_dp->psr.debug);
3218 		return 0;
3219 	}
3221 	return -ENODEV;
3224 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3225 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3228 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3230 struct drm_minor *minor = i915->drm.primary;
3232 debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3233 i915, &i915_edp_psr_debug_fops);
3235 debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3236 i915, &i915_edp_psr_status_fops);
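/*
 * Illustrative debugfs usage (paths assume the standard DRM debugfs
 * root; <minor> is the DRM minor number):
 *
 *	cat /sys/kernel/debug/dri/<minor>/i915_edp_psr_status
 *	echo <I915_PSR_DEBUG_* mask> > \
 *		/sys/kernel/debug/dri/<minor>/i915_edp_psr_debug
 *
 * where the written mask is validated by intel_psr_debug_set() above.
 */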
3239 static const char *psr_mode_str(struct intel_dp *intel_dp)
3241 if (intel_dp->psr.panel_replay_enabled)
3242 return "PANEL-REPLAY";
3243 	else if (intel_dp->psr.enabled)
3244 		return "PSR";
3246 	return "unknown";
3249 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3251 struct intel_connector *connector = m->private;
3252 struct intel_dp *intel_dp = intel_attached_dp(connector);
3253 	static const char * const sink_status[] = {
3254 		"inactive",
3255 "transition to active, capture and display",
3256 "active, display from RFB",
3257 "active, capture and display on sink device timings",
3258 "transition to inactive, capture and display, timing re-sync",
3261 "sink internal error",
3263 static const char * const panel_replay_status[] = {
3264 "Sink device frame is locked to the Source device",
3265 "Sink device is coasting, using the VTotal target",
3266 "Sink device is governing the frame rate (frame rate unlock is granted)",
3267 "Sink device in the process of re-locking with the Source device",
3271 	u8 status, error_status;
3272 	u32 idx;
3273 	int ret;
3274 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3275 seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3279 	if (connector->base.status != connector_status_connected)
3280 		return -ENODEV;
3282 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3283 	if (ret)
3284 		return ret;
3287 if (intel_dp->psr.panel_replay_enabled) {
3288 idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3289 if (idx < ARRAY_SIZE(panel_replay_status))
3290 str = panel_replay_status[idx];
3291 } else if (intel_dp->psr.enabled) {
3292 idx = status & DP_PSR_SINK_STATE_MASK;
3293 if (idx < ARRAY_SIZE(sink_status))
3294 str = sink_status[idx];
3297 seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3299 seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3301 if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3302 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3303 	    DP_PSR_LINK_CRC_ERROR))
3304 		seq_puts(m, ":\n");
3305 	else
3306 		seq_puts(m, "\n");
3307 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3308 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3309 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3310 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3311 if (error_status & DP_PSR_LINK_CRC_ERROR)
3312 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3314 	return ret;
3316 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3318 static int i915_psr_status_show(struct seq_file *m, void *data)
3320 struct intel_connector *connector = m->private;
3321 struct intel_dp *intel_dp = intel_attached_dp(connector);
3323 return intel_psr_status(m, intel_dp);
3325 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3327 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3329 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3330 struct dentry *root = connector->base.debugfs_entry;
3332 /* TODO: Add support for MST connectors as well. */
3333 if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3334 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3335 	    connector->mst_port)
3336 		return;
3338 debugfs_create_file("i915_psr_sink_status", 0444, root,
3339 connector, &i915_psr_sink_status_fops);
3341 if (HAS_PSR(i915) || HAS_DP20(i915))
3342 debugfs_create_file("i915_psr_status", 0444, root,
3343 connector, &i915_psr_status_fops);