2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
34 #include "intel_display_types.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
45 * DOC: Panel Self Refresh (PSR/SRD)
47 * Since Haswell Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to PSR
49 * spec in eDP1.3. PSR feature allows the display to go to lower standby states
50 * when system is idle but display is on as it eliminates display refresh
51 * request to DDR memory completely as long as the frame buffer for that
52 * display is unchanged.
54 * Panel Self Refresh must be supported by both Hardware (source) and
57 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58 * to power down the link and memory controller. For DSI panels the same idea
59 * is called "manual mode".
61 * The implementation uses the hardware-based PSR support which automatically
62 * enters/exits self-refresh mode. The hardware takes care of sending the
63 * required DP aux message and could even retrain the link (that part isn't
64 * enabled yet though). The hardware also keeps track of any frontbuffer
65 * changes to know when to exit self-refresh mode again. Unfortunately that
66 * part doesn't work too well, hence why the i915 PSR support uses the
67 * software frontbuffer tracking to make sure it doesn't miss a screen
68 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69 * get called by the frontbuffer tracking code. Note that because of locking
70 * issues the self-refresh re-enable code is done from a work queue, which
71 * must be correctly synchronized/cancelled when shutting down the pipe."
73 * DC3CO (DC3 clock off)
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
76 * clock off automatically during PSR2 idle state.
77 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
78 * entry/exit allows the HW to enter a low-power state even when page flipping
79 * periodically (for instance a 30fps video playback scenario).
 * Every time a flip occurs PSR2 will get out of deep sleep state(if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
83 * frames, if no other flip occurs and the function above is executed, DC3CO is
84 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
86 * Front buffer modifications do not trigger DC3CO activation on purpose as it
87 * would bring a lot of complexity and most of the moderns systems will only
92 * Description of PSR mask bits:
94 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
96 * When unmasked (nearly) all display register writes (eg. even
97 * SWF) trigger a PSR exit. Some registers are excluded from this
98 * and they have a more specific mask (described below). On icl+
99 * this bit no longer exists and is effectively always set.
101 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
103 * When unmasked (nearly) all pipe/plane register writes
104 * trigger a PSR exit. Some plane registers are excluded from this
105 * and they have a more specific mask (described below).
107 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
111 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112 * SPR_SURF/CURBASE are not included in this and instead are
113 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
116 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
119 * When unmasked PSR is blocked as long as the sprite
120 * plane is enabled. skl+ with their universal planes no
121 * longer have a mask bit like this, and no plane being
 * enabled blocks PSR.
124 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 * When unmasked CURPOS writes trigger a PSR exit. On skl+
 * this doesn't exist but CURPOS is included in the
129 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
131 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
134 * When unmasked PSR is blocked as long as vblank and/or vsync
135 * interrupt is unmasked in IMR *and* enabled in IER.
137 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 * Selects whether PSR exit generates an extra vblank before
141 * the first frame is transmitted. Also note the opposite polarity
 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143 * unmasked==do not generate the extra vblank).
145 * With DC states enabled the extra vblank happens after link training,
 * with DC states disabled it happens immediately upon PSR exit trigger.
147 * No idea as of now why there is a difference. HSW/BDW (which don't
148 * even have DMC) always generate it after link training. Go figure.
150 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
151 * and thus won't latch until the first vblank. So with DC states
 * enabled the register effectively uses the reset value during DC5
153 * exit+PSR exit sequence, and thus the bit does nothing until
154 * latched by the vblank that it was trying to prevent from being
155 * generated in the first place. So we should probably call this
156 * one a chicken/egg bit instead on skl+.
158 * In standby mode (as opposed to link-off) this makes no difference
159 * as the timing generator keeps running the whole time generating
160 * normal periodic vblanks.
162 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163 * and doing so makes the behaviour match the skl+ reset value.
165 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 * On BDW without this bit set no vblanks whatsoever are
 * generated after PSR exit. On HSW this has no apparent effect.
170 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
172 * The rest of the bits are more self-explanatory and/or
173 * irrelevant for normal operation.
176 bool intel_encoder_can_psr(struct intel_encoder *encoder)
178 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
179 return CAN_PSR(enc_to_intel_dp(encoder)) ||
180 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
185 static bool psr_global_enabled(struct intel_dp *intel_dp)
187 struct intel_connector *connector = intel_dp->attached_connector;
188 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
190 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
191 case I915_PSR_DEBUG_DEFAULT:
192 if (i915->display.params.enable_psr == -1)
193 return connector->panel.vbt.psr.enable;
194 return i915->display.params.enable_psr;
195 case I915_PSR_DEBUG_DISABLE:
202 static bool psr2_global_enabled(struct intel_dp *intel_dp)
204 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
206 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
207 case I915_PSR_DEBUG_DISABLE:
208 case I915_PSR_DEBUG_FORCE_PSR1:
211 if (i915->display.params.enable_psr == 1)
217 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
219 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
221 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
222 EDP_PSR_ERROR(intel_dp->psr.transcoder);
225 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
227 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
229 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
230 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
233 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
235 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
237 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
238 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
241 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
243 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
245 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
246 EDP_PSR_MASK(intel_dp->psr.transcoder);
249 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
250 enum transcoder cpu_transcoder)
252 if (DISPLAY_VER(dev_priv) >= 8)
253 return EDP_PSR_CTL(cpu_transcoder);
258 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
259 enum transcoder cpu_transcoder)
261 if (DISPLAY_VER(dev_priv) >= 8)
262 return EDP_PSR_DEBUG(cpu_transcoder);
264 return HSW_SRD_DEBUG;
267 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
268 enum transcoder cpu_transcoder)
270 if (DISPLAY_VER(dev_priv) >= 8)
271 return EDP_PSR_PERF_CNT(cpu_transcoder);
273 return HSW_SRD_PERF_CNT;
276 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
277 enum transcoder cpu_transcoder)
279 if (DISPLAY_VER(dev_priv) >= 8)
280 return EDP_PSR_STATUS(cpu_transcoder);
282 return HSW_SRD_STATUS;
285 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
286 enum transcoder cpu_transcoder)
288 if (DISPLAY_VER(dev_priv) >= 12)
289 return TRANS_PSR_IMR(cpu_transcoder);
294 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
295 enum transcoder cpu_transcoder)
297 if (DISPLAY_VER(dev_priv) >= 12)
298 return TRANS_PSR_IIR(cpu_transcoder);
303 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
304 enum transcoder cpu_transcoder)
306 if (DISPLAY_VER(dev_priv) >= 8)
307 return EDP_PSR_AUX_CTL(cpu_transcoder);
309 return HSW_SRD_AUX_CTL;
312 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
313 enum transcoder cpu_transcoder, int i)
315 if (DISPLAY_VER(dev_priv) >= 8)
316 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
318 return HSW_SRD_AUX_DATA(i);
321 static void psr_irq_control(struct intel_dp *intel_dp)
323 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
324 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
327 mask = psr_irq_psr_error_bit_get(intel_dp);
328 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
329 mask |= psr_irq_post_exit_bit_get(intel_dp) |
330 psr_irq_pre_entry_bit_get(intel_dp);
332 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
333 psr_irq_mask_get(intel_dp), ~mask);
336 static void psr_event_print(struct drm_i915_private *i915,
337 u32 val, bool psr2_enabled)
339 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
340 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
341 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
342 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
343 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
344 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
345 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
346 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
347 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
348 if (val & PSR_EVENT_GRAPHICS_RESET)
349 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
350 if (val & PSR_EVENT_PCH_INTERRUPT)
351 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
352 if (val & PSR_EVENT_MEMORY_UP)
353 drm_dbg_kms(&i915->drm, "\tMemory up\n");
354 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
355 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
356 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
357 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
358 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
359 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
360 if (val & PSR_EVENT_REGISTER_UPDATE)
361 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
362 if (val & PSR_EVENT_HDCP_ENABLE)
363 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
364 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
365 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
366 if (val & PSR_EVENT_VBI_ENABLE)
367 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
368 if (val & PSR_EVENT_LPSP_MODE_EXIT)
369 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
370 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
371 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
374 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
376 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
377 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
378 ktime_t time_ns = ktime_get();
380 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
381 intel_dp->psr.last_entry_attempt = time_ns;
382 drm_dbg_kms(&dev_priv->drm,
383 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
384 transcoder_name(cpu_transcoder));
387 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
388 intel_dp->psr.last_exit = time_ns;
389 drm_dbg_kms(&dev_priv->drm,
390 "[transcoder %s] PSR exit completed\n",
391 transcoder_name(cpu_transcoder));
393 if (DISPLAY_VER(dev_priv) >= 9) {
396 val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
398 psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
402 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
403 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
404 transcoder_name(cpu_transcoder));
406 intel_dp->psr.irq_aux_error = true;
409 * If this interruption is not masked it will keep
410 * interrupting so fast that it prevents the scheduled
412 * Also after a PSR error, we don't want to arm PSR
413 * again so we don't care about unmask the interruption
414 * or unset irq_aux_error.
416 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
417 0, psr_irq_psr_error_bit_get(intel_dp));
419 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
423 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
427 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
430 return alpm_caps & DP_ALPM_CAP;
433 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
435 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
436 u8 val = 8; /* assume the worst if we can't read the value */
438 if (drm_dp_dpcd_readb(&intel_dp->aux,
439 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
440 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
442 drm_dbg_kms(&i915->drm,
443 "Unable to get sink synchronization latency, assuming 8 frames\n");
447 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
449 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
454 /* If sink don't have specific granularity requirements set legacy ones */
455 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
456 /* As PSR2 HW sends full lines, we do not care about x granularity */
462 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
464 drm_dbg_kms(&i915->drm,
465 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
467 * Spec says that if the value read is 0 the default granularity should
470 if (r != 2 || w == 0)
473 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
475 drm_dbg_kms(&i915->drm,
476 "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
483 intel_dp->psr.su_w_granularity = w;
484 intel_dp->psr.su_y_granularity = y;
487 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
489 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
492 intel_dp->psr.sink_panel_replay_support = false;
493 drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
495 if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
496 drm_dbg_kms(&i915->drm,
497 "Panel replay is not supported by panel\n");
501 drm_dbg_kms(&i915->drm,
502 "Panel replay is supported by panel\n");
503 intel_dp->psr.sink_panel_replay_support = true;
506 static void _psr_init_dpcd(struct intel_dp *intel_dp)
508 struct drm_i915_private *i915 =
509 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
511 drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
512 intel_dp->psr_dpcd[0]);
514 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
515 drm_dbg_kms(&i915->drm,
516 "PSR support not currently available for this panel\n");
520 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
521 drm_dbg_kms(&i915->drm,
522 "Panel lacks power state control, PSR cannot be enabled\n");
526 intel_dp->psr.sink_support = true;
527 intel_dp->psr.sink_sync_latency =
528 intel_dp_get_sink_sync_latency(intel_dp);
530 if (DISPLAY_VER(i915) >= 9 &&
531 intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
532 bool y_req = intel_dp->psr_dpcd[1] &
533 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
534 bool alpm = intel_dp_get_alpm_status(intel_dp);
537 * All panels that supports PSR version 03h (PSR2 +
538 * Y-coordinate) can handle Y-coordinates in VSC but we are
539 * only sure that it is going to be used when required by the
540 * panel. This way panel is capable to do selective update
541 * without a aux frame sync.
543 * To support PSR version 02h and PSR version 03h without
544 * Y-coordinate requirement panels we would need to enable
547 intel_dp->psr.sink_psr2_support = y_req && alpm;
548 drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
549 intel_dp->psr.sink_psr2_support ? "" : "not ");
553 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
555 _panel_replay_init_dpcd(intel_dp);
557 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
558 sizeof(intel_dp->psr_dpcd));
560 if (intel_dp->psr_dpcd[0])
561 _psr_init_dpcd(intel_dp);
563 if (intel_dp->psr.sink_psr2_support) {
564 intel_dp->psr.colorimetry_support =
565 intel_dp_get_colorimetry_status(intel_dp);
566 intel_dp_get_su_granularity(intel_dp);
570 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
572 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
574 u32 aux_clock_divider, aux_ctl;
575 /* write DP_SET_POWER=D0 */
576 static const u8 aux_msg[] = {
577 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
578 [1] = (DP_SET_POWER >> 8) & 0xff,
579 [2] = DP_SET_POWER & 0xff,
581 [4] = DP_SET_POWER_D0,
585 BUILD_BUG_ON(sizeof(aux_msg) > 20);
586 for (i = 0; i < sizeof(aux_msg); i += 4)
587 intel_de_write(dev_priv,
588 psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
589 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
591 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
593 /* Start with bits set for DDI_AUX_CTL register */
594 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
597 /* Select only valid bits for SRD_AUX_CTL */
598 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
599 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
600 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
601 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
603 intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
607 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
609 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
610 u8 dpcd_val = DP_PSR_ENABLE;
612 if (intel_dp->psr.panel_replay_enabled)
615 if (intel_dp->psr.psr2_enabled) {
616 /* Enable ALPM at sink for psr2 */
617 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
619 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
621 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
623 if (intel_dp->psr.link_standby)
624 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
626 if (DISPLAY_VER(dev_priv) >= 8)
627 dpcd_val |= DP_PSR_CRC_VERIFICATION;
630 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
631 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
633 if (intel_dp->psr.entry_setup_frames > 0)
634 dpcd_val |= DP_PSR_FRAME_CAPTURE;
636 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
638 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
641 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
643 struct intel_connector *connector = intel_dp->attached_connector;
644 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
647 if (DISPLAY_VER(dev_priv) >= 11)
648 val |= EDP_PSR_TP4_TIME_0us;
650 if (dev_priv->display.params.psr_safest_params) {
651 val |= EDP_PSR_TP1_TIME_2500us;
652 val |= EDP_PSR_TP2_TP3_TIME_2500us;
656 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
657 val |= EDP_PSR_TP1_TIME_0us;
658 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
659 val |= EDP_PSR_TP1_TIME_100us;
660 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
661 val |= EDP_PSR_TP1_TIME_500us;
663 val |= EDP_PSR_TP1_TIME_2500us;
665 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
666 val |= EDP_PSR_TP2_TP3_TIME_0us;
667 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
668 val |= EDP_PSR_TP2_TP3_TIME_100us;
669 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
670 val |= EDP_PSR_TP2_TP3_TIME_500us;
672 val |= EDP_PSR_TP2_TP3_TIME_2500us;
676 * "Do not skip both TP1 and TP2/TP3"
678 if (DISPLAY_VER(dev_priv) < 9 &&
679 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
680 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
681 val |= EDP_PSR_TP2_TP3_TIME_100us;
684 if (intel_dp_source_supports_tps3(dev_priv) &&
685 drm_dp_tps3_supported(intel_dp->dpcd))
686 val |= EDP_PSR_TP_TP1_TP3;
688 val |= EDP_PSR_TP_TP1_TP2;
693 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
695 struct intel_connector *connector = intel_dp->attached_connector;
696 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
699 /* Let's use 6 as the minimum to cover all known cases including the
700 * off-by-one issue that HW has in some cases.
702 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
703 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
705 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
711 static void hsw_activate_psr1(struct intel_dp *intel_dp)
713 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
714 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
715 u32 max_sleep_time = 0x1f;
716 u32 val = EDP_PSR_ENABLE;
718 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
720 if (DISPLAY_VER(dev_priv) < 20)
721 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
723 if (IS_HASWELL(dev_priv))
724 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
726 if (intel_dp->psr.link_standby)
727 val |= EDP_PSR_LINK_STANDBY;
729 val |= intel_psr1_get_tp_time(intel_dp);
731 if (DISPLAY_VER(dev_priv) >= 8)
732 val |= EDP_PSR_CRC_ENABLE;
734 if (DISPLAY_VER(dev_priv) >= 20)
735 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
737 intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
738 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
741 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
743 struct intel_connector *connector = intel_dp->attached_connector;
744 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
747 if (dev_priv->display.params.psr_safest_params)
748 return EDP_PSR2_TP2_TIME_2500us;
750 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
751 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
752 val |= EDP_PSR2_TP2_TIME_50us;
753 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
754 val |= EDP_PSR2_TP2_TIME_100us;
755 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
756 val |= EDP_PSR2_TP2_TIME_500us;
758 val |= EDP_PSR2_TP2_TIME_2500us;
763 static int psr2_block_count_lines(struct intel_dp *intel_dp)
765 return intel_dp->psr.io_wake_lines < 9 &&
766 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
769 static int psr2_block_count(struct intel_dp *intel_dp)
771 return psr2_block_count_lines(intel_dp) / 4;
774 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
776 u8 frames_before_su_entry;
778 frames_before_su_entry = max_t(u8,
779 intel_dp->psr.sink_sync_latency + 1,
782 /* Entry setup frames must be at least 1 less than frames before SU entry */
783 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
784 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
786 return frames_before_su_entry;
789 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
791 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
793 intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
794 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
796 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
797 TRANS_DP2_PANEL_REPLAY_ENABLE);
800 static void hsw_activate_psr2(struct intel_dp *intel_dp)
802 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
803 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
804 u32 val = EDP_PSR2_ENABLE;
807 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
809 if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
810 val |= EDP_SU_TRACK_ENABLE;
812 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
813 val |= EDP_Y_COORDINATE_ENABLE;
815 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
817 val |= intel_psr2_get_tp_time(intel_dp);
819 if (DISPLAY_VER(dev_priv) >= 12) {
820 if (psr2_block_count(intel_dp) > 2)
821 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
823 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
826 /* Wa_22012278275:adl-p */
827 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
828 static const u8 map[] = {
839 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
840 * comments bellow for more information
844 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
845 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
847 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
848 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
849 } else if (DISPLAY_VER(dev_priv) >= 12) {
850 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
851 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
852 } else if (DISPLAY_VER(dev_priv) >= 9) {
853 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
854 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
857 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
858 val |= EDP_PSR2_SU_SDP_SCANLINE;
860 if (DISPLAY_VER(dev_priv) >= 20)
861 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
863 if (intel_dp->psr.psr2_sel_fetch_enabled) {
866 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
867 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
868 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
869 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
873 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
874 * recommending keep this bit unset while PSR2 is enabled.
876 intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
878 intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
882 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
884 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
885 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
886 else if (DISPLAY_VER(dev_priv) >= 12)
887 return cpu_transcoder == TRANSCODER_A;
888 else if (DISPLAY_VER(dev_priv) >= 9)
889 return cpu_transcoder == TRANSCODER_EDP;
894 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
896 if (!crtc_state->hw.active)
899 return DIV_ROUND_UP(1000 * 1000,
900 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
903 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
906 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
907 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
909 intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
910 EDP_PSR2_IDLE_FRAMES_MASK,
911 EDP_PSR2_IDLE_FRAMES(idle_frames));
914 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
916 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
918 psr2_program_idle_frames(intel_dp, 0);
919 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
922 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
924 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
926 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
927 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
930 static void tgl_dc3co_disable_work(struct work_struct *work)
932 struct intel_dp *intel_dp =
933 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
935 mutex_lock(&intel_dp->psr.lock);
936 /* If delayed work is pending, it is not idle */
937 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
940 tgl_psr2_disable_dc3co(intel_dp);
942 mutex_unlock(&intel_dp->psr.lock);
945 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
947 if (!intel_dp->psr.dc3co_exitline)
950 cancel_delayed_work(&intel_dp->psr.dc3co_work);
951 /* Before PSR2 exit disallow dc3co*/
952 tgl_psr2_disable_dc3co(intel_dp);
956 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
957 struct intel_crtc_state *crtc_state)
959 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
960 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
961 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
962 enum port port = dig_port->base.port;
964 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
965 return pipe <= PIPE_B && port <= PORT_B;
967 return pipe == PIPE_A && port == PORT_A;
971 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
972 struct intel_crtc_state *crtc_state)
974 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
975 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
976 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
980 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
981 * disable DC3CO until the changed dc3co activating/deactivating sequence
982 * is applied. B.Specs:49196
987 * DMC's DC3CO exit mechanism has an issue with Selective Fecth
988 * TODO: when the issue is addressed, this restriction should be removed.
990 if (crtc_state->enable_psr2_sel_fetch)
993 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
996 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
999 /* Wa_16011303918:adl-p */
1000 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1004 * DC3CO Exit time 200us B.Spec 49196
1005 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1008 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1010 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1013 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
/*
 * Decide whether PSR2 selective fetch can be used for this commit; on
 * success also sets crtc_state->enable_psr2_sel_fetch (note the
 * assignment-in-return at the end). Rejected when disabled by module
 * parameter/debug flag or when an async flip is requested.
 */
1016 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1017 struct intel_crtc_state *crtc_state)
1019 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1021 if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1022 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1023 drm_dbg_kms(&dev_priv->drm,
1024 "PSR2 sel fetch not enabled, disabled by parameter\n");
1028 if (crtc_state->uapi.async_flip) {
1029 drm_dbg_kms(&dev_priv->drm,
1030 "PSR2 sel fetch not enabled, async flip enabled\n");
1034 return crtc_state->enable_psr2_sel_fetch = true;
/*
 * Validate the sink's selective-update granularity against the mode:
 * hdisplay/vdisplay must be multiples of the sink's SU width/height
 * granularity, and with DSC the slice height must also align. On
 * success stores the resolved Y granularity in
 * crtc_state->su_y_granularity.
 */
1037 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1038 struct intel_crtc_state *crtc_state)
1040 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1041 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1042 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1043 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1044 u16 y_granularity = 0;
1046 /* PSR2 HW only send full lines so we only need to validate the width */
1047 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1050 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1053 /* HW tracking is only aligned to 4 lines */
1054 if (!crtc_state->enable_psr2_sel_fetch)
1055 return intel_dp->psr.su_y_granularity == 4;
1058 * adl_p and mtl platforms have 1 line granularity.
1059 * For other platforms with SW tracking we can adjust the y coordinates
1060 * to match sink requirement if multiple of 4.
1062 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1063 y_granularity = intel_dp->psr.su_y_granularity;
1064 else if (intel_dp->psr.su_y_granularity <= 2)
1066 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1067 y_granularity = intel_dp->psr.su_y_granularity;
1069 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1072 if (crtc_state->dsc.compression_enable &&
1073 vdsc_cfg->slice_height % y_granularity)
1076 crtc_state->su_y_granularity = y_granularity;
1080 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1081 struct intel_crtc_state *crtc_state)
1083 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1084 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1085 u32 hblank_total, hblank_ns, req_ns;
1087 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1088 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1090 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1091 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1093 if ((hblank_ns - req_ns) > 100)
1096 /* Not supported <13 / Wa_22012279113:adl-p */
1097 if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1100 crtc_state->req_psr2_sdp_prior_scanline = true;
/*
 * Convert the platform IO/fast wake times (us) into scanlines and store
 * them in intel_dp->psr.{io,fast}_wake_lines. Returns false when either
 * exceeds the platform maximum, meaning PSR2 cannot be used with this
 * mode's line time.
 */
1104 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1105 struct intel_crtc_state *crtc_state)
1107 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1108 int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1111 if (DISPLAY_VER(i915) >= 12) {
1114 * According to Bspec it's 42us, but based on testing
1115 * it is not enough -> use 45 us.
1117 fast_wake_time = 45;
1118 max_wake_lines = 12;
1121 fast_wake_time = 32;
1125 io_wake_lines = intel_usecs_to_scanlines(
1126 &crtc_state->hw.adjusted_mode, io_wake_time);
1127 fast_wake_lines = intel_usecs_to_scanlines(
1128 &crtc_state->hw.adjusted_mode, fast_wake_time);
1130 if (io_wake_lines > max_wake_lines ||
1131 fast_wake_lines > max_wake_lines)
1134 if (i915->display.params.psr_safest_params)
1135 io_wake_lines = fast_wake_lines = max_wake_lines;
1137 /* According to Bspec lower limit should be set as 7 lines. */
1138 intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1139 intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
/*
 * Determine how many extra frames the sink needs before PSR entry based
 * on the DPCD-reported PSR setup time vs. the mode's vblank. Returns the
 * frame count (0 or 1) on success, or a negative error when the setup
 * time is invalid or cannot be met on pre-display-20 hardware.
 */
1144 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1145 const struct drm_display_mode *adjusted_mode)
1147 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1148 int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1149 int entry_setup_frames = 0;
1151 if (psr_setup_time < 0) {
1152 drm_dbg_kms(&i915->drm,
1153 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1154 intel_dp->psr_dpcd[1]);
1158 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1159 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1160 if (DISPLAY_VER(i915) >= 20) {
1161 /* setup entry frames can be up to 3 frames */
1162 entry_setup_frames = 1;
1163 drm_dbg_kms(&i915->drm,
1164 "PSR setup entry frames %d\n",
1165 entry_setup_frames);
1167 drm_dbg_kms(&i915->drm,
1168 "PSR condition failed: PSR setup time (%d us) too long\n",
1174 return entry_setup_frames;
/*
 * Full PSR2 feasibility check for this crtc_state: sink support,
 * platform/stepping restrictions and workarounds, transcoder support,
 * bpp/resolution limits, DSC/CRC/VRR conflicts, SDP timing, wake times,
 * vblank length, selective fetch validity and SU granularity. Also
 * computes the DC3CO exitline on success. Clears
 * crtc_state->enable_psr2_sel_fetch on any failure path.
 */
1177 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1178 struct intel_crtc_state *crtc_state)
1180 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1181 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1182 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1183 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1185 if (!intel_dp->psr.sink_psr2_support)
1188 /* JSL and EHL only supports eDP 1.3 */
1189 if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1190 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1194 /* Wa_16011181250 */
1195 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1197 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1201 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1202 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1206 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1207 drm_dbg_kms(&dev_priv->drm,
1208 "PSR2 not supported in transcoder %s\n",
1209 transcoder_name(crtc_state->cpu_transcoder));
1213 if (!psr2_global_enabled(intel_dp)) {
1214 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1219 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1220 * resolution requires DSC to be enabled, priority is given to DSC
1223 if (crtc_state->dsc.compression_enable &&
1224 (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1225 drm_dbg_kms(&dev_priv->drm,
1226 "PSR2 cannot be enabled since DSC is enabled\n");
1230 if (crtc_state->crc_enabled) {
1231 drm_dbg_kms(&dev_priv->drm,
1232 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1236 if (DISPLAY_VER(dev_priv) >= 12) {
1240 } else if (DISPLAY_VER(dev_priv) >= 10) {
1244 } else if (DISPLAY_VER(dev_priv) == 9) {
1250 if (crtc_state->pipe_bpp > max_bpp) {
1251 drm_dbg_kms(&dev_priv->drm,
1252 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1253 crtc_state->pipe_bpp, max_bpp);
1257 /* Wa_16011303918:adl-p */
1258 if (crtc_state->vrr.enable &&
1259 IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1260 drm_dbg_kms(&dev_priv->drm,
1261 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1265 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1266 drm_dbg_kms(&dev_priv->drm,
1267 "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1271 if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1272 drm_dbg_kms(&dev_priv->drm,
1273 "PSR2 not enabled, Unable to use long enough wake times\n");
1277 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1278 if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1279 crtc_state->hw.adjusted_mode.crtc_vblank_start <
1280 psr2_block_count_lines(intel_dp)) {
1281 drm_dbg_kms(&dev_priv->drm,
1282 "PSR2 not enabled, too short vblank time\n");
1286 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1287 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1288 !HAS_PSR_HW_TRACKING(dev_priv)) {
1289 drm_dbg_kms(&dev_priv->drm,
1290 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1295 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1296 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1300 if (!crtc_state->enable_psr2_sel_fetch &&
1301 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1302 drm_dbg_kms(&dev_priv->drm,
1303 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1304 crtc_hdisplay, crtc_vdisplay,
1305 psr_max_h, psr_max_v);
1309 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1313 crtc_state->enable_psr2_sel_fetch = false;
/*
 * Basic PSR1 feasibility: rejected with VRR or when the source/sink
 * cannot do PSR; otherwise validates the entry setup frame count and
 * caches it in intel_dp->psr.entry_setup_frames.
 */
1317 static bool _psr_compute_config(struct intel_dp *intel_dp,
1318 struct intel_crtc_state *crtc_state)
1320 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1321 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1322 int entry_setup_frames;
1325 * Current PSR panels don't work reliably with VRR enabled
1326 * So if VRR is enabled, do not enable PSR.
1328 if (crtc_state->vrr.enable)
1331 if (!CAN_PSR(intel_dp))
1334 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1336 if (entry_setup_frames >= 0) {
1337 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1339 drm_dbg_kms(&dev_priv->drm,
1340 "PSR condition failed: PSR setup timing not met\n");
/*
 * Atomic-check entry point: decide has_panel_replay / has_psr /
 * has_psr2 for this crtc_state and prepare the PSR VSC SDP
 * infoframe. Bails out early when PSR is disabled by flag, the sink is
 * unreliable, or the mode is interlaced.
 */
1347 void intel_psr_compute_config(struct intel_dp *intel_dp,
1348 struct intel_crtc_state *crtc_state,
1349 struct drm_connector_state *conn_state)
1351 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1352 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1354 if (!psr_global_enabled(intel_dp)) {
1355 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1359 if (intel_dp->psr.sink_not_reliable) {
1360 drm_dbg_kms(&dev_priv->drm,
1361 "PSR sink implementation is not reliable\n");
1365 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1366 drm_dbg_kms(&dev_priv->drm,
1367 "PSR condition failed: Interlaced mode enabled\n");
1371 if (CAN_PANEL_REPLAY(intel_dp))
1372 crtc_state->has_panel_replay = true;
1374 crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1376 if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1379 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1381 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1382 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1383 &crtc_state->psr_vsc);
/*
 * State readout: reconstruct the PSR-related fields of pipe_config from
 * the software PSR state (and, for PSR2, the manual tracking / exitline
 * registers). PSR1/PSR2 enable bits themselves are not read back since
 * frontbuffer tracking toggles them at runtime.
 */
1386 void intel_psr_get_config(struct intel_encoder *encoder,
1387 struct intel_crtc_state *pipe_config)
1389 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1390 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1391 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1392 struct intel_dp *intel_dp;
1398 intel_dp = &dig_port->dp;
1399 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1402 mutex_lock(&intel_dp->psr.lock);
1403 if (!intel_dp->psr.enabled)
1406 if (intel_dp->psr.panel_replay_enabled) {
1407 pipe_config->has_panel_replay = true;
1410 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1411 * enabled/disabled because of frontbuffer tracking and others.
1413 pipe_config->has_psr = true;
1416 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1417 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1419 if (!intel_dp->psr.psr2_enabled)
1422 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1423 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1424 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1425 pipe_config->enable_psr2_sel_fetch = true;
1428 if (DISPLAY_VER(dev_priv) >= 12) {
1429 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1430 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1433 mutex_unlock(&intel_dp->psr.lock);
/*
 * Activate exactly one of Panel Replay / PSR2 / PSR1 (they are mutually
 * exclusive) and mark psr.active. Caller must hold psr.lock; warns if
 * any PSR engine is already enabled in hardware.
 */
1436 static void intel_psr_activate(struct intel_dp *intel_dp)
1438 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1439 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1441 drm_WARN_ON(&dev_priv->drm,
1442 transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1443 intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1445 drm_WARN_ON(&dev_priv->drm,
1446 intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1448 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1450 lockdep_assert_held(&intel_dp->psr.lock);
1452 /* psr1, psr2 and panel-replay are mutually exclusive.*/
1453 if (intel_dp->psr.panel_replay_enabled)
1454 dg2_activate_panel_replay(intel_dp);
1455 else if (intel_dp->psr.psr2_enabled)
1456 hsw_activate_psr2(intel_dp);
1458 hsw_activate_psr1(intel_dp);
1460 intel_dp->psr.active = true;
/*
 * Map the PSR pipe to its LATENCY_REPORTING_REMOVED bit in
 * GEN8_CHICKEN_DCPR_1 for Wa_16013835468.
 */
1463 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1465 switch (intel_dp->psr.pipe) {
1467 return LATENCY_REPORTING_REMOVED_PIPE_A;
1469 return LATENCY_REPORTING_REMOVED_PIPE_B;
1471 return LATENCY_REPORTING_REMOVED_PIPE_C;
1473 return LATENCY_REPORTING_REMOVED_PIPE_D;
1475 MISSING_CASE(intel_dp->psr.pipe);
/*
 * Apply or clear the watermark-optimization workaround bit
 * (Wa_14015648006 on display 11-14 when a WM level is disabled,
 * Wa_16013835468 on display 12 when vblank start != vdisplay).
 */
1484 static void wm_optimization_wa(struct intel_dp *intel_dp,
1485 const struct intel_crtc_state *crtc_state)
1487 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1488 bool set_wa_bit = false;
1490 /* Wa_14015648006 */
1491 if (IS_DISPLAY_VER(dev_priv, 11, 14))
1492 set_wa_bit |= crtc_state->wm_level_disabled;
1494 /* Wa_16013835468 */
1495 if (DISPLAY_VER(dev_priv) == 12)
1496 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1497 crtc_state->hw.adjusted_mode.crtc_vdisplay;
1500 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1501 0, wa_16013835468_bit_get(intel_dp));
1503 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1504 wa_16013835468_bit_get(intel_dp), 0);
/*
 * Program the source (display engine) side of PSR: AUX setup on
 * HSW/BDW, the PSR debug mask, IRQs, DC3CO exitline, HW-tracking
 * override for selective fetch, WM workarounds and the PSR2
 * platform-specific chicken/clock-gating bits.
 */
1507 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1508 const struct intel_crtc_state *crtc_state)
1510 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1511 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1515 * Only HSW and BDW have PSR AUX registers that need to be setup.
1516 * SKL+ use hardcoded values PSR AUX transactions
1518 if (DISPLAY_VER(dev_priv) < 9)
1519 hsw_psr_setup_aux(intel_dp);
1522 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
1523 * mask LPSP to avoid dependency on other drivers that might block
1524 * runtime_pm besides preventing other hw tracking issues now we
1525 * can rely on frontbuffer tracking.
1527 mask = EDP_PSR_DEBUG_MASK_MEMUP |
1528 EDP_PSR_DEBUG_MASK_HPD |
1529 EDP_PSR_DEBUG_MASK_LPSP;
1531 if (DISPLAY_VER(dev_priv) < 20)
1532 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1535 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1536 * registers in order to keep the CURSURFLIVE tricks working :(
1538 if (IS_DISPLAY_VER(dev_priv, 9, 10))
1539 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1541 /* allow PSR with sprite enabled */
1542 if (IS_HASWELL(dev_priv))
1543 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1545 intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1547 psr_irq_control(intel_dp);
1550 * TODO: if future platforms supports DC3CO in more than one
1551 * transcoder, EXITLINE will need to be unset when disabling PSR
1553 if (intel_dp->psr.dc3co_exitline)
1554 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1555 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1557 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1558 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1559 intel_dp->psr.psr2_sel_fetch_enabled ?
1560 IGNORE_PSR2_HW_TRACKING : 0);
1566 wm_optimization_wa(intel_dp, crtc_state);
1568 if (intel_dp->psr.psr2_enabled) {
1569 if (DISPLAY_VER(dev_priv) == 9)
1570 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1571 PSR2_VSC_ENABLE_PROG_HEADER |
1572 PSR2_ADD_VERTICAL_LINE_COUNT);
1575 * Wa_16014451276:adlp,mtl[a0,b0]
1576 * All supported adlp panels have 1-based X granularity, this may
1577 * cause issues if non-supported panels are used.
1579 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1580 IS_ALDERLAKE_P(dev_priv))
1581 intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1582 0, ADLP_1_BASED_X_GRANULARITY);
1584 /* Wa_16012604467:adlp,mtl[a0,b0] */
1585 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1586 intel_de_rmw(dev_priv,
1587 MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1588 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1589 else if (IS_ALDERLAKE_P(dev_priv))
1590 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1591 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
/*
 * Refuse to enable PSR when a previous PSR error is still latched in
 * the IIR (it survives driver reload); enabling in that state would
 * freeze the screen on first PSR activation. Marks the sink as
 * unreliable when the error bit is set.
 */
1595 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1597 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1598 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1602 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1603 * will still keep the error set even after the reset done in the
1604 * irq_preinstall and irq_uninstall hooks.
1605 * And enabling in this situation cause the screen to freeze in the
1606 * first time that PSR HW tries to activate so lets keep PSR disabled
1607 * to avoid any rendering problems.
1609 val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1610 val &= psr_irq_psr_error_bit_get(intel_dp);
1612 intel_dp->psr.sink_not_reliable = true;
1613 drm_dbg_kms(&dev_priv->drm,
1614 "PSR interruption error set, not enabling PSR\n");
/*
 * Enable PSR/Panel Replay with psr.lock held: snapshot the crtc_state
 * into intel_dp->psr, verify no latched PSR error, program the sink and
 * the source, then activate. Warns if already enabled.
 */
1621 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1622 const struct intel_crtc_state *crtc_state)
1624 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1625 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1626 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1627 struct intel_encoder *encoder = &dig_port->base;
1630 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1632 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1633 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1634 intel_dp->psr.busy_frontbuffer_bits = 0;
1635 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1636 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1637 /* DC5/DC6 requires at least 6 idle frames */
1638 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1639 intel_dp->psr.dc3co_exit_delay = val;
1640 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1641 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1642 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1643 intel_dp->psr.req_psr2_sdp_prior_scanline =
1644 crtc_state->req_psr2_sdp_prior_scanline;
1646 if (!psr_interrupt_error_check(intel_dp))
1649 if (intel_dp->psr.panel_replay_enabled)
1650 drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1652 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1653 intel_dp->psr.psr2_enabled ? "2" : "1");
1655 intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1656 intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1657 intel_psr_enable_sink(intel_dp);
1658 intel_psr_enable_source(intel_dp, crtc_state);
1659 intel_dp->psr.enabled = true;
1660 intel_dp->psr.paused = false;
1662 intel_psr_activate(intel_dp);
/*
 * Turn off whichever engine is active (Panel Replay, PSR2 with DC3CO
 * teardown, or PSR1) and clear psr.active. When already inactive, only
 * sanity-check that the hardware enable bits are clear.
 */
1665 static void intel_psr_exit(struct intel_dp *intel_dp)
1667 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1668 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1671 if (!intel_dp->psr.active) {
1672 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1673 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1674 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1677 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1678 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1683 if (intel_dp->psr.panel_replay_enabled) {
1684 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1685 TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1686 } else if (intel_dp->psr.psr2_enabled) {
1687 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1689 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1690 EDP_PSR2_ENABLE, 0);
1692 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1694 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1697 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1699 intel_dp->psr.active = false;
/*
 * Busy-wait (up to 2 ms) for the PSR1/PSR2 status state machine to
 * report idle after an exit; logs an error on timeout.
 */
1702 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1704 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1705 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1706 i915_reg_t psr_status;
1707 u32 psr_status_mask;
1709 if (intel_dp->psr.psr2_enabled) {
1710 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1711 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1713 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1714 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1717 /* Wait till PSR is idle */
1718 if (intel_de_wait_for_clear(dev_priv, psr_status,
1719 psr_status_mask, 2000))
1720 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n")
/*
 * Full PSR teardown with psr.lock held: exit and wait for idle, undo
 * the enable-time workarounds and clock-gating disables, update the
 * PHY power state, disable PSR on the sink via DPCD, and clear all
 * software state flags.
 */
1723 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1725 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1726 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1727 enum phy phy = intel_port_to_phy(dev_priv,
1728 dp_to_dig_port(intel_dp)->base.port);
1730 lockdep_assert_held(&intel_dp->psr.lock);
1732 if (!intel_dp->psr.enabled)
1735 if (intel_dp->psr.panel_replay_enabled)
1736 drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1738 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1739 intel_dp->psr.psr2_enabled ? "2" : "1");
1741 intel_psr_exit(intel_dp);
1742 intel_psr_wait_exit_locked(intel_dp);
1748 if (DISPLAY_VER(dev_priv) >= 11)
1749 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1750 wa_16013835468_bit_get(intel_dp), 0);
1752 if (intel_dp->psr.psr2_enabled) {
1753 /* Wa_16012604467:adlp,mtl[a0,b0] */
1754 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1755 intel_de_rmw(dev_priv,
1756 MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1757 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1758 else if (IS_ALDERLAKE_P(dev_priv))
1759 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1760 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1763 intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1765 /* Disable PSR on Sink */
1766 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1768 if (intel_dp->psr.psr2_enabled)
1769 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1771 intel_dp->psr.enabled = false;
1772 intel_dp->psr.panel_replay_enabled = false;
1773 intel_dp->psr.psr2_enabled = false;
1774 intel_dp->psr.psr2_sel_fetch_enabled = false;
1775 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 * This function needs to be called before disabling pipe.
 * Takes psr.lock, performs the locked teardown, then cancels the PSR
 * work and the delayed DC3CO work outside the lock.
void intel_psr_disable(struct intel_dp *intel_dp,
1786 const struct intel_crtc_state *old_crtc_state)
1788 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1790 if (!old_crtc_state->has_psr)
1793 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1796 mutex_lock(&intel_dp->psr.lock);
1798 intel_psr_disable_locked(intel_dp);
1800 mutex_unlock(&intel_dp->psr.lock);
1801 cancel_work_sync(&intel_dp->psr.work);
1802 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 * This function needs to be called after enabling psr.
 * Deactivates PSR in hardware without clearing the enabled state, so it
 * can later be re-activated by intel_psr_resume().
void intel_psr_pause(struct intel_dp *intel_dp)
1813 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1814 struct intel_psr *psr = &intel_dp->psr;
1816 if (!CAN_PSR(intel_dp))
1819 mutex_lock(&psr->lock);
1821 if (!psr->enabled) {
1822 mutex_unlock(&psr->lock);
1826 /* If we ever hit this, we will need to add refcount to pause/resume */
1827 drm_WARN_ON(&dev_priv->drm, psr->paused);
1829 intel_psr_exit(intel_dp);
1830 intel_psr_wait_exit_locked(intel_dp);
1833 mutex_unlock(&psr->lock);
1835 cancel_work_sync(&psr->work);
1836 cancel_delayed_work_sync(&psr->dc3co_work);
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 * This function needs to be called after pausing psr.
 * Re-activates PSR that was previously paused with intel_psr_pause().
void intel_psr_resume(struct intel_dp *intel_dp)
1847 struct intel_psr *psr = &intel_dp->psr;
1849 if (!CAN_PSR(intel_dp))
1852 mutex_lock(&psr->lock);
1857 psr->paused = false;
1858 intel_psr_activate(intel_dp);
1861 mutex_unlock(&psr->lock);
/* ADL-P and display 14+ have no explicit enable bit for manual tracking. */
1864 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1866 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1867 PSR2_MAN_TRK_CTL_ENABLE;
/* Platform-specific "single full frame" bit for PSR2_MAN_TRK_CTL. */
1870 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1872 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1873 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1874 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
/* Platform-specific "partial frame update" bit for PSR2_MAN_TRK_CTL. */
1877 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1879 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1880 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1881 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
/* Platform-specific "continuous full frame" bit for PSR2_MAN_TRK_CTL. */
1884 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1886 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1887 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1888 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
/*
 * Kick the hardware out of PSR without a full disable/re-enable: for
 * selective fetch, request a full-frame update via PSR2_MAN_TRK_CTL;
 * for HW tracking, poke CURSURFLIVE (Display WA #0884).
 */
1891 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1893 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1894 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1896 if (intel_dp->psr.psr2_sel_fetch_enabled)
1897 intel_de_write(dev_priv,
1898 PSR2_MAN_TRK_CTL(cpu_transcoder),
1899 man_trk_ctl_enable_bit_get(dev_priv) |
1900 man_trk_ctl_partial_frame_bit_get(dev_priv) |
1901 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1902 man_trk_ctl_continuos_full_frame(dev_priv));
1905 * Display WA #0884: skl+
1906 * This documented WA for bxt can be safely applied
1907 * broadly so we can force HW tracking to exit PSR
1908 * instead of disabling and re-enabling.
1909 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1910 * but it makes more sense write to the current active
1913 * This workaround does not exist for platforms with display 10 or newer
1914 * but testing proved that it works up to display 13; for newer
1915 * than that testing will be needed.
1917 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
/*
 * Write the precomputed manual tracking value (psr2_man_track_ctl) to
 * PSR2_MAN_TRK_CTL, unless a continuous-full-frame request is pending
 * for any PSR encoder on this crtc. Callers hold each psr.lock.
 */
1920 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1922 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1923 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1924 struct intel_encoder *encoder;
1926 if (!crtc_state->enable_psr2_sel_fetch)
1929 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1930 crtc_state->uapi.encoder_mask) {
1931 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1933 lockdep_assert_held(&intel_dp->psr.lock);
1934 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1939 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1940 crtc_state->psr2_man_track_ctl);
/*
 * Encode the selective-update region (or a full-frame request) into the
 * PSR2_MAN_TRK_CTL register value and stash it in
 * crtc_state->psr2_man_track_ctl. ADL-P/display-14+ use line addresses
 * directly; older platforms address in 4-line blocks.
 */
1943 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1944 struct drm_rect *clip, bool full_update)
1946 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1947 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1948 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1950 /* SF partial frame enable has to be set even on full update */
1951 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1954 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1955 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1962 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1963 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1964 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1966 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1968 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1969 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1972 crtc_state->psr2_man_track_ctl = val;
/*
 * Grow overlap_damage_area's Y range to cover damage_area (clipped to
 * pipe_src). A y1 of -1 marks an empty accumulator that gets seeded
 * with the first damage rect.
 */
1975 static void clip_area_update(struct drm_rect *overlap_damage_area,
1976 struct drm_rect *damage_area,
1977 struct drm_rect *pipe_src)
1979 if (!drm_rect_intersect(damage_area, pipe_src))
1982 if (overlap_damage_area->y1 == -1) {
1983 overlap_damage_area->y1 = damage_area->y1;
1984 overlap_damage_area->y2 = damage_area->y2;
1988 if (damage_area->y1 < overlap_damage_area->y1)
1989 overlap_damage_area->y1 = damage_area->y1;
1991 if (damage_area->y2 > overlap_damage_area->y2)
1992 overlap_damage_area->y2 = damage_area->y2;
/*
 * Expand the pipe damage clip so its Y extent is aligned to the
 * required granularity (DSC slice height on ADL-P/display-14+ with
 * compression, otherwise the SU Y granularity).
 */
1995 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1996 struct drm_rect *pipe_clip)
1998 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1999 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2002 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2003 if (crtc_state->dsc.compression_enable &&
2004 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2005 y_alignment = vdsc_cfg->slice_height;
2007 y_alignment = crtc_state->su_y_granularity;
2009 pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
2010 if (pipe_clip->y2 % y_alignment)
2011 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
 * TODO: Not clear how to handle planes with negative position,
 * also planes are not updated if they have a negative X
 * position so for now doing a full update in these cases
 * Plane scaling and rotation is not supported by selective fetch and both
 * properties can change without a modeset, so need to be checked at every
/* Returns false when this plane state forces a full update. */
static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2025 if (plane_state->uapi.dst.y1 < 0 ||
2026 plane_state->uapi.dst.x1 < 0 ||
2027 plane_state->scaler_id >= 0 ||
2028 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
 * Check for pipe properties that are not supported by selective fetch.
 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
 * enabled and going to the full update path.
/* Returns false (full update) when a pipe scaler is in use. */
static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2043 if (crtc_state->scaler_state.scaler_id >= 0)
/*
 * Compute the PSR2 selective-fetch state for @crtc in @state.
 *
 * Two passes over the planes: the first accumulates each plane's damaged
 * area into a single pipe-wide clip rectangle; the second intersects that
 * clip with every visible plane and programs its selective-fetch area.
 * Falls back to a full update (goto skip_sel_fetch_set_loop) whenever the
 * area calculation is not supported or fails.
 * NOTE(review): listing is partial — some braces/returns are missing;
 * code left byte-identical, comments only.
 */
2049 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2050 struct intel_crtc *crtc)
2052 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2053 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
/* y1 == -1 marks "no damage accumulated yet" (see the check further down) */
2054 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
2055 struct intel_plane_state *new_plane_state, *old_plane_state;
2056 struct intel_plane *plane;
2057 bool full_update = false;
2060 if (!crtc_state->enable_psr2_sel_fetch)
2063 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2065 goto skip_sel_fetch_set_loop;
2069 * Calculate minimal selective fetch area of each plane and calculate
2070 * the pipe damaged area.
2071 * In the next loop the plane selective fetch area will actually be set
2072 * using whole pipe damaged area.
2074 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2075 new_plane_state, i) {
2076 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
/* skip planes that are not on this CRTC or invisible in both states */
2079 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2082 if (!new_plane_state->uapi.visible &&
2083 !old_plane_state->uapi.visible)
2086 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2092 * If visibility or plane moved, mark the whole plane area as
2093 * damaged as it needs to be complete redraw in the new and old
2096 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2097 !drm_rect_equals(&new_plane_state->uapi.dst,
2098 &old_plane_state->uapi.dst)) {
2099 if (old_plane_state->uapi.visible) {
2100 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2101 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2102 clip_area_update(&pipe_clip, &damaged_area,
2103 &crtc_state->pipe_src);
2106 if (new_plane_state->uapi.visible) {
2107 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2108 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2109 clip_area_update(&pipe_clip, &damaged_area,
2110 &crtc_state->pipe_src);
2113 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2114 /* If alpha changed mark the whole plane area as damaged */
2115 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2116 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2117 clip_area_update(&pipe_clip, &damaged_area,
2118 &crtc_state->pipe_src);
2122 src = drm_plane_state_src(&new_plane_state->uapi);
2123 drm_rect_fp_to_int(&src, &src);
2125 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2126 &new_plane_state->uapi, &damaged_area))
/* translate damage from plane (src) coordinates into pipe coordinates */
2129 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2130 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2131 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2132 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2134 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2138 * TODO: For now we are just using full update in case
2139 * selective fetch area calculation fails. To optimize this we
2140 * should identify cases where this happens and fix the area
2141 * calculation for those.
2143 if (pipe_clip.y1 == -1) {
2144 drm_info_once(&dev_priv->drm,
2145 "Selective fetch area calculation failed in pipe %c\n",
2146 pipe_name(crtc->pipe));
2151 goto skip_sel_fetch_set_loop;
2153 /* Wa_14014971492 */
2154 if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2155 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2156 crtc_state->splitter.enable)
2159 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2163 intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2166 * Now that we have the pipe damaged area check if it intersect with
2167 * every plane, if it does set the plane selective fetch area.
2169 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2170 new_plane_state, i) {
2171 struct drm_rect *sel_fetch_area, inter;
2172 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2174 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2175 !new_plane_state->uapi.visible)
2179 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
/* no intersection: disable sel fetch for this plane (y1/y2 = -1) */
2180 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2181 sel_fetch_area->y1 = -1;
2182 sel_fetch_area->y2 = -1;
2184 * if plane sel fetch was previously enabled ->
2187 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2188 crtc_state->update_planes |= BIT(plane->id);
2193 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
/* sel fetch area is stored relative to the plane's own origin */
2198 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2199 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2200 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2201 crtc_state->update_planes |= BIT(plane->id);
2204 * Sel_fetch_area is calculated for UV plane. Use
2205 * same area for Y plane as well.
2208 struct intel_plane_state *linked_new_plane_state;
2209 struct drm_rect *linked_sel_fetch_area;
2211 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2212 if (IS_ERR(linked_new_plane_state))
2213 return PTR_ERR(linked_new_plane_state);
2215 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2216 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2217 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2218 crtc_state->update_planes |= BIT(linked->id);
2222 skip_sel_fetch_set_loop:
/* program the manual-tracking control value (full or partial frame) */
2223 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
/*
 * Called before the plane update: decide, per PSR-capable encoder on the
 * old CRTC state, whether PSR must be disabled for this commit, and apply
 * the Wa_14015648006 workaround when it stays enabled with disabled
 * watermark levels.
 * NOTE(review): listing is partial; code left byte-identical.
 */
2227 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2228 struct intel_crtc *crtc)
2230 struct drm_i915_private *i915 = to_i915(state->base.dev);
2231 const struct intel_crtc_state *old_crtc_state =
2232 intel_atomic_get_old_crtc_state(state, crtc);
2233 const struct intel_crtc_state *new_crtc_state =
2234 intel_atomic_get_new_crtc_state(state, crtc);
2235 struct intel_encoder *encoder;
2240 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2241 old_crtc_state->uapi.encoder_mask) {
2242 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2243 struct intel_psr *psr = &intel_dp->psr;
2244 bool needs_to_disable = false;
2246 mutex_lock(&psr->lock);
2249 * Reasons to disable:
2250 * - PSR disabled in new state
2251 * - All planes will go inactive
2252 * - Changing between PSR versions
2253 * - Display WA #1136: skl, bxt
2255 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2256 needs_to_disable |= !new_crtc_state->has_psr;
2257 needs_to_disable |= !new_crtc_state->active_planes;
2258 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2259 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2260 new_crtc_state->wm_level_disabled;
2262 if (psr->enabled && needs_to_disable)
2263 intel_psr_disable_locked(intel_dp);
2264 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2265 /* Wa_14015648006 */
2266 wm_optimization_wa(intel_dp, new_crtc_state);
2268 mutex_unlock(&psr->lock);
/*
 * Called after the plane update: re-enable PSR where possible (unless the
 * sink is unreliable, no planes are active, or WA #1136 applies), re-check
 * Wa_14015648006, force a PSR exit while CRC capture is on, and clear any
 * stale busy_frontbuffer_bits from an invalidate -> flip -> flush sequence.
 * NOTE(review): listing is partial; code left byte-identical.
 */
2272 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2273 struct intel_crtc *crtc)
2275 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2276 const struct intel_crtc_state *crtc_state =
2277 intel_atomic_get_new_crtc_state(state, crtc);
2278 struct intel_encoder *encoder;
2280 if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2283 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2284 crtc_state->uapi.encoder_mask) {
2285 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2286 struct intel_psr *psr = &intel_dp->psr;
2287 bool keep_disabled = false;
2289 mutex_lock(&psr->lock);
2291 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2293 keep_disabled |= psr->sink_not_reliable;
2294 keep_disabled |= !crtc_state->active_planes;
2296 /* Display WA #1136: skl, bxt */
2297 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2298 crtc_state->wm_level_disabled;
2300 if (!psr->enabled && !keep_disabled)
2301 intel_psr_enable_locked(intel_dp, crtc_state);
2302 else if (psr->enabled && !crtc_state->wm_level_disabled)
2303 /* Wa_14015648006 */
2304 wm_optimization_wa(intel_dp, crtc_state);
2306 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2307 if (crtc_state->crc_enabled && psr->enabled)
2308 psr_force_hw_tracking_exit(intel_dp);
2311 * Clear possible busy bits in case we have
2312 * invalidate -> flip -> flush sequence.
2314 intel_dp->psr.busy_frontbuffer_bits = 0;
2316 mutex_unlock(&psr->lock);
/*
 * Wait (up to 50 ms) for the PSR2 status to leave DEEP_SLEEP, which is
 * sufficient readiness for a pipe update. Returns the wait result
 * (0 on success, negative errno on timeout — per intel_de_wait_for_clear).
 */
2320 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2322 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2323 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2326 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2327 * As all higher states has bit 4 of PSR2 state set we can just wait for
2328 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2330 return intel_de_wait_for_clear(dev_priv,
2331 EDP_PSR2_STATUS(cpu_transcoder),
2332 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
/*
 * Wait (up to 50 ms) for the PSR1 status state field to clear, i.e. for
 * PSR1 to go idle before a pipe update. Returns the wait result.
 */
2335 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2337 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2338 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2341 * From bspec: Panel Self Refresh (BDW+)
2342 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2343 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2344 * defensive enough to cover everything.
2346 return intel_de_wait_for_clear(dev_priv,
2347 psr_status_reg(dev_priv, cpu_transcoder),
2348 EDP_PSR_STATUS_STATE_MASK, 50);
2352 * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update
2353 * @new_crtc_state: new CRTC state
2355 * This function is expected to be called from pipe_update_start() where it is
2356 * not expected to race with PSR enable or disable.
2358 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2360 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2361 struct intel_encoder *encoder;
2363 if (!new_crtc_state->has_psr)
2366 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2367 new_crtc_state->uapi.encoder_mask) {
2368 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/* caller must already hold psr.lock (see kernel-doc above) */
2371 lockdep_assert_held(&intel_dp->psr.lock);
2373 if (!intel_dp->psr.enabled)
/* pick the wait matching the active PSR version */
2376 if (intel_dp->psr.psr2_enabled)
2377 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2379 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2382 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
/*
 * Wait for PSR (1 or 2) to go idle so it can be re-activated. Drops
 * psr.lock around the register wait, then re-takes it and re-checks that
 * PSR is still wanted. Returns true only when the wait succeeded AND PSR
 * is still enabled after re-acquiring the lock.
 * NOTE(review): listing is partial; code left byte-identical.
 */
2386 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2388 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2389 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2394 if (!intel_dp->psr.enabled)
2397 if (intel_dp->psr.psr2_enabled) {
2398 reg = EDP_PSR2_STATUS(cpu_transcoder);
2399 mask = EDP_PSR2_STATUS_STATE_MASK;
2401 reg = psr_status_reg(dev_priv, cpu_transcoder);
2402 mask = EDP_PSR_STATUS_STATE_MASK;
/* drop the lock for the potentially long (50 ms) register wait */
2405 mutex_unlock(&intel_dp->psr.lock);
2407 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2409 drm_err(&dev_priv->drm,
2410 "Timed out waiting for PSR Idle for re-enable\n");
2412 /* After the unlocked wait, verify that PSR is still wanted! */
2413 mutex_lock(&intel_dp->psr.lock);
2414 return err == 0 && intel_dp->psr.enabled;
/*
 * Force a fastset on every eDP connector by building an atomic state that
 * marks each attached CRTC's mode as changed, then committing it with the
 * usual -EDEADLK backoff/retry dance. Used after a PSR debug-mode change
 * to make the new mode take effect. Returns 0 or a negative errno.
 * NOTE(review): listing is partial; code left byte-identical.
 */
2417 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2419 struct drm_connector_list_iter conn_iter;
2420 struct drm_modeset_acquire_ctx ctx;
2421 struct drm_atomic_state *state;
2422 struct drm_connector *conn;
2425 state = drm_atomic_state_alloc(&dev_priv->drm);
2429 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2431 state->acquire_ctx = &ctx;
2432 to_intel_atomic_state(state)->internal = true;
2435 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2436 drm_for_each_connector_iter(conn, &conn_iter) {
2437 struct drm_connector_state *conn_state;
2438 struct drm_crtc_state *crtc_state;
2440 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2443 conn_state = drm_atomic_get_connector_state(state, conn);
2444 if (IS_ERR(conn_state)) {
2445 err = PTR_ERR(conn_state);
2449 if (!conn_state->crtc)
2452 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2453 if (IS_ERR(crtc_state)) {
2454 err = PTR_ERR(crtc_state);
2458 /* Mark mode as changed to trigger a pipe->update() */
2459 crtc_state->mode_changed = true;
2461 drm_connector_list_iter_end(&conn_iter);
2464 err = drm_atomic_commit(state);
/* deadlock: clear the state, back off, and retry (retry loop truncated) */
2466 if (err == -EDEADLK) {
2467 drm_atomic_state_clear(state);
2468 err = drm_modeset_backoff(&ctx);
2473 drm_modeset_drop_locks(&ctx);
2474 drm_modeset_acquire_fini(&ctx);
2475 drm_atomic_state_put(state);
/*
 * Set the PSR debug mask from debugfs. Validates @val, stores it under
 * psr.lock, reprograms the PSR IRQs if PSR is already enabled, and forces
 * a fastset when the debug mode actually changed. Returns 0 or a negative
 * errno (including -EINVAL for an invalid mask).
 */
2480 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2482 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2483 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
/* reject unknown bits and out-of-range modes */
2487 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2488 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2489 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2493 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2497 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2498 intel_dp->psr.debug = val;
2501 * Do it right away if it's already enabled, otherwise it will be done
2502 * when enabling the source.
2504 if (intel_dp->psr.enabled)
2505 psr_irq_control(intel_dp);
2507 mutex_unlock(&intel_dp->psr.lock);
2509 if (old_mode != mode)
2510 ret = intel_psr_fastset_force(dev_priv);
/*
 * Handle an AUX error reported via PSR IRQ: disable PSR, mark the sink as
 * unreliable, and wake the sink by setting its power state to D0.
 */
2515 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2517 struct intel_psr *psr = &intel_dp->psr;
2519 intel_psr_disable_locked(intel_dp);
2520 psr->sink_not_reliable = true;
/* make sure the sink is awake */
2522 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
/*
 * Deferred PSR re-activation worker (psr.work). Handles a pending AUX
 * error, waits for the hardware to go idle, and re-activates PSR unless a
 * racing invalidate left busy_frontbuffer_bits set or PSR already active.
 * NOTE(review): listing is partial; code left byte-identical.
 */
2525 static void intel_psr_work(struct work_struct *work)
2527 struct intel_dp *intel_dp =
2528 container_of(work, typeof(*intel_dp), psr.work);
2530 mutex_lock(&intel_dp->psr.lock);
2532 if (!intel_dp->psr.enabled)
2535 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2536 intel_psr_handle_irq(intel_dp);
2539 * We have to make sure PSR is ready for re-enable
2540 * otherwise it keeps disabled until next full enable/disable cycle.
2541 * PSR might take some time to get fully disabled
2542 * and be ready for re-enable.
2544 if (!__psr_wait_for_idle_locked(intel_dp))
2548 * The delayed work can race with an invalidate hence we need to
2549 * recheck. Since psr_flush first clears this and then reschedules we
2550 * won't ever miss a flush when bailing out here.
2552 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2555 intel_psr_activate(intel_dp);
2557 mutex_unlock(&intel_dp->psr.lock);
/*
 * Frontbuffer-invalidate handling. With selective fetch enabled, switch
 * the hardware into continuous-full-frame (CFF) fetching (writing
 * CURSURFLIVE pushes an update so the screen doesn't lag); otherwise just
 * exit PSR via intel_psr_exit().
 * NOTE(review): listing is partial; code left byte-identical.
 */
2560 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2562 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2563 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2565 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2568 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2569 /* Send one update otherwise lag is observed in screen */
2570 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2574 val = man_trk_ctl_enable_bit_get(dev_priv) |
2575 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2576 man_trk_ctl_continuos_full_frame(dev_priv);
2577 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2578 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2579 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2581 intel_psr_exit(intel_dp);
2586 * intel_psr_invalidate - Invalidate PSR
2587 * @dev_priv: i915 device
2588 * @frontbuffer_bits: frontbuffer plane tracking bits
2589 * @origin: which operation caused the invalidate
2591 * Since the hardware frontbuffer tracking has gaps we need to integrate
2592 * with the software frontbuffer tracking. This function gets called every
2593 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2594 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2596 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2598 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2599 unsigned frontbuffer_bits, enum fb_op_origin origin)
2601 struct intel_encoder *encoder;
/* flips are handled by the atomic path, not frontbuffer tracking */
2603 if (origin == ORIGIN_FLIP)
2606 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2607 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2608 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2610 mutex_lock(&intel_dp->psr.lock);
2611 if (!intel_dp->psr.enabled) {
2612 mutex_unlock(&intel_dp->psr.lock);
/* only track bits belonging to this encoder's pipe */
2616 pipe_frontbuffer_bits &=
2617 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2618 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2620 if (pipe_frontbuffer_bits)
2621 _psr_invalidate_handle(intel_dp);
2623 mutex_unlock(&intel_dp->psr.lock);
2627 * When we will be completely rely on PSR2 S/W tracking in future,
2628 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
2629 * event also therefore tgl_dc3co_flush_locked() require to be changed
2630 * accordingly in future.
/*
 * On a frontbuffer flush touching this pipe, arm DC3CO and (re)schedule
 * the delayed work that will drop out of it after dc3co_exit_delay.
 * No-op when no DC3CO exitline is configured, PSR2 is not enabled, or PSR
 * is inactive.
 */
2633 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2634 enum fb_op_origin origin)
2636 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2638 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2639 !intel_dp->psr.active)
2643 * At every frontbuffer flush flip event modified delay of delayed work,
2644 * when delayed work schedules that means display has been idle.
2646 if (!(frontbuffer_bits &
2647 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2650 tgl_psr2_enable_dc3co(intel_dp);
2651 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2652 intel_dp->psr.dc3co_exit_delay);
/*
 * Frontbuffer-flush handling. With selective fetch: if CFF was armed by an
 * earlier invalidate and no frontbuffer is still busy, transition from
 * continuous- to single-full-frame and clear the CFF flag; otherwise force
 * a hardware-tracking exit. Finally, queue psr.work to re-activate PSR
 * when it is currently inactive with no busy bits.
 * NOTE(review): listing is partial; code left byte-identical.
 */
2655 static void _psr_flush_handle(struct intel_dp *intel_dp)
2657 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2658 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2660 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2661 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2662 /* can we turn CFF off? */
2663 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2664 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2665 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2666 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2667 man_trk_ctl_continuos_full_frame(dev_priv);
2670 * Set psr2_sel_fetch_cff_enabled as false to allow selective
2671 * updates. Still keep cff bit enabled as we don't have proper
2672 * SU configuration in case update is sent for any reason after
2673 * sff bit gets cleared by the HW on next vblank.
2675 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2677 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2678 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2682 * continuous full frame is disabled, only a single full
2685 psr_force_hw_tracking_exit(intel_dp);
2688 psr_force_hw_tracking_exit(intel_dp);
2690 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2691 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2696 * intel_psr_flush - Flush PSR
2697 * @dev_priv: i915 device
2698 * @frontbuffer_bits: frontbuffer plane tracking bits
2699 * @origin: which operation caused the flush
2701 * Since the hardware frontbuffer tracking has gaps we need to integrate
2702 * with the software frontbuffer tracking. This function gets called every
2703 * time frontbuffer rendering has completed and flushed out to memory. PSR
2704 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2706 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2708 void intel_psr_flush(struct drm_i915_private *dev_priv,
2709 unsigned frontbuffer_bits, enum fb_op_origin origin)
2711 struct intel_encoder *encoder;
2713 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2714 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2715 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2717 mutex_lock(&intel_dp->psr.lock);
2718 if (!intel_dp->psr.enabled) {
2719 mutex_unlock(&intel_dp->psr.lock);
/* clear the busy bits this flush just resolved for our pipe */
2723 pipe_frontbuffer_bits &=
2724 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2725 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2728 * If the PSR is paused by an explicit intel_psr_paused() call,
2729 * we have to ensure that the PSR is not activated until
2730 * intel_psr_resume() is called.
2732 if (intel_dp->psr.paused)
/* flips and (without sel fetch) cursor updates only manage DC3CO */
2735 if (origin == ORIGIN_FLIP ||
2736 (origin == ORIGIN_CURSOR_UPDATE &&
2737 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2738 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2742 if (pipe_frontbuffer_bits == 0)
2745 /* By definition flush = invalidate + flush */
2746 _psr_flush_handle(intel_dp);
2748 mutex_unlock(&intel_dp->psr.lock);
2753 * intel_psr_init - Init basic PSR work and mutex.
2754 * @intel_dp: Intel DP
2756 * This function is called after the initializing connector.
2757 * (the initializing of connector treats the handling of connector capabilities)
2758 * And it initializes basic PSR stuff for each DP Encoder.
2760 void intel_psr_init(struct intel_dp *intel_dp)
2762 struct intel_connector *connector = intel_dp->attached_connector;
2763 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2764 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2766 if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
/* non-eDP (Panel Replay capable) ports read DPCD caps here; eDP
 * presumably reads them elsewhere — TODO confirm against full source */
2769 if (!intel_dp_is_edp(intel_dp))
2770 intel_psr_init_dpcd(intel_dp);
2773 * HSW spec explicitly says PSR is tied to port A.
2774 * BDW+ platforms have a instance of PSR registers per transcoder but
2775 * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder
2777 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
2778 * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
2779 * But GEN12 supports a instance of PSR registers per transcoder.
2781 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2782 drm_dbg_kms(&dev_priv->drm,
2783 "PSR condition failed: Port not supported\n");
2787 if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2788 intel_dp->psr.source_panel_replay_support = true;
2790 intel_dp->psr.source_support = true;
2792 /* Set link_standby x link_off defaults */
2793 if (DISPLAY_VER(dev_priv) < 12)
2794 /* For new platforms up to TGL let's respect VBT back again */
2795 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2797 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2798 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2799 mutex_init(&intel_dp->psr.lock);
/*
 * Read the sink's status and error-status DPCD registers, choosing the
 * Panel Replay or PSR register offsets depending on which mode is
 * enabled. The PSR status is masked to DP_PSR_SINK_STATE_MASK. Returns a
 * drm_dp_dpcd_readb result code (error paths truncated in this listing).
 */
2802 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2803 u8 *status, u8 *error_status)
2805 struct drm_dp_aux *aux = &intel_dp->aux;
2807 unsigned int offset;
2809 offset = intel_dp->psr.panel_replay_enabled ?
2810 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2812 ret = drm_dp_dpcd_readb(aux, offset, status);
2816 offset = intel_dp->psr.panel_replay_enabled ?
2817 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2819 ret = drm_dp_dpcd_readb(aux, offset, error_status);
2823 *status = *status & DP_PSR_SINK_STATE_MASK;
/*
 * Check the sink's ALPM status (PSR2 only): on a lock-timeout error,
 * disable PSR and mark the sink unreliable; always write the status back
 * to clear the reported error bits.
 */
2828 static void psr_alpm_check(struct intel_dp *intel_dp)
2830 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2831 struct drm_dp_aux *aux = &intel_dp->aux;
2832 struct intel_psr *psr = &intel_dp->psr;
2836 if (!psr->psr2_enabled)
2839 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2841 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2845 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2846 intel_psr_disable_locked(intel_dp);
2847 psr->sink_not_reliable = true;
2848 drm_dbg_kms(&dev_priv->drm,
2849 "ALPM lock timeout error, disabling PSR\n");
2851 /* Clearing error */
2852 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
/*
 * Check DP_PSR_ESI for a sink capability change: if DP_PSR_CAPS_CHANGE is
 * set, disable PSR and mark the sink unreliable, then write the value
 * back to acknowledge/clear the event.
 */
2856 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2858 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2859 struct intel_psr *psr = &intel_dp->psr;
2863 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2865 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2869 if (val & DP_PSR_CAPS_CHANGE) {
2870 intel_psr_disable_locked(intel_dp);
2871 psr->sink_not_reliable = true;
2872 drm_dbg_kms(&dev_priv->drm,
2873 "Sink PSR capability changed, disabling PSR\n");
2876 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
/*
 * HPD short-pulse handler for PSR: read sink status/error status, disable
 * PSR on internal or RFB/VSC-SDP/link-CRC errors (marking the sink
 * unreliable), clear the error register, then run the ALPM and
 * capability-change checks.
 * NOTE(review): listing is partial; code left byte-identical.
 */
2880 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2882 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2883 struct intel_psr *psr = &intel_dp->psr;
2884 u8 status, error_status;
2885 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2886 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2887 DP_PSR_LINK_CRC_ERROR;
2889 if (!CAN_PSR(intel_dp))
2892 mutex_lock(&psr->lock);
2897 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2898 drm_err(&dev_priv->drm,
2899 "Error reading PSR status or error status\n");
2903 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2904 intel_psr_disable_locked(intel_dp);
2905 psr->sink_not_reliable = true;
/* log which specific error condition triggered the disable */
2908 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2909 drm_dbg_kms(&dev_priv->drm,
2910 "PSR sink internal error, disabling PSR\n");
2911 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2912 drm_dbg_kms(&dev_priv->drm,
2913 "PSR RFB storage error, disabling PSR\n");
2914 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2915 drm_dbg_kms(&dev_priv->drm,
2916 "PSR VSC SDP uncorrectable error, disabling PSR\n");
2917 if (error_status & DP_PSR_LINK_CRC_ERROR)
2918 drm_dbg_kms(&dev_priv->drm,
2919 "PSR Link CRC error, disabling PSR\n");
2921 if (error_status & ~errors)
2922 drm_err(&dev_priv->drm,
2923 "PSR_ERROR_STATUS unhandled errors %x\n",
2924 error_status & ~errors);
2925 /* clear status register */
2926 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2928 psr_alpm_check(intel_dp);
2929 psr_capability_changed_check(intel_dp);
2932 mutex_unlock(&psr->lock);
/*
 * Return whether PSR is currently enabled on @intel_dp, sampled under
 * psr.lock (false when the platform/port cannot do PSR at all).
 */
2935 bool intel_psr_enabled(struct intel_dp *intel_dp)
2939 if (!CAN_PSR(intel_dp))
2942 mutex_lock(&intel_dp->psr.lock);
2943 ret = intel_dp->psr.enabled;
2944 mutex_unlock(&intel_dp->psr.lock);
2950 * intel_psr_lock - grab PSR lock
2951 * @crtc_state: the crtc state
2953 * This is initially meant to be used by around CRTC update, when
2954 * vblank sensitive registers are updated and we need grab the lock
2955 * before it to avoid vblank evasion.
2957 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2959 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2960 struct intel_encoder *encoder;
2962 if (!crtc_state->has_psr)
2965 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2966 crtc_state->uapi.encoder_mask) {
2967 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2969 mutex_lock(&intel_dp->psr.lock);
2975 * intel_psr_unlock - release PSR lock
2976 * @crtc_state: the crtc state
2978 * Release the PSR lock that was held during pipe update.
2980 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2982 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2983 struct intel_encoder *encoder;
2985 if (!crtc_state->has_psr)
2988 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2989 crtc_state->uapi.encoder_mask) {
2990 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2992 mutex_unlock(&intel_dp->psr.lock);
/*
 * Print the source-side PSR live status to the debugfs seq_file: decode
 * the PSR2 or PSR1 status register's state field through a lookup table
 * ("unknown" for out-of-range values). The table contents are truncated
 * in this listing.
 */
2998 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3000 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3001 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3002 const char *status = "unknown";
3003 u32 val, status_val;
3005 if (intel_dp->psr.psr2_enabled) {
3006 static const char * const live_status[] = {
3019 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3020 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3021 if (status_val < ARRAY_SIZE(live_status))
3022 status = live_status[status_val];
3024 static const char * const live_status[] = {
3034 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3035 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3036 if (status_val < ARRAY_SIZE(live_status))
3037 status = live_status[status_val];
3040 seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
/*
 * Dump the full PSR state for debugfs: sink capabilities, enabled mode
 * (Panel Replay / PSR2 / PSR1), source control and live status registers,
 * busy frontbuffer bits, perf counter, IRQ timestamps, and — for PSR2 —
 * the per-frame selective-update block counts. Takes a runtime-PM wakeref
 * and psr.lock for the register reads.
 * NOTE(review): listing is partial; code left byte-identical.
 */
3043 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3045 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3046 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3047 struct intel_psr *psr = &intel_dp->psr;
3048 intel_wakeref_t wakeref;
3053 seq_printf(m, "Sink support: PSR = %s",
3054 str_yes_no(psr->sink_support));
3056 if (psr->sink_support)
3057 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3058 seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3060 if (!(psr->sink_support || psr->sink_panel_replay_support))
3063 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3064 mutex_lock(&psr->lock);
3066 if (psr->panel_replay_enabled)
3067 status = "Panel Replay Enabled";
3068 else if (psr->enabled)
3069 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3071 status = "disabled";
3072 seq_printf(m, "PSR mode: %s\n", status);
3074 if (!psr->enabled) {
3075 seq_printf(m, "PSR sink not reliable: %s\n",
3076 str_yes_no(psr->sink_not_reliable));
/* read the enable bit from whichever control register is in use */
3081 if (psr->panel_replay_enabled) {
3082 val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3083 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3084 } else if (psr->psr2_enabled) {
3085 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3086 enabled = val & EDP_PSR2_ENABLE;
3088 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3089 enabled = val & EDP_PSR_ENABLE;
3091 seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3092 str_enabled_disabled(enabled), val);
3093 psr_source_status(intel_dp, m);
3094 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3095 psr->busy_frontbuffer_bits);
3098 * SKL+ Perf counter is reset to 0 everytime DC state is entered
3100 val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3101 seq_printf(m, "Performance counter: %u\n",
3102 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3104 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3105 seq_printf(m, "Last attempted entry at: %lld\n",
3106 psr->last_entry_attempt);
3107 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3110 if (psr->psr2_enabled) {
3111 u32 su_frames_val[3];
3115 * Reading all 3 registers before hand to minimize crossing a
3116 * frame boundary between register reads
3118 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3119 val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3120 su_frames_val[frame / 3] = val;
3123 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3125 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3128 su_blocks = su_frames_val[frame / 3] &
3129 PSR2_SU_STATUS_MASK(frame);
3130 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3131 seq_printf(m, "%d\t%d\n", frame, su_blocks);
3134 seq_printf(m, "PSR2 selective fetch: %s\n",
3135 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3139 mutex_unlock(&psr->lock);
3140 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * Device-level debugfs "i915_edp_psr_status": find the first PSR-capable
 * encoder and dump its status via intel_psr_status().
 */
3145 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3147 struct drm_i915_private *dev_priv = m->private;
3148 struct intel_dp *intel_dp = NULL;
3149 struct intel_encoder *encoder;
3151 if (!HAS_PSR(dev_priv))
3154 /* Find the first EDP which supports PSR */
3155 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3156 intel_dp = enc_to_intel_dp(encoder);
3163 return intel_psr_status(m, intel_dp);
3165 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
/*
 * debugfs write handler for "i915_edp_psr_debug": apply the debug value to
 * every PSR-capable encoder under a runtime-PM wakeref.
 */
3168 i915_edp_psr_debug_set(void *data, u64 val)
3170 struct drm_i915_private *dev_priv = data;
3171 struct intel_encoder *encoder;
3172 intel_wakeref_t wakeref;
3175 if (!HAS_PSR(dev_priv))
3178 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3179 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3181 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3183 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3185 // TODO: split to each transcoder's PSR debug state
3186 ret = intel_psr_debug_set(intel_dp, val);
3188 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * debugfs read handler for "i915_edp_psr_debug": report the debug value of
 * a PSR-capable encoder (currently shared across transcoders, see TODO).
 */
3195 i915_edp_psr_debug_get(void *data, u64 *val)
3197 struct drm_i915_private *dev_priv = data;
3198 struct intel_encoder *encoder;
3200 if (!HAS_PSR(dev_priv))
3203 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3204 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3206 // TODO: split to each transcoder's PSR debug state
3207 *val = READ_ONCE(intel_dp->psr.debug);
3214 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3215 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
/*
 * Register the device-level PSR debugfs entries:
 * "i915_edp_psr_debug" (rw) and "i915_edp_psr_status" (ro).
 */
3218 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3220 struct drm_minor *minor = i915->drm.primary;
3222 debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3223 i915, &i915_edp_psr_debug_fops);
3225 debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3226 i915, &i915_edp_psr_status_fops);
/*
 * Short mode label for debugfs output: "PANEL-REPLAY" when Panel Replay is
 * enabled, otherwise a PSR label (remaining branches truncated in this
 * listing).
 */
3229 static const char *psr_mode_str(struct intel_dp *intel_dp)
3231 if (intel_dp->psr.panel_replay_enabled)
3232 return "PANEL-REPLAY";
3233 else if (intel_dp->psr.enabled)
/*
 * Dump the sink-side PSR / Panel Replay status read over the DPCD AUX
 * channel into the per-connector debugfs file.
 *
 * Decodes the raw status byte through one of two lookup tables,
 * depending on whether Panel Replay or PSR is active, then prints the
 * error-status bits both as a raw value and as per-bit descriptions.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	/*
	 * Indexed by DP_PSR_SINK_STATE_MASK values from the sink's
	 * DPCD status register; order must match the DP spec encoding.
	 */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"sink internal error",
	};
	/* Indexed by the DP_SINK_FRAME_LOCKED field for Panel Replay. */
	static const char * const panel_replay_status[] = {
		"Sink device frame is locked to the Source device",
		"Sink device is coasting, using the VTotal target",
		"Sink device is governing the frame rate (frame rate unlock is granted)",
		"Sink device in the process of re-locking with the Source device",
	};
	const char *str = "unknown";
	ssize_t ret;
	u8 status, error_status;
	u32 idx;

	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
		return -ENODEV;
	}

	/* Reading the DPCD needs a live sink. */
	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
	if (ret)
		return ret;

	/* Pick the decode table matching the active mode. */
	if (intel_dp->psr.panel_replay_enabled) {
		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
		if (idx < ARRAY_SIZE(panel_replay_status))
			str = panel_replay_status[idx];
	} else if (intel_dp->psr.enabled) {
		idx = status & DP_PSR_SINK_STATE_MASK;
		if (idx < ARRAY_SIZE(sink_status))
			str = sink_status[idx];
	}

	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);

	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);

	/* Only open a detail list when at least one known error bit is set. */
	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			    DP_PSR_LINK_CRC_ERROR))
		seq_puts(m, ":\n");
	else
		seq_puts(m, "\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));

	return ret;
}
/* seq_file fops for the per-connector read-only sink status file. */
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3308 static int i915_psr_status_show(struct seq_file *m, void *data)
3310 struct intel_connector *connector = m->private;
3311 struct intel_dp *intel_dp = intel_attached_dp(connector);
3313 return intel_psr_status(m, intel_dp);
/* seq_file fops for the per-connector read-only source status file. */
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3317 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3319 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3320 struct dentry *root = connector->base.debugfs_entry;
3322 /* TODO: Add support for MST connectors as well. */
3323 if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3324 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3325 connector->mst_port)
3328 debugfs_create_file("i915_psr_sink_status", 0444, root,
3329 connector, &i915_psr_sink_status_fops);
3331 if (HAS_PSR(i915) || HAS_DP20(i915))
3332 debugfs_create_file("i915_psr_status", 0444, root,
3333 connector, &i915_psr_status_fops);