/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_psr.h"
#include "intel_sprite.h"
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
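/*
 * Illustrative sketch only, not part of the driver: the frontbuffer tracking
 * integration described above boils down to bracketing CPU frontbuffer
 * rendering with an invalidate/flush pair, roughly:
 *
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU writes to the frontbuffer ...
 *	intel_psr_flush(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *
 * The real callers live in the frontbuffer tracking code; ORIGIN_FLIP
 * invalidates/flushes are filtered out below, since flips are handled by the
 * hardware tracking itself.
 */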
static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc_params.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}
static int edp_psr_shift(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A:
		return EDP_PSR_TRANSCODER_A_SHIFT;
	case TRANSCODER_B:
		return EDP_PSR_TRANSCODER_B_SHIFT;
	case TRANSCODER_C:
		return EDP_PSR_TRANSCODER_C_SHIFT;
	default:
		MISSING_CASE(cpu_transcoder);
		/* fallthrough */
	case TRANSCODER_EDP:
		return EDP_PSR_TRANSCODER_EDP_SHIFT;
	}
}
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
	u32 debug_mask, mask;
	enum transcoder cpu_transcoder;
	u32 transcoders = BIT(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	debug_mask = 0;
	mask = 0;
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		mask |= EDP_PSR_ERROR(shift);
		debug_mask |= EDP_PSR_POST_EXIT(shift) |
			      EDP_PSR_PRE_ENTRY(shift);
	}

	if (debug & I915_PSR_DEBUG_IRQ)
		mask |= debug_mask;

	I915_WRITE(EDP_PSR_IMR, ~mask);
}
static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();
	u32 mask = 0;

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		if (psr_iir & EDP_PSR_ERROR(shift)) {
			DRM_WARN("[transcoder %s] PSR aux error\n",
				 transcoder_name(cpu_transcoder));

			dev_priv->psr.irq_aux_error = true;

			/*
			 * If this interrupt is not masked it will keep
			 * interrupting so fast that it prevents the scheduled
			 * work from running.
			 * Also, after a PSR error we don't want to arm PSR
			 * again, so we don't care about unmasking the
			 * interrupt or clearing irq_aux_error.
			 */
			mask |= EDP_PSR_ERROR(shift);
		}

		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}

	if (mask) {
		mask |= I915_READ(EDP_PSR_IMR);
		I915_WRITE(EDP_PSR_IMR, mask);

		schedule_work(&dev_priv->psr.work);
	}
}
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}
static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	WARN_ON(dev_priv->psr.dp);
	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granulartiy(intel_dp);
		}
	}
}
static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct dp_sdp psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
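/*
 * For reference (summarized from the DP VSC SDP layout, not in the original
 * source): HB2 carries the SDP revision (0x2 = PSR, 0x4 = PSR2, 0x5 = PSR2 +
 * colorimetry) and HB3 the number of valid data bytes (0x8, 0xe and 0x13
 * respectively), which is why the HB2/HB3 pairs above move in lockstep.
 */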
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	I915_WRITE(EDP_PSR_CTL, 0);

	I915_WRITE(EDP_PSR2_CTL, val);
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * x granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by display design transcoder EDP is tied to port A,
	 * we can safely escape based on port A alone.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}
static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
					 enum transcoder cpu_transcoder)
{
	static const i915_reg_t regs[] = {
		[TRANSCODER_A] = CHICKEN_TRANS_A,
		[TRANSCODER_B] = CHICKEN_TRANS_B,
		[TRANSCODER_C] = CHICKEN_TRANS_C,
		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
	};

	WARN_ON(INTEL_GEN(dev_priv) < 9);

	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
		    !regs[cpu_transcoder].reg))
		cpu_transcoder = TRANSCODER_A;

	return regs[cpu_transcoder];
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
							cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other HW tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG, mask);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (INTEL_GEN(dev_priv) >= 9)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS;
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS;
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_wait_for_register(&dev_priv->uncore,
				    psr_status, psr_status_mask, 0, 2000))
		DRM_ERROR("Timed out waiting PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = false;
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #0884: all
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the currently active
	 * pipe.
	 */
	I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update PSR states, disabling, enabling or switching PSR
 * version when executing fastsets. For full modesets, intel_psr_disable() and
 * intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;
	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
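	/*
	 * Worked example of the bound above (our arithmetic, not from bspec):
	 * at a 60 Hz refresh rate this is ~16.7 ms + 6 ms + 1.5 ms ~= 24.2 ms,
	 * so the 50 ms timeout leaves roughly 2x margin.
	 */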
	return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;
		struct intel_crtc_state *intel_crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		intel_crtc_state = to_intel_crtc_state(crtc_state);

		if (crtc_state->active && intel_crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}
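/*
 * Sketch of typical usage (an assumption, not defined in this file): the
 * debug value normally arrives via the i915_edp_psr_debug debugfs entry,
 * e.g.
 *
 *	echo 0x3 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * which would force PSR1 (I915_PSR_DEBUG_FORCE_PSR1); the exact debugfs
 * path depends on the card index.
 */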
static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR state.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time that PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
	 */
	val = I915_READ(EDP_PSR_IIR);
	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
	if (val) {
		DRM_DEBUG_KMS("PSR interruption error set\n");
		dev_priv->psr.sink_not_reliable = true;
	}

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}
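/*
 * Note on defaults (an aside, not part of the original source): the
 * enable_psr value consulted above is the i915.enable_psr module parameter,
 * so the policy chosen here can be overridden at boot, e.g. with
 * "i915.enable_psr=1" on the kernel command line.
 */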
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
	mutex_unlock(&psr->lock);
}
bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}