1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
33 #include "intel_de.h"
34 #include "intel_display_types.h"
35 #include "intel_dp.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
43
44 /**
45  * DOC: Panel Self Refresh (PSR/SRD)
46  *
47  * Since Haswell the display controller supports Panel Self-Refresh on display
48  * panels which have a remote frame buffer (RFB) implemented according to the
49  * PSR spec in eDP 1.3. The PSR feature allows the display to go into lower
50  * standby states when the system is idle but the display is on, as it
51  * eliminates display refresh requests to DDR memory completely, as long as
52  * the frame buffer for that display is unchanged.
53  *
54  * Panel Self Refresh must be supported by both Hardware (source) and
55  * Panel (sink).
56  *
57  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58  * to power down the link and memory controller. For DSI panels the same idea
59  * is called "manual mode".
60  *
61  * The implementation uses the hardware-based PSR support which automatically
62  * enters/exits self-refresh mode. The hardware takes care of sending the
63  * required DP aux message and could even retrain the link (that part isn't
64  * enabled yet though). The hardware also keeps track of any frontbuffer
65  * changes to know when to exit self-refresh mode again. Unfortunately that
66  * part doesn't work too well, hence why the i915 PSR support uses the
67  * software frontbuffer tracking to make sure it doesn't miss a screen
68  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69  * get called by the frontbuffer tracking code. Note that because of locking
70  * issues the self-refresh re-enable code is done from a work queue, which
71  * must be correctly synchronized/cancelled when shutting down the pipe.
72  *
73  * DC3CO (DC3 clock off)
74  *
75  * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
76  * the clock off automatically during the PSR2 idle state.
77  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
78  * entry/exit allows the HW to enter a low-power state even when page flipping
79  * periodically (for instance a 30fps video playback scenario).
80  *
81  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
82  * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
83  * after 6 frames. If no other flip occurs and that work runs, DC3CO is
84  * disabled and PSR2 is configured to enter deep sleep again, resetting in
85  * case of another flip (a worked example of this delay follows this comment).
86  * Front buffer modifications do not trigger DC3CO activation on purpose, as
87  * it would bring a lot of complexity and most modern systems will only use
88  * page flips.
89  */
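
/*
 * A rough worked example of the 6 frame DC3CO idle window described above,
 * using illustrative numbers rather than values taken from a real crtc
 * state: at 30 fps one frame lasts DIV_ROUND_UP(1000 * 1000, 30) = 33334 us,
 * so the delayed work would be rearmed along the lines of
 *
 *	delay = msecs_to_jiffies(6 * frame_time_us / 1000);
 *	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work, delay);
 *
 * i.e. roughly 200 ms of flip-free time before DC3CO is disabled and PSR2
 * deep sleep is allowed again. The exact helpers and work queue named here
 * are illustrative, not a description of the scheduling code further down.
 */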
90
91 /*
92  * Description of PSR mask bits:
93  *
94  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
95  *
96  *  When unmasked (nearly) all display register writes (eg. even
97  *  SWF) trigger a PSR exit. Some registers are excluded from this
98  *  and they have a more specific mask (described below). On icl+
99  *  this bit no longer exists and is effectively always set.
100  *
101  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
102  *
103  *  When unmasked (nearly) all pipe/plane register writes
104  *  trigger a PSR exit. Some plane registers are excluded from this
105  *  and they have a more specific mask (described below).
106  *
107  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
110  *
111  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112  *  SPR_SURF/CURBASE are not included in this and instead are
113  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
115  *
116  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
118  *
119  *  When unmasked PSR is blocked as long as the sprite
120  *  plane is enabled. skl+ with their universal planes no
121  *  longer have a mask bit like this, and no plane being
122  *  enabled blocks PSR.
123  *
124  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
126  *
127  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
128  *  this doesn't exist but CURPOS is included in the
129  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
130  *
131  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
133  *
134  *  When unmasked PSR is blocked as long as vblank and/or vsync
135  *  interrupt is unmasked in IMR *and* enabled in IER.
136  *
137  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
139  *
140  *  Selects whether PSR exit generates an extra vblank before
141  *  the first frame is transmitted. Also note the opposite polarity
142  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143  *  unmasked==do not generate the extra vblank).
144  *
145  *  With DC states enabled the extra vblank happens after link training,
146  *  with DC states disabled it happens immediately upon PSR exit trigger.
147  *  No idea as of now why there is a difference. HSW/BDW (which don't
148  *  even have DMC) always generate it after link training. Go figure.
149  *
150  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
151  *  and thus won't latch until the first vblank. So with DC states
152  *  enabled the register effectively uses the reset value during DC5
153  *  exit+PSR exit sequence, and thus the bit does nothing until
154  *  latched by the vblank that it was trying to prevent from being
155  *  generated in the first place. So we should probably call this
156  *  one a chicken/egg bit instead on skl+.
157  *
158  *  In standby mode (as opposed to link-off) this makes no difference
159  *  as the timing generator keeps running the whole time generating
160  *  normal periodic vblanks.
161  *
162  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163  *  and doing so makes the behaviour match the skl+ reset value.
164  *
165  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
167  *
168  *  On BDW without this bit no vblanks whatsoever are
169  *  generated after PSR exit. On HSW this has no apparent effect.
170  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
171  *
172  * The rest of the bits are more self-explanatory and/or
173  * irrelevant for normal operation.
174  */
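
/*
 * As a sketch of how one of the mask bits above would typically be flipped
 * (illustrative only, not a call made at this point in the driver):
 *
 *	intel_de_rmw(dev_priv, EDP_PSR_DEBUG(cpu_transcoder),
 *		     0, EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
 *
 * performs a read-modify-write that sets the bit (third argument: bits to
 * clear, fourth argument: bits to set); swapping the last two arguments
 * clears it again. Which polarity actually masks the event is per bit, as
 * documented above.
 */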
175
176 bool intel_encoder_can_psr(struct intel_encoder *encoder)
177 {
178         if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
179                 return CAN_PSR(enc_to_intel_dp(encoder)) ||
180                        CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
181         else
182                 return false;
183 }
184
185 static bool psr_global_enabled(struct intel_dp *intel_dp)
186 {
187         struct intel_connector *connector = intel_dp->attached_connector;
188         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
189
190         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
191         case I915_PSR_DEBUG_DEFAULT:
192                 if (i915->display.params.enable_psr == -1)
193                         return connector->panel.vbt.psr.enable;
194                 return i915->display.params.enable_psr;
195         case I915_PSR_DEBUG_DISABLE:
196                 return false;
197         default:
198                 return true;
199         }
200 }
201
202 static bool psr2_global_enabled(struct intel_dp *intel_dp)
203 {
204         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
205
206         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
207         case I915_PSR_DEBUG_DISABLE:
208         case I915_PSR_DEBUG_FORCE_PSR1:
209                 return false;
210         default:
211                 if (i915->display.params.enable_psr == 1)
212                         return false;
213                 return true;
214         }
215 }
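
/*
 * Taken together, the two helpers above roughly mean: enable_psr == -1
 * defers the decision to the VBT, 0 disables PSR entirely, 1 allows PSR1
 * but not PSR2, and any other value allows both, unless a debugfs override
 * in psr.debug forces a specific mode.
 */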
216
217 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
218 {
219         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
220
221         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
222                 EDP_PSR_ERROR(intel_dp->psr.transcoder);
223 }
224
225 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
226 {
227         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
228
229         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
230                 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
231 }
232
233 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
234 {
235         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
236
237         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
238                 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
239 }
240
241 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
242 {
243         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
244
245         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
246                 EDP_PSR_MASK(intel_dp->psr.transcoder);
247 }
248
249 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
250                               enum transcoder cpu_transcoder)
251 {
252         if (DISPLAY_VER(dev_priv) >= 8)
253                 return EDP_PSR_CTL(cpu_transcoder);
254         else
255                 return HSW_SRD_CTL;
256 }
257
258 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
259                                 enum transcoder cpu_transcoder)
260 {
261         if (DISPLAY_VER(dev_priv) >= 8)
262                 return EDP_PSR_DEBUG(cpu_transcoder);
263         else
264                 return HSW_SRD_DEBUG;
265 }
266
267 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
268                                    enum transcoder cpu_transcoder)
269 {
270         if (DISPLAY_VER(dev_priv) >= 8)
271                 return EDP_PSR_PERF_CNT(cpu_transcoder);
272         else
273                 return HSW_SRD_PERF_CNT;
274 }
275
276 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
277                                  enum transcoder cpu_transcoder)
278 {
279         if (DISPLAY_VER(dev_priv) >= 8)
280                 return EDP_PSR_STATUS(cpu_transcoder);
281         else
282                 return HSW_SRD_STATUS;
283 }
284
285 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
286                               enum transcoder cpu_transcoder)
287 {
288         if (DISPLAY_VER(dev_priv) >= 12)
289                 return TRANS_PSR_IMR(cpu_transcoder);
290         else
291                 return EDP_PSR_IMR;
292 }
293
294 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
295                               enum transcoder cpu_transcoder)
296 {
297         if (DISPLAY_VER(dev_priv) >= 12)
298                 return TRANS_PSR_IIR(cpu_transcoder);
299         else
300                 return EDP_PSR_IIR;
301 }
302
303 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
304                                   enum transcoder cpu_transcoder)
305 {
306         if (DISPLAY_VER(dev_priv) >= 8)
307                 return EDP_PSR_AUX_CTL(cpu_transcoder);
308         else
309                 return HSW_SRD_AUX_CTL;
310 }
311
312 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
313                                    enum transcoder cpu_transcoder, int i)
314 {
315         if (DISPLAY_VER(dev_priv) >= 8)
316                 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
317         else
318                 return HSW_SRD_AUX_DATA(i);
319 }
320
321 static void psr_irq_control(struct intel_dp *intel_dp)
322 {
323         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
324         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
325         u32 mask;
326
327         mask = psr_irq_psr_error_bit_get(intel_dp);
328         if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
329                 mask |= psr_irq_post_exit_bit_get(intel_dp) |
330                         psr_irq_pre_entry_bit_get(intel_dp);
331
332         intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
333                      psr_irq_mask_get(intel_dp), ~mask);
334 }
335
336 static void psr_event_print(struct drm_i915_private *i915,
337                             u32 val, bool psr2_enabled)
338 {
339         drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
340         if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
341                 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
342         if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
343                 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
344         if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
345                 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
346         if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
347                 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
348         if (val & PSR_EVENT_GRAPHICS_RESET)
349                 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
350         if (val & PSR_EVENT_PCH_INTERRUPT)
351                 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
352         if (val & PSR_EVENT_MEMORY_UP)
353                 drm_dbg_kms(&i915->drm, "\tMemory up\n");
354         if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
355                 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
356         if (val & PSR_EVENT_WD_TIMER_EXPIRE)
357                 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
358         if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
359                 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
360         if (val & PSR_EVENT_REGISTER_UPDATE)
361                 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
362         if (val & PSR_EVENT_HDCP_ENABLE)
363                 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
364         if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
365                 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
366         if (val & PSR_EVENT_VBI_ENABLE)
367                 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
368         if (val & PSR_EVENT_LPSP_MODE_EXIT)
369                 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
370         if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
371                 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
372 }
373
374 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
375 {
376         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
377         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
378         ktime_t time_ns =  ktime_get();
379
380         if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
381                 intel_dp->psr.last_entry_attempt = time_ns;
382                 drm_dbg_kms(&dev_priv->drm,
383                             "[transcoder %s] PSR entry attempt in 2 vblanks\n",
384                             transcoder_name(cpu_transcoder));
385         }
386
387         if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
388                 intel_dp->psr.last_exit = time_ns;
389                 drm_dbg_kms(&dev_priv->drm,
390                             "[transcoder %s] PSR exit completed\n",
391                             transcoder_name(cpu_transcoder));
392
393                 if (DISPLAY_VER(dev_priv) >= 9) {
394                         u32 val;
395
396                         val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
397
398                         psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
399                 }
400         }
401
402         if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
403                 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
404                          transcoder_name(cpu_transcoder));
405
406                 intel_dp->psr.irq_aux_error = true;
407
408                 /*
409                  * If this interrupt is not masked it will keep firing
410                  * so fast that it prevents the scheduled work from
411                  * running.
412                  * Also, after a PSR error we don't want to arm PSR
413                  * again, so we don't care about unmasking the interrupt
414                  * or clearing irq_aux_error.
415                  */
416                 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
417                              0, psr_irq_psr_error_bit_get(intel_dp));
418
419                 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
420         }
421 }
422
423 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
424 {
425         u8 alpm_caps = 0;
426
427         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
428                               &alpm_caps) != 1)
429                 return false;
430         return alpm_caps & DP_ALPM_CAP;
431 }
432
433 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
434 {
435         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
436         u8 val = 8; /* assume the worst if we can't read the value */
437
438         if (drm_dp_dpcd_readb(&intel_dp->aux,
439                               DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
440                 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
441         else
442                 drm_dbg_kms(&i915->drm,
443                             "Unable to get sink synchronization latency, assuming 8 frames\n");
444         return val;
445 }
446
447 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
448 {
449         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
450         ssize_t r;
451         u16 w;
452         u8 y;
453
454         /* If the sink doesn't have specific granularity requirements set legacy ones */
455         if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
456                 /* As PSR2 HW sends full lines, we do not care about x granularity */
457                 w = 4;
458                 y = 4;
459                 goto exit;
460         }
461
462         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
463         if (r != 2)
464                 drm_dbg_kms(&i915->drm,
465                             "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
466         /*
467          * Spec says that if the value read is 0 the default granularity should
468          * be used instead.
469          */
470         if (r != 2 || w == 0)
471                 w = 4;
472
473         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
474         if (r != 1) {
475                 drm_dbg_kms(&i915->drm,
476                             "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
477                 y = 4;
478         }
479         if (y == 0)
480                 y = 1;
481
482 exit:
483         intel_dp->psr.su_w_granularity = w;
484         intel_dp->psr.su_y_granularity = y;
485 }
486
487 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
488 {
489         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
490         u8 pr_dpcd = 0;
491
492         intel_dp->psr.sink_panel_replay_support = false;
493         drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
494
495         if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
496                 drm_dbg_kms(&i915->drm,
497                             "Panel replay is not supported by panel\n");
498                 return;
499         }
500
501         drm_dbg_kms(&i915->drm,
502                     "Panel replay is supported by panel\n");
503         intel_dp->psr.sink_panel_replay_support = true;
504 }
505
506 static void _psr_init_dpcd(struct intel_dp *intel_dp)
507 {
508         struct drm_i915_private *i915 =
509                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
510
511         drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
512                     intel_dp->psr_dpcd[0]);
513
514         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
515                 drm_dbg_kms(&i915->drm,
516                             "PSR support not currently available for this panel\n");
517                 return;
518         }
519
520         if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
521                 drm_dbg_kms(&i915->drm,
522                             "Panel lacks power state control, PSR cannot be enabled\n");
523                 return;
524         }
525
526         intel_dp->psr.sink_support = true;
527         intel_dp->psr.sink_sync_latency =
528                 intel_dp_get_sink_sync_latency(intel_dp);
529
530         if (DISPLAY_VER(i915) >= 9 &&
531             intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
532                 bool y_req = intel_dp->psr_dpcd[1] &
533                              DP_PSR2_SU_Y_COORDINATE_REQUIRED;
534                 bool alpm = intel_dp_get_alpm_status(intel_dp);
535
536                 /*
537                  * All panels that support PSR version 03h (PSR2 +
538                  * Y-coordinate) can handle Y-coordinates in VSC, but we are
539                  * only sure that it is going to be used when required by the
540                  * panel. This way the panel is capable of doing selective
541                  * updates without an aux frame sync.
542                  *
543                  * To support PSR version 02h panels, and PSR version 03h
544                  * panels without the Y-coordinate requirement, we would need
545                  * to enable GTC first.
546                  */
547                 intel_dp->psr.sink_psr2_support = y_req && alpm;
548                 drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
549                             intel_dp->psr.sink_psr2_support ? "" : "not ");
550         }
551 }
552
553 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
554 {
555         _panel_replay_init_dpcd(intel_dp);
556
557         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
558                          sizeof(intel_dp->psr_dpcd));
559
560         if (intel_dp->psr_dpcd[0])
561                 _psr_init_dpcd(intel_dp);
562
563         if (intel_dp->psr.sink_psr2_support) {
564                 intel_dp->psr.colorimetry_support =
565                         intel_dp_get_colorimetry_status(intel_dp);
566                 intel_dp_get_su_granularity(intel_dp);
567         }
568 }
569
570 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
571 {
572         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
574         u32 aux_clock_divider, aux_ctl;
575         /* write DP_SET_POWER=D0 */
576         static const u8 aux_msg[] = {
577                 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
578                 [1] = (DP_SET_POWER >> 8) & 0xff,
579                 [2] = DP_SET_POWER & 0xff,
580                 [3] = 1 - 1,
581                 [4] = DP_SET_POWER_D0,
582         };
583         int i;
584
585         BUILD_BUG_ON(sizeof(aux_msg) > 20);
586         for (i = 0; i < sizeof(aux_msg); i += 4)
587                 intel_de_write(dev_priv,
588                                psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
589                                intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
590
591         aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
592
593         /* Start with bits set for DDI_AUX_CTL register */
594         aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
595                                              aux_clock_divider);
596
597         /* Select only valid bits for SRD_AUX_CTL */
598         aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
599                 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
600                 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
601                 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
602
603         intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
604                        aux_ctl);
605 }
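
/*
 * For reference: the 5 byte aux_msg above ends up as two 32 bit data
 * register writes, since intel_dp_aux_pack() packs up to 4 message bytes
 * MSB-first into each register; bytes 0-3 land in the first
 * EDP_PSR_AUX_DATA register and byte 4 in the top byte of the second one,
 * which is why the loop steps by 4 and uses i >> 2 as the register index.
 */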
606
607 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
608 {
609         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
610         u8 dpcd_val = DP_PSR_ENABLE;
611
612         if (intel_dp->psr.panel_replay_enabled)
613                 return;
614
615         if (intel_dp->psr.psr2_enabled) {
616                 /* Enable ALPM at sink for psr2 */
617                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
618                                    DP_ALPM_ENABLE |
619                                    DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
620
621                 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
622         } else {
623                 if (intel_dp->psr.link_standby)
624                         dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
625
626                 if (DISPLAY_VER(dev_priv) >= 8)
627                         dpcd_val |= DP_PSR_CRC_VERIFICATION;
628         }
629
630         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
631                 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
632
633         if (intel_dp->psr.entry_setup_frames > 0)
634                 dpcd_val |= DP_PSR_FRAME_CAPTURE;
635
636         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
637
638         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
639 }
640
641 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
642 {
643         struct intel_connector *connector = intel_dp->attached_connector;
644         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
645         u32 val = 0;
646
647         if (DISPLAY_VER(dev_priv) >= 11)
648                 val |= EDP_PSR_TP4_TIME_0us;
649
650         if (dev_priv->display.params.psr_safest_params) {
651                 val |= EDP_PSR_TP1_TIME_2500us;
652                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
653                 goto check_tp3_sel;
654         }
655
656         if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
657                 val |= EDP_PSR_TP1_TIME_0us;
658         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
659                 val |= EDP_PSR_TP1_TIME_100us;
660         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
661                 val |= EDP_PSR_TP1_TIME_500us;
662         else
663                 val |= EDP_PSR_TP1_TIME_2500us;
664
665         if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
666                 val |= EDP_PSR_TP2_TP3_TIME_0us;
667         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
668                 val |= EDP_PSR_TP2_TP3_TIME_100us;
669         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
670                 val |= EDP_PSR_TP2_TP3_TIME_500us;
671         else
672                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
673
674         /*
675          * WA 0479: hsw,bdw
676          * "Do not skip both TP1 and TP2/TP3"
677          */
678         if (DISPLAY_VER(dev_priv) < 9 &&
679             connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
680             connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
681                 val |= EDP_PSR_TP2_TP3_TIME_100us;
682
683 check_tp3_sel:
684         if (intel_dp_source_supports_tps3(dev_priv) &&
685             drm_dp_tps3_supported(intel_dp->dpcd))
686                 val |= EDP_PSR_TP_TP1_TP3;
687         else
688                 val |= EDP_PSR_TP_TP1_TP2;
689
690         return val;
691 }
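
/*
 * Example of the bucketing above, with illustrative VBT values: a tp1
 * wakeup time of 200 us falls into the "<= 500" bucket and selects
 * EDP_PSR_TP1_TIME_500us, while a tp2/tp3 wakeup time of 0 us selects
 * EDP_PSR_TP2_TP3_TIME_0us (the hsw/bdw WA 0479 bump to 100 us only
 * applies when both wakeup times are zero).
 */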
692
693 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
694 {
695         struct intel_connector *connector = intel_dp->attached_connector;
696         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
697         int idle_frames;
698
699         /* Let's use 6 as the minimum to cover all known cases including the
700          * off-by-one issue that HW has in some cases.
701          */
702         idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
703         idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
704
705         if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
706                 idle_frames = 0xf;
707
708         return idle_frames;
709 }
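
/*
 * Example with illustrative values: vbt.psr.idle_frames == 2 and a sink
 * sync latency of 3 frames evaluate to max(max(6, 2), 3 + 1) = 6, i.e. the
 * 6 frame floor dominates unless the sink needs more than that.
 */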
710
711 static void hsw_activate_psr1(struct intel_dp *intel_dp)
712 {
713         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
714         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
715         u32 max_sleep_time = 0x1f;
716         u32 val = EDP_PSR_ENABLE;
717
718         val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
719
720         if (DISPLAY_VER(dev_priv) < 20)
721                 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
722
723         if (IS_HASWELL(dev_priv))
724                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
725
726         if (intel_dp->psr.link_standby)
727                 val |= EDP_PSR_LINK_STANDBY;
728
729         val |= intel_psr1_get_tp_time(intel_dp);
730
731         if (DISPLAY_VER(dev_priv) >= 8)
732                 val |= EDP_PSR_CRC_ENABLE;
733
734         if (DISPLAY_VER(dev_priv) >= 20)
735                 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
736
737         intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
738                      ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
739 }
740
741 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
742 {
743         struct intel_connector *connector = intel_dp->attached_connector;
744         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
745         u32 val = 0;
746
747         if (dev_priv->display.params.psr_safest_params)
748                 return EDP_PSR2_TP2_TIME_2500us;
749
750         if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
751             connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
752                 val |= EDP_PSR2_TP2_TIME_50us;
753         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
754                 val |= EDP_PSR2_TP2_TIME_100us;
755         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
756                 val |= EDP_PSR2_TP2_TIME_500us;
757         else
758                 val |= EDP_PSR2_TP2_TIME_2500us;
759
760         return val;
761 }
762
763 static int psr2_block_count_lines(struct intel_dp *intel_dp)
764 {
765         return intel_dp->psr.io_wake_lines < 9 &&
766                 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
767 }
768
769 static int psr2_block_count(struct intel_dp *intel_dp)
770 {
771         return psr2_block_count_lines(intel_dp) / 4;
772 }
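
/*
 * E.g. (illustrative values) io_wake_lines == 7 and fast_wake_lines == 7
 * give 8 block count lines and thus a block count of 8 / 4 = 2, while any
 * wake value of 9 or more lines bumps this to 12 / 4 = 3.
 */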
773
774 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
775 {
776         u8 frames_before_su_entry;
777
778         frames_before_su_entry = max_t(u8,
779                                        intel_dp->psr.sink_sync_latency + 1,
780                                        2);
781
782         /* Entry setup frames must be at least 1 less than frames before SU entry */
783         if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
784                 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
785
786         return frames_before_su_entry;
787 }
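
/*
 * Example with illustrative values: sink_sync_latency == 1 and
 * entry_setup_frames == 3 first give max(1 + 1, 2) = 2, which is then
 * raised to 3 + 1 = 4 so that the entry setup frames stay at least one
 * below the frames-before-SU-entry value.
 */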
788
789 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
790 {
791         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
792
793         intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
794                      0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
795
796         intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
797                      TRANS_DP2_PANEL_REPLAY_ENABLE);
798 }
799
800 static void hsw_activate_psr2(struct intel_dp *intel_dp)
801 {
802         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
803         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
804         u32 val = EDP_PSR2_ENABLE;
805         u32 psr_val = 0;
806
807         val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
808
809         if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
810                 val |= EDP_SU_TRACK_ENABLE;
811
812         if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
813                 val |= EDP_Y_COORDINATE_ENABLE;
814
815         val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
816
817         val |= intel_psr2_get_tp_time(intel_dp);
818
819         if (DISPLAY_VER(dev_priv) >= 12) {
820                 if (psr2_block_count(intel_dp) > 2)
821                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
822                 else
823                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
824         }
825
826         /* Wa_22012278275:adl-p */
827         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
828                 static const u8 map[] = {
829                         2, /* 5 lines */
830                         1, /* 6 lines */
831                         0, /* 7 lines */
832                         3, /* 8 lines */
833                         6, /* 9 lines */
834                         5, /* 10 lines */
835                         4, /* 11 lines */
836                         7, /* 12 lines */
837                 };
838                 /*
839                  * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
840                  * comments below for more information
841                  */
842                 int tmp;
843
844                 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
845                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
846
847                 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
848                 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
849         } else if (DISPLAY_VER(dev_priv) >= 12) {
850                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
851                 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
852         } else if (DISPLAY_VER(dev_priv) >= 9) {
853                 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
854                 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
855         }
856
857         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
858                 val |= EDP_PSR2_SU_SDP_SCANLINE;
859
860         if (DISPLAY_VER(dev_priv) >= 20)
861                 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
862
863         if (intel_dp->psr.psr2_sel_fetch_enabled) {
864                 u32 tmp;
865
866                 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
867                 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
868         } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
869                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
870         }
871
872         /*
873          * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
874          * recommends keeping this bit unset while PSR2 is enabled.
875          */
876         intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
877
878         intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
879 }
880
881 static bool
882 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
883 {
884         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
885                 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
886         else if (DISPLAY_VER(dev_priv) >= 12)
887                 return cpu_transcoder == TRANSCODER_A;
888         else if (DISPLAY_VER(dev_priv) >= 9)
889                 return cpu_transcoder == TRANSCODER_EDP;
890         else
891                 return false;
892 }
893
894 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
895 {
896         if (!crtc_state->hw.active)
897                 return 0;
898
899         return DIV_ROUND_UP(1000 * 1000,
900                             drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
901 }
902
903 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
904                                      u32 idle_frames)
905 {
906         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
907         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
908
909         intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
910                      EDP_PSR2_IDLE_FRAMES_MASK,
911                      EDP_PSR2_IDLE_FRAMES(idle_frames));
912 }
913
914 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
915 {
916         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
917
918         psr2_program_idle_frames(intel_dp, 0);
919         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
920 }
921
922 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
923 {
924         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
925
926         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
927         psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
928 }
929
930 static void tgl_dc3co_disable_work(struct work_struct *work)
931 {
932         struct intel_dp *intel_dp =
933                 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
934
935         mutex_lock(&intel_dp->psr.lock);
936         /* If delayed work is pending, it is not idle */
937         if (delayed_work_pending(&intel_dp->psr.dc3co_work))
938                 goto unlock;
939
940         tgl_psr2_disable_dc3co(intel_dp);
941 unlock:
942         mutex_unlock(&intel_dp->psr.lock);
943 }
944
945 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
946 {
947         if (!intel_dp->psr.dc3co_exitline)
948                 return;
949
950         cancel_delayed_work(&intel_dp->psr.dc3co_work);
951         /* Before PSR2 exit disallow dc3co */
952         tgl_psr2_disable_dc3co(intel_dp);
953 }
954
955 static bool
956 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
957                               struct intel_crtc_state *crtc_state)
958 {
959         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
960         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
961         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
962         enum port port = dig_port->base.port;
963
964         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
965                 return pipe <= PIPE_B && port <= PORT_B;
966         else
967                 return pipe == PIPE_A && port == PORT_A;
968 }
969
970 static void
971 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
972                                   struct intel_crtc_state *crtc_state)
973 {
974         const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
975         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
976         struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
977         u32 exit_scanlines;
978
979         /*
980          * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
981          * disable DC3CO until the changed dc3co activating/deactivating sequence
982          * is applied. B.Specs:49196
983          */
984         return;
985
986         /*
987          * DMC's DC3CO exit mechanism has an issue with Selective Fetch
988          * TODO: when the issue is addressed, this restriction should be removed.
989          */
990         if (crtc_state->enable_psr2_sel_fetch)
991                 return;
992
993         if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
994                 return;
995
996         if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
997                 return;
998
999         /* Wa_16011303918:adl-p */
1000         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1001                 return;
1002
1003         /*
1004          * DC3CO Exit time 200us B.Spec 49196
1005          * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1006          */
1007         exit_scanlines =
1008                 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1009
1010         if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1011                 return;
1012
1013         crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1014 }
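
/*
 * Worked example of the (currently short-circuited) exit line math above,
 * with illustrative timings: a 1080p mode with a 148500 kHz crtc clock and
 * a 2200 pixel htotal has a line time of roughly 14.8 us, so 200 us rounds
 * up to 14 scanlines, plus 1 gives 15, and dc3co_exitline would end up as
 * 1080 - 15 = 1065.
 */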
1015
1016 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1017                                               struct intel_crtc_state *crtc_state)
1018 {
1019         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1020
1021         if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1022             intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1023                 drm_dbg_kms(&dev_priv->drm,
1024                             "PSR2 sel fetch not enabled, disabled by parameter\n");
1025                 return false;
1026         }
1027
1028         if (crtc_state->uapi.async_flip) {
1029                 drm_dbg_kms(&dev_priv->drm,
1030                             "PSR2 sel fetch not enabled, async flip enabled\n");
1031                 return false;
1032         }
1033
1034         return crtc_state->enable_psr2_sel_fetch = true;
1035 }
1036
1037 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1038                                    struct intel_crtc_state *crtc_state)
1039 {
1040         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1041         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1042         const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1043         const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1044         u16 y_granularity = 0;
1045
1046         /* PSR2 HW only sends full lines so we only need to validate the width */
1047         if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1048                 return false;
1049
1050         if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1051                 return false;
1052
1053         /* HW tracking is only aligned to 4 lines */
1054         if (!crtc_state->enable_psr2_sel_fetch)
1055                 return intel_dp->psr.su_y_granularity == 4;
1056
1057         /*
1058          * adl_p and mtl platforms have 1 line granularity.
1059          * For other platforms with SW tracking we can adjust the y coordinates
1060          * to match sink requirement if multiple of 4.
1061          */
1062         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1063                 y_granularity = intel_dp->psr.su_y_granularity;
1064         else if (intel_dp->psr.su_y_granularity <= 2)
1065                 y_granularity = 4;
1066         else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1067                 y_granularity = intel_dp->psr.su_y_granularity;
1068
1069         if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1070                 return false;
1071
1072         if (crtc_state->dsc.compression_enable &&
1073             vdsc_cfg->slice_height % y_granularity)
1074                 return false;
1075
1076         crtc_state->su_y_granularity = y_granularity;
1077         return true;
1078 }
1079
1080 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1081                                                         struct intel_crtc_state *crtc_state)
1082 {
1083         const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1084         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1085         u32 hblank_total, hblank_ns, req_ns;
1086
1087         hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1088         hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1089
1090         /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1091         req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1092
1093         if ((hblank_ns - req_ns) > 100)
1094                 return true;
1095
1096         /* Not supported <13 / Wa_22012279113:adl-p */
1097         if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1098                 return false;
1099
1100         crtc_state->req_psr2_sdp_prior_scanline = true;
1101         return true;
1102 }
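
/*
 * Worked example with illustrative numbers: 4 lanes at HBR2
 * (port_clock == 270000) give req_ns = ((60 / 4) + 11) * 1000 / 270, i.e.
 * about 96 ns, while a 560 pixel hblank at a 148500 kHz crtc clock gives
 * hblank_ns of about 3771 ns, so the margin is far above 100 ns and the
 * "SDP prior scanline" indication is not needed for such a mode.
 */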
1103
1104 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1105                                      struct intel_crtc_state *crtc_state)
1106 {
1107         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1108         int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1109         u8 max_wake_lines;
1110
1111         if (DISPLAY_VER(i915) >= 12) {
1112                 io_wake_time = 42;
1113                 /*
1114                  * According to Bspec it's 42us, but based on testing
1115                  * it is not enough -> use 45 us.
1116                  */
1117                 fast_wake_time = 45;
1118                 max_wake_lines = 12;
1119         } else {
1120                 io_wake_time = 50;
1121                 fast_wake_time = 32;
1122                 max_wake_lines = 8;
1123         }
1124
1125         io_wake_lines = intel_usecs_to_scanlines(
1126                 &crtc_state->hw.adjusted_mode, io_wake_time);
1127         fast_wake_lines = intel_usecs_to_scanlines(
1128                 &crtc_state->hw.adjusted_mode, fast_wake_time);
1129
1130         if (io_wake_lines > max_wake_lines ||
1131             fast_wake_lines > max_wake_lines)
1132                 return false;
1133
1134         if (i915->display.params.psr_safest_params)
1135                 io_wake_lines = fast_wake_lines = max_wake_lines;
1136
1137         /* According to Bspec the lower limit should be set to 7 lines. */
1138         intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1139         intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1140
1141         return true;
1142 }
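
/*
 * Example with illustrative timings on a display version 12+ platform:
 * with a line time of roughly 14.8 us the 42 us IO wake time comes to 3
 * scanlines and the 45 us fast wake time to 4 scanlines; both fit well
 * under the 12 line limit and are then raised to the 7 line lower bound
 * from Bspec.
 */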
1143
1144 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1145                                         const struct drm_display_mode *adjusted_mode)
1146 {
1147         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1148         int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1149         int entry_setup_frames = 0;
1150
1151         if (psr_setup_time < 0) {
1152                 drm_dbg_kms(&i915->drm,
1153                             "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1154                             intel_dp->psr_dpcd[1]);
1155                 return -ETIME;
1156         }
1157
1158         if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1159             adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1160                 if (DISPLAY_VER(i915) >= 20) {
1161                         /* setup entry frames can be up to 3 frames */
1162                         entry_setup_frames = 1;
1163                         drm_dbg_kms(&i915->drm,
1164                                     "PSR setup entry frames %d\n",
1165                                     entry_setup_frames);
1166                 } else {
1167                         drm_dbg_kms(&i915->drm,
1168                                     "PSR condition failed: PSR setup time (%d us) too long\n",
1169                                     psr_setup_time);
1170                         return -ETIME;
1171                 }
1172         }
1173
1174         return entry_setup_frames;
1175 }
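
/*
 * Example with illustrative numbers: a sink reporting a 330 us PSR setup
 * time on a mode with a roughly 14.8 us line time needs 23 scanlines of
 * setup, which fits into a 1125 - 1080 - 1 = 44 line vblank, so
 * entry_setup_frames stays 0. Only when the setup time spills past the
 * vblank does LNL+ fall back to using entry setup frames, while older
 * platforms reject the configuration with -ETIME.
 */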
1176
1177 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1178                                     struct intel_crtc_state *crtc_state)
1179 {
1180         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1181         int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1182         int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1183         int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1184
1185         if (!intel_dp->psr.sink_psr2_support)
1186                 return false;
1187
1188         /* JSL and EHL only support eDP 1.3 */
1189         if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1190                 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1191                 return false;
1192         }
1193
1194         /* Wa_16011181250 */
1195         if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1196             IS_DG2(dev_priv)) {
1197                 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1198                 return false;
1199         }
1200
1201         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1202                 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1203                 return false;
1204         }
1205
1206         if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1207                 drm_dbg_kms(&dev_priv->drm,
1208                             "PSR2 not supported in transcoder %s\n",
1209                             transcoder_name(crtc_state->cpu_transcoder));
1210                 return false;
1211         }
1212
1213         if (!psr2_global_enabled(intel_dp)) {
1214                 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1215                 return false;
1216         }
1217
1218         /*
1219          * DSC and PSR2 cannot be enabled simultaneously. If a requested
1220          * resolution requires DSC to be enabled, priority is given to DSC
1221          * over PSR2.
1222          */
1223         if (crtc_state->dsc.compression_enable &&
1224             (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1225                 drm_dbg_kms(&dev_priv->drm,
1226                             "PSR2 cannot be enabled since DSC is enabled\n");
1227                 return false;
1228         }
1229
1230         if (crtc_state->crc_enabled) {
1231                 drm_dbg_kms(&dev_priv->drm,
1232                             "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1233                 return false;
1234         }
1235
1236         if (DISPLAY_VER(dev_priv) >= 12) {
1237                 psr_max_h = 5120;
1238                 psr_max_v = 3200;
1239                 max_bpp = 30;
1240         } else if (DISPLAY_VER(dev_priv) >= 10) {
1241                 psr_max_h = 4096;
1242                 psr_max_v = 2304;
1243                 max_bpp = 24;
1244         } else if (DISPLAY_VER(dev_priv) == 9) {
1245                 psr_max_h = 3640;
1246                 psr_max_v = 2304;
1247                 max_bpp = 24;
1248         }
1249
1250         if (crtc_state->pipe_bpp > max_bpp) {
1251                 drm_dbg_kms(&dev_priv->drm,
1252                             "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1253                             crtc_state->pipe_bpp, max_bpp);
1254                 return false;
1255         }
1256
1257         /* Wa_16011303918:adl-p */
1258         if (crtc_state->vrr.enable &&
1259             IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1260                 drm_dbg_kms(&dev_priv->drm,
1261                             "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1262                 return false;
1263         }
1264
1265         if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1266                 drm_dbg_kms(&dev_priv->drm,
1267                             "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1268                 return false;
1269         }
1270
1271         if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1272                 drm_dbg_kms(&dev_priv->drm,
1273                             "PSR2 not enabled, Unable to use long enough wake times\n");
1274                 return false;
1275         }
1276
1277         /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1278         if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1279             crtc_state->hw.adjusted_mode.crtc_vblank_start <
1280             psr2_block_count_lines(intel_dp)) {
1281                 drm_dbg_kms(&dev_priv->drm,
1282                             "PSR2 not enabled, too short vblank time\n");
1283                 return false;
1284         }
1285
1286         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1287                 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1288                     !HAS_PSR_HW_TRACKING(dev_priv)) {
1289                         drm_dbg_kms(&dev_priv->drm,
1290                                     "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1291                         return false;
1292                 }
1293         }
1294
1295         if (!psr2_granularity_check(intel_dp, crtc_state)) {
1296                 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1297                 goto unsupported;
1298         }
1299
1300         if (!crtc_state->enable_psr2_sel_fetch &&
1301             (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1302                 drm_dbg_kms(&dev_priv->drm,
1303                             "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1304                             crtc_hdisplay, crtc_vdisplay,
1305                             psr_max_h, psr_max_v);
1306                 goto unsupported;
1307         }
1308
1309         tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1310         return true;
1311
1312 unsupported:
1313         crtc_state->enable_psr2_sel_fetch = false;
1314         return false;
1315 }
1316
1317 static bool _psr_compute_config(struct intel_dp *intel_dp,
1318                                 struct intel_crtc_state *crtc_state)
1319 {
1320         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1321         const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1322         int entry_setup_frames;
1323
1324         /*
1325          * Current PSR panels don't work reliably with VRR enabled, so
1326          * if VRR is enabled, do not enable PSR.
1327          */
1328         if (crtc_state->vrr.enable)
1329                 return false;
1330
1331         if (!CAN_PSR(intel_dp))
1332                 return false;
1333
1334         entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1335
1336         if (entry_setup_frames >= 0) {
1337                 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1338         } else {
1339                 drm_dbg_kms(&dev_priv->drm,
1340                             "PSR condition failed: PSR setup timing not met\n");
1341                 return false;
1342         }
1343
1344         return true;
1345 }
1346
1347 void intel_psr_compute_config(struct intel_dp *intel_dp,
1348                               struct intel_crtc_state *crtc_state,
1349                               struct drm_connector_state *conn_state)
1350 {
1351         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1352         const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1353
1354         if (!psr_global_enabled(intel_dp)) {
1355                 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1356                 return;
1357         }
1358
1359         if (intel_dp->psr.sink_not_reliable) {
1360                 drm_dbg_kms(&dev_priv->drm,
1361                             "PSR sink implementation is not reliable\n");
1362                 return;
1363         }
1364
1365         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1366                 drm_dbg_kms(&dev_priv->drm,
1367                             "PSR condition failed: Interlaced mode enabled\n");
1368                 return;
1369         }
1370
1371         if (CAN_PANEL_REPLAY(intel_dp))
1372                 crtc_state->has_panel_replay = true;
1373         else
1374                 crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1375
1376         if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1377                 return;
1378
1379         crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1380
1381         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1382         intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1383                                      &crtc_state->psr_vsc);
1384 }
1385
1386 void intel_psr_get_config(struct intel_encoder *encoder,
1387                           struct intel_crtc_state *pipe_config)
1388 {
1389         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1390         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1391         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1392         struct intel_dp *intel_dp;
1393         u32 val;
1394
1395         if (!dig_port)
1396                 return;
1397
1398         intel_dp = &dig_port->dp;
1399         if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1400                 return;
1401
1402         mutex_lock(&intel_dp->psr.lock);
1403         if (!intel_dp->psr.enabled)
1404                 goto unlock;
1405
1406         if (intel_dp->psr.panel_replay_enabled) {
1407                 pipe_config->has_panel_replay = true;
1408         } else {
1409                 /*
1410                  * Not possible to read the EDP_PSR/PSR2_CTL registers here, as
1411                  * they get enabled/disabled by frontbuffer tracking and others.
1412                  */
1413                 pipe_config->has_psr = true;
1414         }
1415
1416         pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1417         pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1418
1419         if (!intel_dp->psr.psr2_enabled)
1420                 goto unlock;
1421
1422         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1423                 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1424                 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1425                         pipe_config->enable_psr2_sel_fetch = true;
1426         }
1427
1428         if (DISPLAY_VER(dev_priv) >= 12) {
1429                 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1430                 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1431         }
1432 unlock:
1433         mutex_unlock(&intel_dp->psr.lock);
1434 }
1435
1436 static void intel_psr_activate(struct intel_dp *intel_dp)
1437 {
1438         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1439         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1440
1441         drm_WARN_ON(&dev_priv->drm,
1442                     transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1443                     intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1444
1445         drm_WARN_ON(&dev_priv->drm,
1446                     intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1447
1448         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1449
1450         lockdep_assert_held(&intel_dp->psr.lock);
1451
1452         /* psr1, psr2 and panel-replay are mutually exclusive. */
1453         if (intel_dp->psr.panel_replay_enabled)
1454                 dg2_activate_panel_replay(intel_dp);
1455         else if (intel_dp->psr.psr2_enabled)
1456                 hsw_activate_psr2(intel_dp);
1457         else
1458                 hsw_activate_psr1(intel_dp);
1459
1460         intel_dp->psr.active = true;
1461 }
1462
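/* Map the PSR pipe to the per-pipe latency reporting bit used by Wa_16013835468. */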
1463 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1464 {
1465         switch (intel_dp->psr.pipe) {
1466         case PIPE_A:
1467                 return LATENCY_REPORTING_REMOVED_PIPE_A;
1468         case PIPE_B:
1469                 return LATENCY_REPORTING_REMOVED_PIPE_B;
1470         case PIPE_C:
1471                 return LATENCY_REPORTING_REMOVED_PIPE_C;
1472         case PIPE_D:
1473                 return LATENCY_REPORTING_REMOVED_PIPE_D;
1474         default:
1475                 MISSING_CASE(intel_dp->psr.pipe);
1476                 return 0;
1477         }
1478 }
1479
1480 /*
1481  * Wa_16013835468
1482  * Wa_14015648006
1483  */
1484 static void wm_optimization_wa(struct intel_dp *intel_dp,
1485                                const struct intel_crtc_state *crtc_state)
1486 {
1487         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1488         bool set_wa_bit = false;
1489
1490         /* Wa_14015648006 */
1491         if (IS_DISPLAY_VER(dev_priv, 11, 14))
1492                 set_wa_bit |= crtc_state->wm_level_disabled;
1493
1494         /* Wa_16013835468 */
1495         if (DISPLAY_VER(dev_priv) == 12)
1496                 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1497                         crtc_state->hw.adjusted_mode.crtc_vdisplay;
1498
1499         if (set_wa_bit)
1500                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1501                              0, wa_16013835468_bit_get(intel_dp));
1502         else
1503                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1504                              wa_16013835468_bit_get(intel_dp), 0);
1505 }
1506
1507 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1508                                     const struct intel_crtc_state *crtc_state)
1509 {
1510         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1511         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1512         u32 mask;
1513
1514         /*
1515          * Only HSW and BDW have PSR AUX registers that need to be set up;
1516          * SKL+ use hardcoded values for PSR AUX transactions.
1517          */
1518         if (DISPLAY_VER(dev_priv) < 9)
1519                 hsw_psr_setup_aux(intel_dp);
1520
1521         /*
1522          * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1523          * mask LPSP to avoid a dependency on other drivers that might block
1524          * runtime_pm, and to prevent other HW tracking issues, now that we
1525          * can rely on frontbuffer tracking.
1526          */
1527         mask = EDP_PSR_DEBUG_MASK_MEMUP |
1528                EDP_PSR_DEBUG_MASK_HPD |
1529                EDP_PSR_DEBUG_MASK_LPSP;
1530
1531         if (DISPLAY_VER(dev_priv) < 20)
1532                 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1533
1534         /*
1535          * No separate pipe reg write mask on hsw/bdw, so we have to unmask all
1536          * registers in order to keep the CURSURFLIVE tricks working :(
1537          */
1538         if (IS_DISPLAY_VER(dev_priv, 9, 10))
1539                 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1540
1541         /* allow PSR with sprite enabled */
1542         if (IS_HASWELL(dev_priv))
1543                 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1544
1545         intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1546
1547         psr_irq_control(intel_dp);
1548
1549         /*
1550          * TODO: if future platforms support DC3CO in more than one
1551          * transcoder, EXITLINE will need to be unset when disabling PSR
1552          */
1553         if (intel_dp->psr.dc3co_exitline)
1554                 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1555                              intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1556
1557         if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1558                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1559                              intel_dp->psr.psr2_sel_fetch_enabled ?
1560                              IGNORE_PSR2_HW_TRACKING : 0);
1561
1562         /*
1563          * Wa_16013835468
1564          * Wa_14015648006
1565          */
1566         wm_optimization_wa(intel_dp, crtc_state);
1567
1568         if (intel_dp->psr.psr2_enabled) {
1569                 if (DISPLAY_VER(dev_priv) == 9)
1570                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1571                                      PSR2_VSC_ENABLE_PROG_HEADER |
1572                                      PSR2_ADD_VERTICAL_LINE_COUNT);
1573
1574                 /*
1575                  * Wa_16014451276:adlp,mtl[a0,b0]
1576                  * All supported adlp panels have 1-based X granularity; this may
1577                  * cause issues if non-supported panels are used.
1578                  */
1579                 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1580                     IS_ALDERLAKE_P(dev_priv))
1581                         intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1582                                      0, ADLP_1_BASED_X_GRANULARITY);
1583
1584                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1585                 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1586                         intel_de_rmw(dev_priv,
1587                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1588                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1589                 else if (IS_ALDERLAKE_P(dev_priv))
1590                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1591                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1592         }
1593 }
1594
1595 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1596 {
1597         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1598         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1599         u32 val;
1600
1601         /*
1602          * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1603          * will still keep the error set even after the reset done in the
1604          * irq_preinstall and irq_uninstall hooks.
1605          * Enabling PSR in this situation causes the screen to freeze the
1606          * first time the PSR HW tries to activate, so let's keep PSR disabled
1607          * to avoid any rendering problems.
1608          */
1609         val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1610         val &= psr_irq_psr_error_bit_get(intel_dp);
1611         if (val) {
1612                 intel_dp->psr.sink_not_reliable = true;
1613                 drm_dbg_kms(&dev_priv->drm,
1614                             "PSR interruption error set, not enabling PSR\n");
1615                 return false;
1616         }
1617
1618         return true;
1619 }
1620
1621 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1622                                     const struct intel_crtc_state *crtc_state)
1623 {
1624         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1625         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1626         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1627         struct intel_encoder *encoder = &dig_port->base;
1628         u32 val;
1629
1630         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1631
1632         intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1633         intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1634         intel_dp->psr.busy_frontbuffer_bits = 0;
1635         intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1636         intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1637         /* DC5/DC6 requires at least 6 idle frames */
1638         val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1639         intel_dp->psr.dc3co_exit_delay = val;
1640         intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1641         intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1642         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1643         intel_dp->psr.req_psr2_sdp_prior_scanline =
1644                 crtc_state->req_psr2_sdp_prior_scanline;
1645
1646         if (!psr_interrupt_error_check(intel_dp))
1647                 return;
1648
1649         if (intel_dp->psr.panel_replay_enabled)
1650                 drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1651         else
1652                 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1653                             intel_dp->psr.psr2_enabled ? "2" : "1");
1654
1655         intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1656         intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1657         intel_psr_enable_sink(intel_dp);
1658         intel_psr_enable_source(intel_dp, crtc_state);
1659         intel_dp->psr.enabled = true;
1660         intel_dp->psr.paused = false;
1661
1662         intel_psr_activate(intel_dp);
1663 }
1664
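/*
 * Deactivate PSR/PSR2/Panel Replay on the source side. If PSR was never
 * activated, only sanity check that the hardware enable bits are indeed
 * clear.
 */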
1665 static void intel_psr_exit(struct intel_dp *intel_dp)
1666 {
1667         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1668         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1669         u32 val;
1670
1671         if (!intel_dp->psr.active) {
1672                 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1673                         val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1674                         drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1675                 }
1676
1677                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1678                 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1679
1680                 return;
1681         }
1682
1683         if (intel_dp->psr.panel_replay_enabled) {
1684                 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1685                              TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1686         } else if (intel_dp->psr.psr2_enabled) {
1687                 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1688
1689                 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1690                                    EDP_PSR2_ENABLE, 0);
1691
1692                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1693         } else {
1694                 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1695                                    EDP_PSR_ENABLE, 0);
1696
1697                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1698         }
1699         intel_dp->psr.active = false;
1700 }
1701
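/* Wait for the hardware to report an idle PSR state after deactivation. */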
1702 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1703 {
1704         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1705         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1706         i915_reg_t psr_status;
1707         u32 psr_status_mask;
1708
1709         if (intel_dp->psr.psr2_enabled) {
1710                 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1711                 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1712         } else {
1713                 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1714                 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1715         }
1716
1717         /* Wait till PSR is idle */
1718         if (intel_de_wait_for_clear(dev_priv, psr_status,
1719                                     psr_status_mask, 2000))
1720                 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1721 }
1722
1723 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1724 {
1725         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1726         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1727         enum phy phy = intel_port_to_phy(dev_priv,
1728                                          dp_to_dig_port(intel_dp)->base.port);
1729
1730         lockdep_assert_held(&intel_dp->psr.lock);
1731
1732         if (!intel_dp->psr.enabled)
1733                 return;
1734
1735         if (intel_dp->psr.panel_replay_enabled)
1736                 drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1737         else
1738                 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1739                             intel_dp->psr.psr2_enabled ? "2" : "1");
1740
1741         intel_psr_exit(intel_dp);
1742         intel_psr_wait_exit_locked(intel_dp);
1743
1744         /*
1745          * Wa_16013835468
1746          * Wa_14015648006
1747          */
1748         if (DISPLAY_VER(dev_priv) >= 11)
1749                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1750                              wa_16013835468_bit_get(intel_dp), 0);
1751
1752         if (intel_dp->psr.psr2_enabled) {
1753                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1754                 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1755                         intel_de_rmw(dev_priv,
1756                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1757                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1758                 else if (IS_ALDERLAKE_P(dev_priv))
1759                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1760                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1761         }
1762
1763         intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1764
1765         /* Disable PSR on Sink */
1766         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1767
1768         if (intel_dp->psr.psr2_enabled)
1769                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1770
1771         intel_dp->psr.enabled = false;
1772         intel_dp->psr.panel_replay_enabled = false;
1773         intel_dp->psr.psr2_enabled = false;
1774         intel_dp->psr.psr2_sel_fetch_enabled = false;
1775         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1776 }
1777
1778 /**
1779  * intel_psr_disable - Disable PSR
1780  * @intel_dp: Intel DP
1781  * @old_crtc_state: old CRTC state
1782  *
1783  * This function needs to be called before disabling pipe.
1784  */
1785 void intel_psr_disable(struct intel_dp *intel_dp,
1786                        const struct intel_crtc_state *old_crtc_state)
1787 {
1788         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1789
1790         if (!old_crtc_state->has_psr)
1791                 return;
1792
1793         if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1794                 return;
1795
1796         mutex_lock(&intel_dp->psr.lock);
1797
1798         intel_psr_disable_locked(intel_dp);
1799
1800         mutex_unlock(&intel_dp->psr.lock);
1801         cancel_work_sync(&intel_dp->psr.work);
1802         cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1803 }
1804
1805 /**
1806  * intel_psr_pause - Pause PSR
1807  * @intel_dp: Intel DP
1808  *
1809  * This function needs to be called after enabling PSR.
1810  */
1811 void intel_psr_pause(struct intel_dp *intel_dp)
1812 {
1813         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1814         struct intel_psr *psr = &intel_dp->psr;
1815
1816         if (!CAN_PSR(intel_dp))
1817                 return;
1818
1819         mutex_lock(&psr->lock);
1820
1821         if (!psr->enabled) {
1822                 mutex_unlock(&psr->lock);
1823                 return;
1824         }
1825
1826         /* If we ever hit this, we will need to add refcount to pause/resume */
1827         drm_WARN_ON(&dev_priv->drm, psr->paused);
1828
1829         intel_psr_exit(intel_dp);
1830         intel_psr_wait_exit_locked(intel_dp);
1831         psr->paused = true;
1832
1833         mutex_unlock(&psr->lock);
1834
1835         cancel_work_sync(&psr->work);
1836         cancel_delayed_work_sync(&psr->dc3co_work);
1837 }
1838
1839 /**
1840  * intel_psr_resume - Resume PSR
1841  * @intel_dp: Intel DP
1842  *
1843  * This function needs to be called after pausing PSR.
1844  */
1845 void intel_psr_resume(struct intel_dp *intel_dp)
1846 {
1847         struct intel_psr *psr = &intel_dp->psr;
1848
1849         if (!CAN_PSR(intel_dp))
1850                 return;
1851
1852         mutex_lock(&psr->lock);
1853
1854         if (!psr->paused)
1855                 goto unlock;
1856
1857         psr->paused = false;
1858         intel_psr_activate(intel_dp);
1859
1860 unlock:
1861         mutex_unlock(&psr->lock);
1862 }
1863
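/*
 * ADL-P and display 14+ use a different PSR2_MAN_TRK_CTL bit layout than
 * older platforms; these helpers return the bit matching the running
 * hardware.
 */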
1864 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1865 {
1866         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1867                 PSR2_MAN_TRK_CTL_ENABLE;
1868 }
1869
1870 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1871 {
1872         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1873                ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1874                PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1875 }
1876
1877 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1878 {
1879         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1880                ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1881                PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1882 }
1883
1884 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1885 {
1886         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1887                ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1888                PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1889 }
1890
1891 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1892 {
1893         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1894         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1895
1896         if (intel_dp->psr.psr2_sel_fetch_enabled)
1897                 intel_de_write(dev_priv,
1898                                PSR2_MAN_TRK_CTL(cpu_transcoder),
1899                                man_trk_ctl_enable_bit_get(dev_priv) |
1900                                man_trk_ctl_partial_frame_bit_get(dev_priv) |
1901                                man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1902                                man_trk_ctl_continuos_full_frame(dev_priv));
1903
1904         /*
1905          * Display WA #0884: skl+
1906          * This documented WA for bxt can be safely applied
1907          * broadly so we can force HW tracking to exit PSR
1908          * instead of disabling and re-enabling.
1909          * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1910          * but it makes more sense to write to the currently active
1911          * pipe.
1912          *
1913          * This workaround is not documented for platforms with display 10 or
1914          * newer, but testing proved that it works up to display 13; for newer
1915          * than that, further testing will be needed.
1916          */
1917         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1918 }
1919
1920 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1921 {
1922         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1923         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1924         struct intel_encoder *encoder;
1925
1926         if (!crtc_state->enable_psr2_sel_fetch)
1927                 return;
1928
1929         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1930                                              crtc_state->uapi.encoder_mask) {
1931                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1932
1933                 lockdep_assert_held(&intel_dp->psr.lock);
1934                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1935                         return;
1936                 break;
1937         }
1938
1939         intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1940                        crtc_state->psr2_man_track_ctl);
1941 }
1942
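/*
 * Compute the PSR2_MAN_TRK_CTL value for this commit: either a single +
 * continuous full frame update, or a selective update region derived from
 * the damage clip.
 */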
1943 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1944                                   struct drm_rect *clip, bool full_update)
1945 {
1946         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1947         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1948         u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1949
1950         /* SF partial frame enable has to be set even on full update */
1951         val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1952
1953         if (full_update) {
1954                 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1955                 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1956                 goto exit;
1957         }
1958
1959         if (clip->y1 == -1)
1960                 goto exit;
1961
1962         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1963                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1964                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1965         } else {
1966                 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1967
1968                 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1969                 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1970         }
1971 exit:
1972         crtc_state->psr2_man_track_ctl = val;
1973 }
1974
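/*
 * Grow the accumulated damage area to also cover @damage_area, after
 * clipping it against the pipe source rectangle.
 */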
1975 static void clip_area_update(struct drm_rect *overlap_damage_area,
1976                              struct drm_rect *damage_area,
1977                              struct drm_rect *pipe_src)
1978 {
1979         if (!drm_rect_intersect(damage_area, pipe_src))
1980                 return;
1981
1982         if (overlap_damage_area->y1 == -1) {
1983                 overlap_damage_area->y1 = damage_area->y1;
1984                 overlap_damage_area->y2 = damage_area->y2;
1985                 return;
1986         }
1987
1988         if (damage_area->y1 < overlap_damage_area->y1)
1989                 overlap_damage_area->y1 = damage_area->y1;
1990
1991         if (damage_area->y2 > overlap_damage_area->y2)
1992                 overlap_damage_area->y2 = damage_area->y2;
1993 }
1994
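/*
 * Align the selective update region vertically to the granularity required
 * by the panel (or to the DSC slice height when compression is enabled).
 */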
1995 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1996                                                 struct drm_rect *pipe_clip)
1997 {
1998         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1999         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2000         u16 y_alignment;
2001
2002         /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2003         if (crtc_state->dsc.compression_enable &&
2004             (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2005                 y_alignment = vdsc_cfg->slice_height;
2006         else
2007                 y_alignment = crtc_state->su_y_granularity;
2008
2009         pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
2010         if (pipe_clip->y2 % y_alignment)
2011                 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
2012 }
2013
2014 /*
2015  * TODO: It is not clear how to handle planes with a negative position;
2016  * also, planes are not updated if they have a negative X
2017  * position, so for now do a full update in these cases.
2018  *
2019  * Plane scaling and rotation are not supported by selective fetch and both
2020  * properties can change without a modeset, so they need to be checked at
2021  * every atomic commit.
2022  */
2023 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2024 {
2025         if (plane_state->uapi.dst.y1 < 0 ||
2026             plane_state->uapi.dst.x1 < 0 ||
2027             plane_state->scaler_id >= 0 ||
2028             plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2029                 return false;
2030
2031         return true;
2032 }
2033
2034 /*
2035  * Check for pipe properties that are not supported by selective fetch.
2036  *
2037  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2038  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2039  * enabled and going to the full update path.
2040  */
2041 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2042 {
2043         if (crtc_state->scaler_state.scaler_id >= 0)
2044                 return false;
2045
2046         return true;
2047 }
2048
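/*
 * Walk the planes in the atomic state, accumulate the pipe damaged area and
 * program each plane's selective fetch area, falling back to a full frame
 * update whenever the configuration is not supported.
 */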
2049 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2050                                 struct intel_crtc *crtc)
2051 {
2052         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2053         struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2054         struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
2055         struct intel_plane_state *new_plane_state, *old_plane_state;
2056         struct intel_plane *plane;
2057         bool full_update = false;
2058         int i, ret;
2059
2060         if (!crtc_state->enable_psr2_sel_fetch)
2061                 return 0;
2062
2063         if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2064                 full_update = true;
2065                 goto skip_sel_fetch_set_loop;
2066         }
2067
2068         /*
2069          * Calculate the minimal selective fetch area of each plane and
2070          * the pipe damaged area.
2071          * In the next loop the plane selective fetch area will actually be set
2072          * using the whole pipe damaged area.
2073          */
2074         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2075                                              new_plane_state, i) {
2076                 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2077                                                       .x2 = INT_MAX };
2078
2079                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2080                         continue;
2081
2082                 if (!new_plane_state->uapi.visible &&
2083                     !old_plane_state->uapi.visible)
2084                         continue;
2085
2086                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2087                         full_update = true;
2088                         break;
2089                 }
2090
2091                 /*
2092                  * If the visibility changed or the plane moved, mark the whole plane
2093                  * area as damaged, as it needs a complete redraw in both the old and
2094                  * new positions.
2095                  */
2096                 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2097                     !drm_rect_equals(&new_plane_state->uapi.dst,
2098                                      &old_plane_state->uapi.dst)) {
2099                         if (old_plane_state->uapi.visible) {
2100                                 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2101                                 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2102                                 clip_area_update(&pipe_clip, &damaged_area,
2103                                                  &crtc_state->pipe_src);
2104                         }
2105
2106                         if (new_plane_state->uapi.visible) {
2107                                 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2108                                 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2109                                 clip_area_update(&pipe_clip, &damaged_area,
2110                                                  &crtc_state->pipe_src);
2111                         }
2112                         continue;
2113                 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2114                         /* If alpha changed mark the whole plane area as damaged */
2115                         damaged_area.y1 = new_plane_state->uapi.dst.y1;
2116                         damaged_area.y2 = new_plane_state->uapi.dst.y2;
2117                         clip_area_update(&pipe_clip, &damaged_area,
2118                                          &crtc_state->pipe_src);
2119                         continue;
2120                 }
2121
2122                 src = drm_plane_state_src(&new_plane_state->uapi);
2123                 drm_rect_fp_to_int(&src, &src);
2124
2125                 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2126                                                      &new_plane_state->uapi, &damaged_area))
2127                         continue;
2128
2129                 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2130                 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2131                 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2132                 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2133
2134                 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2135         }
2136
2137         /*
2138          * TODO: For now we are just using full update in case
2139          * selective fetch area calculation fails. To optimize this we
2140          * should identify cases where this happens and fix the area
2141          * calculation for those.
2142          */
2143         if (pipe_clip.y1 == -1) {
2144                 drm_info_once(&dev_priv->drm,
2145                               "Selective fetch area calculation failed in pipe %c\n",
2146                               pipe_name(crtc->pipe));
2147                 full_update = true;
2148         }
2149
2150         if (full_update)
2151                 goto skip_sel_fetch_set_loop;
2152
2153         /* Wa_14014971492 */
2154         if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2155              IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2156             crtc_state->splitter.enable)
2157                 pipe_clip.y1 = 0;
2158
2159         ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2160         if (ret)
2161                 return ret;
2162
2163         intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2164
2165         /*
2166          * Now that we have the pipe damaged area, check if it intersects with
2167          * each plane; if it does, set the plane selective fetch area.
2168          */
2169         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2170                                              new_plane_state, i) {
2171                 struct drm_rect *sel_fetch_area, inter;
2172                 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2173
2174                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2175                     !new_plane_state->uapi.visible)
2176                         continue;
2177
2178                 inter = pipe_clip;
2179                 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2180                 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2181                         sel_fetch_area->y1 = -1;
2182                         sel_fetch_area->y2 = -1;
2183                         /*
2184                          * if plane sel fetch was previously enabled ->
2185                          * disable it
2186                          */
2187                         if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2188                                 crtc_state->update_planes |= BIT(plane->id);
2189
2190                         continue;
2191                 }
2192
2193                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2194                         full_update = true;
2195                         break;
2196                 }
2197
2198                 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2199                 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2200                 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2201                 crtc_state->update_planes |= BIT(plane->id);
2202
2203                 /*
2204                  * Sel_fetch_area is calculated for UV plane. Use
2205                  * same area for Y plane as well.
2206                  */
2207                 if (linked) {
2208                         struct intel_plane_state *linked_new_plane_state;
2209                         struct drm_rect *linked_sel_fetch_area;
2210
2211                         linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2212                         if (IS_ERR(linked_new_plane_state))
2213                                 return PTR_ERR(linked_new_plane_state);
2214
2215                         linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2216                         linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2217                         linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2218                         crtc_state->update_planes |= BIT(linked->id);
2219                 }
2220         }
2221
2222 skip_sel_fetch_set_loop:
2223         psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2224         return 0;
2225 }
2226
2227 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2228                                 struct intel_crtc *crtc)
2229 {
2230         struct drm_i915_private *i915 = to_i915(state->base.dev);
2231         const struct intel_crtc_state *old_crtc_state =
2232                 intel_atomic_get_old_crtc_state(state, crtc);
2233         const struct intel_crtc_state *new_crtc_state =
2234                 intel_atomic_get_new_crtc_state(state, crtc);
2235         struct intel_encoder *encoder;
2236
2237         if (!HAS_PSR(i915))
2238                 return;
2239
2240         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2241                                              old_crtc_state->uapi.encoder_mask) {
2242                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2243                 struct intel_psr *psr = &intel_dp->psr;
2244                 bool needs_to_disable = false;
2245
2246                 mutex_lock(&psr->lock);
2247
2248                 /*
2249                  * Reasons to disable:
2250                  * - PSR disabled in new state
2251                  * - All planes will go inactive
2252                  * - Changing between PSR versions
2253                  * - Display WA #1136: skl, bxt
2254                  */
2255                 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2256                 needs_to_disable |= !new_crtc_state->has_psr;
2257                 needs_to_disable |= !new_crtc_state->active_planes;
2258                 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2259                 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2260                         new_crtc_state->wm_level_disabled;
2261
2262                 if (psr->enabled && needs_to_disable)
2263                         intel_psr_disable_locked(intel_dp);
2264                 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2265                         /* Wa_14015648006 */
2266                         wm_optimization_wa(intel_dp, new_crtc_state);
2267
2268                 mutex_unlock(&psr->lock);
2269         }
2270 }
2271
2272 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2273                                  struct intel_crtc *crtc)
2274 {
2275         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2276         const struct intel_crtc_state *crtc_state =
2277                 intel_atomic_get_new_crtc_state(state, crtc);
2278         struct intel_encoder *encoder;
2279
2280         if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2281                 return;
2282
2283         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2284                                              crtc_state->uapi.encoder_mask) {
2285                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2286                 struct intel_psr *psr = &intel_dp->psr;
2287                 bool keep_disabled = false;
2288
2289                 mutex_lock(&psr->lock);
2290
2291                 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2292
2293                 keep_disabled |= psr->sink_not_reliable;
2294                 keep_disabled |= !crtc_state->active_planes;
2295
2296                 /* Display WA #1136: skl, bxt */
2297                 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2298                         crtc_state->wm_level_disabled;
2299
2300                 if (!psr->enabled && !keep_disabled)
2301                         intel_psr_enable_locked(intel_dp, crtc_state);
2302                 else if (psr->enabled && !crtc_state->wm_level_disabled)
2303                         /* Wa_14015648006 */
2304                         wm_optimization_wa(intel_dp, crtc_state);
2305
2306                 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2307                 if (crtc_state->crc_enabled && psr->enabled)
2308                         psr_force_hw_tracking_exit(intel_dp);
2309
2310                 /*
2311                  * Clear possible busy bits in case we have
2312                  * invalidate -> flip -> flush sequence.
2313                  */
2314                 intel_dp->psr.busy_frontbuffer_bits = 0;
2315
2316                 mutex_unlock(&psr->lock);
2317         }
2318 }
2319
2320 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2321 {
2322         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2323         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2324
2325         /*
2326          * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2327          * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2328          * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2329          */
2330         return intel_de_wait_for_clear(dev_priv,
2331                                        EDP_PSR2_STATUS(cpu_transcoder),
2332                                        EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2333 }
2334
2335 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2336 {
2337         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2338         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2339
2340         /*
2341          * From bspec: Panel Self Refresh (BDW+)
2342          * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2343          * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2344          * defensive enough to cover everything.
2345          */
2346         return intel_de_wait_for_clear(dev_priv,
2347                                        psr_status_reg(dev_priv, cpu_transcoder),
2348                                        EDP_PSR_STATUS_STATE_MASK, 50);
2349 }
2350
2351 /**
2352  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2353  * @new_crtc_state: new CRTC state
2354  *
2355  * This function is expected to be called from pipe_update_start() where it is
2356  * not expected to race with PSR enable or disable.
2357  */
2358 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2359 {
2360         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2361         struct intel_encoder *encoder;
2362
2363         if (!new_crtc_state->has_psr)
2364                 return;
2365
2366         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2367                                              new_crtc_state->uapi.encoder_mask) {
2368                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2369                 int ret;
2370
2371                 lockdep_assert_held(&intel_dp->psr.lock);
2372
2373                 if (!intel_dp->psr.enabled)
2374                         continue;
2375
2376                 if (intel_dp->psr.psr2_enabled)
2377                         ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2378                 else
2379                         ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2380
2381                 if (ret)
2382                         drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2383         }
2384 }
2385
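/*
 * Drop psr.lock while waiting for the hardware to go idle, then re-take it
 * and report whether PSR is still enabled, since the state may have changed
 * while the lock was released.
 */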
2386 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2387 {
2388         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2389         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2390         i915_reg_t reg;
2391         u32 mask;
2392         int err;
2393
2394         if (!intel_dp->psr.enabled)
2395                 return false;
2396
2397         if (intel_dp->psr.psr2_enabled) {
2398                 reg = EDP_PSR2_STATUS(cpu_transcoder);
2399                 mask = EDP_PSR2_STATUS_STATE_MASK;
2400         } else {
2401                 reg = psr_status_reg(dev_priv, cpu_transcoder);
2402                 mask = EDP_PSR_STATUS_STATE_MASK;
2403         }
2404
2405         mutex_unlock(&intel_dp->psr.lock);
2406
2407         err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2408         if (err)
2409                 drm_err(&dev_priv->drm,
2410                         "Timed out waiting for PSR Idle for re-enable\n");
2411
2412         /* After the unlocked wait, verify that PSR is still wanted! */
2413         mutex_lock(&intel_dp->psr.lock);
2414         return err == 0 && intel_dp->psr.enabled;
2415 }
2416
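/*
 * Force an atomic commit on every eDP connector, marking the mode as changed
 * so that the updated PSR debug setting is taken into account on the next
 * compute config/enable cycle.
 */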
2417 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2418 {
2419         struct drm_connector_list_iter conn_iter;
2420         struct drm_modeset_acquire_ctx ctx;
2421         struct drm_atomic_state *state;
2422         struct drm_connector *conn;
2423         int err = 0;
2424
2425         state = drm_atomic_state_alloc(&dev_priv->drm);
2426         if (!state)
2427                 return -ENOMEM;
2428
2429         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2430
2431         state->acquire_ctx = &ctx;
2432         to_intel_atomic_state(state)->internal = true;
2433
2434 retry:
2435         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2436         drm_for_each_connector_iter(conn, &conn_iter) {
2437                 struct drm_connector_state *conn_state;
2438                 struct drm_crtc_state *crtc_state;
2439
2440                 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2441                         continue;
2442
2443                 conn_state = drm_atomic_get_connector_state(state, conn);
2444                 if (IS_ERR(conn_state)) {
2445                         err = PTR_ERR(conn_state);
2446                         break;
2447                 }
2448
2449                 if (!conn_state->crtc)
2450                         continue;
2451
2452                 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2453                 if (IS_ERR(crtc_state)) {
2454                         err = PTR_ERR(crtc_state);
2455                         break;
2456                 }
2457
2458                 /* Mark mode as changed to trigger a pipe->update() */
2459                 crtc_state->mode_changed = true;
2460         }
2461         drm_connector_list_iter_end(&conn_iter);
2462
2463         if (err == 0)
2464                 err = drm_atomic_commit(state);
2465
2466         if (err == -EDEADLK) {
2467                 drm_atomic_state_clear(state);
2468                 err = drm_modeset_backoff(&ctx);
2469                 if (!err)
2470                         goto retry;
2471         }
2472
2473         drm_modeset_drop_locks(&ctx);
2474         drm_modeset_acquire_fini(&ctx);
2475         drm_atomic_state_put(state);
2476
2477         return err;
2478 }
2479
2480 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2481 {
2482         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2483         const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2484         u32 old_mode;
2485         int ret;
2486
2487         if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2488             mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2489                 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2490                 return -EINVAL;
2491         }
2492
2493         ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2494         if (ret)
2495                 return ret;
2496
2497         old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2498         intel_dp->psr.debug = val;
2499
2500         /*
2501          * Do it right away if it's already enabled, otherwise it will be done
2502          * when enabling the source.
2503          */
2504         if (intel_dp->psr.enabled)
2505                 psr_irq_control(intel_dp);
2506
2507         mutex_unlock(&intel_dp->psr.lock);
2508
2509         if (old_mode != mode)
2510                 ret = intel_psr_fastset_force(dev_priv);
2511
2512         return ret;
2513 }
2514
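/*
 * A PSR error was signalled via interrupt: disable PSR, mark the sink as
 * not reliable and make sure it is awake.
 */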
2515 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2516 {
2517         struct intel_psr *psr = &intel_dp->psr;
2518
2519         intel_psr_disable_locked(intel_dp);
2520         psr->sink_not_reliable = true;
2521         /* let's make sure that the sink is awake */
2522         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2523 }
2524
2525 static void intel_psr_work(struct work_struct *work)
2526 {
2527         struct intel_dp *intel_dp =
2528                 container_of(work, typeof(*intel_dp), psr.work);
2529
2530         mutex_lock(&intel_dp->psr.lock);
2531
2532         if (!intel_dp->psr.enabled)
2533                 goto unlock;
2534
2535         if (READ_ONCE(intel_dp->psr.irq_aux_error))
2536                 intel_psr_handle_irq(intel_dp);
2537
2538         /*
2539          * We have to make sure PSR is ready for re-enable,
2540          * otherwise it stays disabled until the next full enable/disable cycle.
2541          * PSR might take some time to get fully disabled
2542          * and be ready for re-enable.
2543          */
2544         if (!__psr_wait_for_idle_locked(intel_dp))
2545                 goto unlock;
2546
2547         /*
2548          * The delayed work can race with an invalidate hence we need to
2549          * recheck. Since psr_flush first clears this and then reschedules we
2550          * won't ever miss a flush when bailing out here.
2551          */
2552         if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2553                 goto unlock;
2554
2555         intel_psr_activate(intel_dp);
2556 unlock:
2557         mutex_unlock(&intel_dp->psr.lock);
2558 }
2559
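/*
 * On invalidate, either switch selective fetch to continuous full frame
 * updates (keeping PSR enabled) or, without selective fetch, exit PSR until
 * the next flush.
 */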
2560 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2561 {
2562         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2563         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2564
2565         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2566                 u32 val;
2567
2568                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2569                         /* Send one update, otherwise lag is observed on screen */
2570                         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2571                         return;
2572                 }
2573
2574                 val = man_trk_ctl_enable_bit_get(dev_priv) |
2575                       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2576                       man_trk_ctl_continuos_full_frame(dev_priv);
2577                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2578                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2579                 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2580         } else {
2581                 intel_psr_exit(intel_dp);
2582         }
2583 }
2584
2585 /**
2586  * intel_psr_invalidate - Invalidate PSR
2587  * @dev_priv: i915 device
2588  * @frontbuffer_bits: frontbuffer plane tracking bits
2589  * @origin: which operation caused the invalidate
2590  *
2591  * Since the hardware frontbuffer tracking has gaps we need to integrate
2592  * with the software frontbuffer tracking. This function gets called every
2593  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2594  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2595  *
2596  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2597  */
2598 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2599                           unsigned frontbuffer_bits, enum fb_op_origin origin)
2600 {
2601         struct intel_encoder *encoder;
2602
2603         if (origin == ORIGIN_FLIP)
2604                 return;
2605
2606         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2607                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2608                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2609
2610                 mutex_lock(&intel_dp->psr.lock);
2611                 if (!intel_dp->psr.enabled) {
2612                         mutex_unlock(&intel_dp->psr.lock);
2613                         continue;
2614                 }
2615
2616                 pipe_frontbuffer_bits &=
2617                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2618                 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2619
2620                 if (pipe_frontbuffer_bits)
2621                         _psr_invalidate_handle(intel_dp);
2622
2623                 mutex_unlock(&intel_dp->psr.lock);
2624         }
2625 }
2626 /*
2627  * Once we completely rely on PSR2 S/W tracking in the future,
2628  * intel_psr_flush() will also invalidate and flush the PSR for ORIGIN_FLIP
2629  * events, so tgl_dc3co_flush_locked() will need to be changed
2630  * accordingly.
2631  */
2632 static void
2633 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2634                        enum fb_op_origin origin)
2635 {
2636         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2637
2638         if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2639             !intel_dp->psr.active)
2640                 return;
2641
2642         /*
2643          * Every frontbuffer flush/flip event modifies the delay of the delayed
2644          * work; when the delayed work finally runs, the display has been idle.
2645          */
2646         if (!(frontbuffer_bits &
2647             INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2648                 return;
2649
2650         tgl_psr2_enable_dc3co(intel_dp);
2651         mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2652                          intel_dp->psr.dc3co_exit_delay);
2653 }
2654
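/*
 * On flush, either drop the continuous full frame mode once no frontbuffer
 * bits remain busy, or force a HW tracking exit (single full frame) and, if
 * needed, schedule the work that re-activates PSR.
 */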
2655 static void _psr_flush_handle(struct intel_dp *intel_dp)
2656 {
2657         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2658         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2659
2660         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2661                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2662                         /* can we turn CFF off? */
2663                         if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2664                                 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2665                                         man_trk_ctl_partial_frame_bit_get(dev_priv) |
2666                                         man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2667                                         man_trk_ctl_continuos_full_frame(dev_priv);
2668
2669                                 /*
2670                                  * Set psr2_sel_fetch_cff_enabled to false to allow selective
2671                                  * updates. Still keep the CFF bit enabled as we don't have a proper
2672                                  * SU configuration in case an update is sent for any reason after
2673                                  * the SFF bit gets cleared by the HW on the next vblank.
2674                                  */
2675                                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2676                                                val);
2677                                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2678                                 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2679                         }
2680                 } else {
2681                         /*
2682                          * continuous full frame is disabled, only a single full
2683                          * frame is required
2684                          */
2685                         psr_force_hw_tracking_exit(intel_dp);
2686                 }
2687         } else {
2688                 psr_force_hw_tracking_exit(intel_dp);
2689
2690                 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2691                         queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2692         }
2693 }
2694
2695 /**
2696  * intel_psr_flush - Flush PSR
2697  * @dev_priv: i915 device
2698  * @frontbuffer_bits: frontbuffer plane tracking bits
2699  * @origin: which operation caused the flush
2700  *
2701  * Since the hardware frontbuffer tracking has gaps we need to integrate
2702  * with the software frontbuffer tracking. This function gets called every
2703  * time frontbuffer rendering has completed and flushed out to memory. PSR
2704  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2705  *
2706  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2707  */
2708 void intel_psr_flush(struct drm_i915_private *dev_priv,
2709                      unsigned frontbuffer_bits, enum fb_op_origin origin)
2710 {
2711         struct intel_encoder *encoder;
2712
2713         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2714                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2715                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2716
2717                 mutex_lock(&intel_dp->psr.lock);
2718                 if (!intel_dp->psr.enabled) {
2719                         mutex_unlock(&intel_dp->psr.lock);
2720                         continue;
2721                 }
2722
2723                 pipe_frontbuffer_bits &=
2724                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2725                 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2726
2727                 /*
2728                  * If the PSR is paused by an explicit intel_psr_paused() call,
2729                  * we have to ensure that the PSR is not activated until
2730                  * intel_psr_resume() is called.
2731                  */
2732                 if (intel_dp->psr.paused)
2733                         goto unlock;
2734
2735                 if (origin == ORIGIN_FLIP ||
2736                     (origin == ORIGIN_CURSOR_UPDATE &&
2737                      !intel_dp->psr.psr2_sel_fetch_enabled)) {
2738                         tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2739                         goto unlock;
2740                 }
2741
2742                 if (pipe_frontbuffer_bits == 0)
2743                         goto unlock;
2744
2745                 /* By definition flush = invalidate + flush */
2746                 _psr_flush_handle(intel_dp);
2747 unlock:
2748                 mutex_unlock(&intel_dp->psr.lock);
2749         }
2750 }
2751
2752 /**
2753  * intel_psr_init - Init basic PSR work and mutex.
2754  * @intel_dp: Intel DP
2755  *
2756  * This function is called after the connector has been initialized
2757  * (connector initialization handles the connector capabilities) and it
2758  * sets up the basic PSR state for each DP encoder.
2759  */
2760 void intel_psr_init(struct intel_dp *intel_dp)
2761 {
2762         struct intel_connector *connector = intel_dp->attached_connector;
2763         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2764         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2765
2766         if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2767                 return;
2768
2769         if (!intel_dp_is_edp(intel_dp))
2770                 intel_psr_init_dpcd(intel_dp);
2771
2772         /*
2773          * HSW spec explicitly says PSR is tied to port A.
2774          * BDW+ platforms have an instance of the PSR registers per transcoder,
2775          * but on BDW, GEN9 and GEN11 the HW team has only validated the eDP
2776          * transcoder.
2777          * For now only one instance of PSR is supported on BDW, GEN9 and
2778          * GEN11, so keep it hardcoded to PORT_A there.
2779          * GEN12, however, supports an instance of the PSR registers per transcoder.
2780          */
2781         if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2782                 drm_dbg_kms(&dev_priv->drm,
2783                             "PSR condition failed: Port not supported\n");
2784                 return;
2785         }
2786
2787         if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2788                 intel_dp->psr.source_panel_replay_support = true;
2789         else
2790                 intel_dp->psr.source_support = true;
2791
2792         /* Set link_standby vs. link_off defaults */
2793         if (DISPLAY_VER(dev_priv) < 12)
2794                 /* For platforms up to TGL, respect the VBT again */
2795                 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2796
2797         INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2798         INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2799         mutex_init(&intel_dp->psr.lock);
2800 }
2801
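/*
 * Read the sink status and error status over DPCD, using the Panel Replay
 * register offsets when Panel Replay is enabled and the PSR ones otherwise.
 * The returned status is masked down to the sink state field. Returns 0 on
 * success, otherwise the raw AUX read result.
 */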
2802 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2803                                            u8 *status, u8 *error_status)
2804 {
2805         struct drm_dp_aux *aux = &intel_dp->aux;
2806         int ret;
2807         unsigned int offset;
2808
2809         offset = intel_dp->psr.panel_replay_enabled ?
2810                  DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2811
2812         ret = drm_dp_dpcd_readb(aux, offset, status);
2813         if (ret != 1)
2814                 return ret;
2815
2816         offset = intel_dp->psr.panel_replay_enabled ?
2817                  DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2818
2819         ret = drm_dp_dpcd_readb(aux, offset, error_status);
2820         if (ret != 1)
2821                 return ret;
2822
2823         *status = *status & DP_PSR_SINK_STATE_MASK;
2824
2825         return 0;
2826 }
2827
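/*
 * Check the sink's ALPM status (PSR2 only): on a lock timeout error,
 * disable PSR, mark the sink as not reliable and clear the error bit.
 */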
2828 static void psr_alpm_check(struct intel_dp *intel_dp)
2829 {
2830         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2831         struct drm_dp_aux *aux = &intel_dp->aux;
2832         struct intel_psr *psr = &intel_dp->psr;
2833         u8 val;
2834         int r;
2835
2836         if (!psr->psr2_enabled)
2837                 return;
2838
2839         r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2840         if (r != 1) {
2841                 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2842                 return;
2843         }
2844
2845         if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2846                 intel_psr_disable_locked(intel_dp);
2847                 psr->sink_not_reliable = true;
2848                 drm_dbg_kms(&dev_priv->drm,
2849                             "ALPM lock timeout error, disabling PSR\n");
2850
2851                 /* Clearing error */
2852                 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2853         }
2854 }
2855
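/*
 * If the sink signals a PSR capability change through DP_PSR_ESI, disable
 * PSR, mark the sink as not reliable and acknowledge the event.
 */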
2856 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2857 {
2858         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2859         struct intel_psr *psr = &intel_dp->psr;
2860         u8 val;
2861         int r;
2862
2863         r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2864         if (r != 1) {
2865                 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2866                 return;
2867         }
2868
2869         if (val & DP_PSR_CAPS_CHANGE) {
2870                 intel_psr_disable_locked(intel_dp);
2871                 psr->sink_not_reliable = true;
2872                 drm_dbg_kms(&dev_priv->drm,
2873                             "Sink PSR capability changed, disabling PSR\n");
2874
2875                 /* Clearing it */
2876                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2877         }
2878 }
2879
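/*
 * Handle a short HPD pulse from a PSR capable sink: read the sink status
 * and error status and disable PSR (marking the sink as not reliable) on
 * sink internal, RFB storage, VSC SDP or link CRC errors, then check the
 * ALPM and capability-changed bits as well.
 */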
2880 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2881 {
2882         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2883         struct intel_psr *psr = &intel_dp->psr;
2884         u8 status, error_status;
2885         const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2886                           DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2887                           DP_PSR_LINK_CRC_ERROR;
2888
2889         if (!CAN_PSR(intel_dp))
2890                 return;
2891
2892         mutex_lock(&psr->lock);
2893
2894         if (!psr->enabled)
2895                 goto exit;
2896
2897         if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2898                 drm_err(&dev_priv->drm,
2899                         "Error reading PSR status or error status\n");
2900                 goto exit;
2901         }
2902
2903         if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2904                 intel_psr_disable_locked(intel_dp);
2905                 psr->sink_not_reliable = true;
2906         }
2907
2908         if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2909                 drm_dbg_kms(&dev_priv->drm,
2910                             "PSR sink internal error, disabling PSR\n");
2911         if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2912                 drm_dbg_kms(&dev_priv->drm,
2913                             "PSR RFB storage error, disabling PSR\n");
2914         if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2915                 drm_dbg_kms(&dev_priv->drm,
2916                             "PSR VSC SDP uncorrectable error, disabling PSR\n");
2917         if (error_status & DP_PSR_LINK_CRC_ERROR)
2918                 drm_dbg_kms(&dev_priv->drm,
2919                             "PSR Link CRC error, disabling PSR\n");
2920
2921         if (error_status & ~errors)
2922                 drm_err(&dev_priv->drm,
2923                         "PSR_ERROR_STATUS unhandled errors %x\n",
2924                         error_status & ~errors);
2925         /* clear status register */
2926         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2927
2928         psr_alpm_check(intel_dp);
2929         psr_capability_changed_check(intel_dp);
2930
2931 exit:
2932         mutex_unlock(&psr->lock);
2933 }
2934
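/* Locked query of whether PSR is currently enabled on this DP encoder. */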
2935 bool intel_psr_enabled(struct intel_dp *intel_dp)
2936 {
2937         bool ret;
2938
2939         if (!CAN_PSR(intel_dp))
2940                 return false;
2941
2942         mutex_lock(&intel_dp->psr.lock);
2943         ret = intel_dp->psr.enabled;
2944         mutex_unlock(&intel_dp->psr.lock);
2945
2946         return ret;
2947 }
2948
2949 /**
2950  * intel_psr_lock - grab PSR lock
2951  * @crtc_state: the crtc state
2952  *
2953  * This is initially meant to be used around the CRTC update, when
2954  * vblank sensitive registers are updated and we need to grab the lock
2955  * before that to avoid vblank evasion.
2956  */
2957 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2958 {
2959         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2960         struct intel_encoder *encoder;
2961
2962         if (!crtc_state->has_psr)
2963                 return;
2964
2965         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2966                                              crtc_state->uapi.encoder_mask) {
2967                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2968
2969                 mutex_lock(&intel_dp->psr.lock);
2970                 break;
2971         }
2972 }
2973
2974 /**
2975  * intel_psr_unlock - release PSR lock
2976  * @crtc_state: the crtc state
2977  *
2978  * Release the PSR lock that was held during pipe update.
2979  */
2980 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2981 {
2982         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2983         struct intel_encoder *encoder;
2984
2985         if (!crtc_state->has_psr)
2986                 return;
2987
2988         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2989                                              crtc_state->uapi.encoder_mask) {
2990                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2991
2992                 mutex_unlock(&intel_dp->psr.lock);
2993                 break;
2994         }
2995 }
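/*
 * intel_psr_lock()/intel_psr_unlock() are meant to bracket the vblank
 * evading part of a pipe update, e.g. (illustrative only):
 *
 *	intel_psr_lock(new_crtc_state);
 *	... write vblank sensitive registers ...
 *	intel_psr_unlock(new_crtc_state);
 */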
2996
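/*
 * Decode and print the live PSR state machine status from the source's
 * status register (PSR2 and PSR1 use different state encodings).
 */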
2997 static void
2998 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2999 {
3000         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3001         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3002         const char *status = "unknown";
3003         u32 val, status_val;
3004
3005         if (intel_dp->psr.psr2_enabled) {
3006                 static const char * const live_status[] = {
3007                         "IDLE",
3008                         "CAPTURE",
3009                         "CAPTURE_FS",
3010                         "SLEEP",
3011                         "BUFON_FW",
3012                         "ML_UP",
3013                         "SU_STANDBY",
3014                         "FAST_SLEEP",
3015                         "DEEP_SLEEP",
3016                         "BUF_ON",
3017                         "TG_ON"
3018                 };
3019                 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3020                 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3021                 if (status_val < ARRAY_SIZE(live_status))
3022                         status = live_status[status_val];
3023         } else {
3024                 static const char * const live_status[] = {
3025                         "IDLE",
3026                         "SRDONACK",
3027                         "SRDENT",
3028                         "BUFOFF",
3029                         "BUFON",
3030                         "AUXACK",
3031                         "SRDOFFACK",
3032                         "SRDENT_ON",
3033                 };
3034                 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3035                 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3036                 if (status_val < ARRAY_SIZE(live_status))
3037                         status = live_status[status_val];
3038         }
3039
3040         seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3041 }
3042
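/*
 * Dump the full source/sink PSR state for debugfs: sink support, current
 * mode, control and live status registers, busy frontbuffer bits, the
 * performance counter and, for PSR2, the per-frame selective update blocks.
 */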
3043 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3044 {
3045         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3046         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3047         struct intel_psr *psr = &intel_dp->psr;
3048         intel_wakeref_t wakeref;
3049         const char *status;
3050         bool enabled;
3051         u32 val;
3052
3053         seq_printf(m, "Sink support: PSR = %s",
3054                    str_yes_no(psr->sink_support));
3055
3056         if (psr->sink_support)
3057                 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3058         seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3059
3060         if (!(psr->sink_support || psr->sink_panel_replay_support))
3061                 return 0;
3062
3063         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3064         mutex_lock(&psr->lock);
3065
3066         if (psr->panel_replay_enabled)
3067                 status = "Panel Replay Enabled";
3068         else if (psr->enabled)
3069                 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3070         else
3071                 status = "disabled";
3072         seq_printf(m, "PSR mode: %s\n", status);
3073
3074         if (!psr->enabled) {
3075                 seq_printf(m, "PSR sink not reliable: %s\n",
3076                            str_yes_no(psr->sink_not_reliable));
3077
3078                 goto unlock;
3079         }
3080
3081         if (psr->panel_replay_enabled) {
3082                 val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3083                 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3084         } else if (psr->psr2_enabled) {
3085                 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3086                 enabled = val & EDP_PSR2_ENABLE;
3087         } else {
3088                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3089                 enabled = val & EDP_PSR_ENABLE;
3090         }
3091         seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3092                    str_enabled_disabled(enabled), val);
3093         psr_source_status(intel_dp, m);
3094         seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3095                    psr->busy_frontbuffer_bits);
3096
3097         /*
3098          * SKL+ Perf counter is reset to 0 every time DC state is entered
3099          */
3100         val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3101         seq_printf(m, "Performance counter: %u\n",
3102                    REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3103
3104         if (psr->debug & I915_PSR_DEBUG_IRQ) {
3105                 seq_printf(m, "Last attempted entry at: %lld\n",
3106                            psr->last_entry_attempt);
3107                 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3108         }
3109
3110         if (psr->psr2_enabled) {
3111                 u32 su_frames_val[3];
3112                 int frame;
3113
3114                 /*
3115                  * Reading all 3 registers beforehand to minimize crossing a
3116                  * frame boundary between register reads
3117                  */
3118                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3119                         val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3120                         su_frames_val[frame / 3] = val;
3121                 }
3122
3123                 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3124
3125                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3126                         u32 su_blocks;
3127
3128                         su_blocks = su_frames_val[frame / 3] &
3129                                     PSR2_SU_STATUS_MASK(frame);
3130                         su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3131                         seq_printf(m, "%d\t%d\n", frame, su_blocks);
3132                 }
3133
3134                 seq_printf(m, "PSR2 selective fetch: %s\n",
3135                            str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3136         }
3137
3138 unlock:
3139         mutex_unlock(&psr->lock);
3140         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3141
3142         return 0;
3143 }
3144
3145 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3146 {
3147         struct drm_i915_private *dev_priv = m->private;
3148         struct intel_dp *intel_dp = NULL;
3149         struct intel_encoder *encoder;
3150
3151         if (!HAS_PSR(dev_priv))
3152                 return -ENODEV;
3153
3154         /* Find the first eDP encoder which supports PSR */
3155         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3156                 intel_dp = enc_to_intel_dp(encoder);
3157                 break;
3158         }
3159
3160         if (!intel_dp)
3161                 return -ENODEV;
3162
3163         return intel_psr_status(m, intel_dp);
3164 }
3165 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3166
3167 static int
3168 i915_edp_psr_debug_set(void *data, u64 val)
3169 {
3170         struct drm_i915_private *dev_priv = data;
3171         struct intel_encoder *encoder;
3172         intel_wakeref_t wakeref;
3173         int ret = -ENODEV;
3174
3175         if (!HAS_PSR(dev_priv))
3176                 return ret;
3177
3178         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3179                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3180
3181                 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3182
3183                 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3184
3185                 // TODO: split to each transcoder's PSR debug state
3186                 ret = intel_psr_debug_set(intel_dp, val);
3187
3188                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3189         }
3190
3191         return ret;
3192 }
3193
3194 static int
3195 i915_edp_psr_debug_get(void *data, u64 *val)
3196 {
3197         struct drm_i915_private *dev_priv = data;
3198         struct intel_encoder *encoder;
3199
3200         if (!HAS_PSR(dev_priv))
3201                 return -ENODEV;
3202
3203         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3204                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3205
3206                 // TODO: split to each transcoder's PSR debug state
3207                 *val = READ_ONCE(intel_dp->psr.debug);
3208                 return 0;
3209         }
3210
3211         return -ENODEV;
3212 }
3213
3214 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3215                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3216                         "%llu\n");
3217
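/*
 * Register the device level PSR debugfs files; with debugfs mounted these
 * typically show up as i915_edp_psr_debug and i915_edp_psr_status under
 * /sys/kernel/debug/dri/<minor>/.
 */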
3218 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3219 {
3220         struct drm_minor *minor = i915->drm.primary;
3221
3222         debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3223                             i915, &i915_edp_psr_debug_fops);
3224
3225         debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3226                             i915, &i915_edp_psr_status_fops);
3227 }
3228
3229 static const char *psr_mode_str(struct intel_dp *intel_dp)
3230 {
3231         if (intel_dp->psr.panel_replay_enabled)
3232                 return "PANEL-REPLAY";
3233         else if (intel_dp->psr.enabled)
3234                 return "PSR";
3235
3236         return "unknown";
3237 }
3238
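/*
 * debugfs: decode the sink side PSR/Panel Replay status and error bits
 * read over DPCD into human readable form.
 */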
3239 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3240 {
3241         struct intel_connector *connector = m->private;
3242         struct intel_dp *intel_dp = intel_attached_dp(connector);
3243         static const char * const sink_status[] = {
3244                 "inactive",
3245                 "transition to active, capture and display",
3246                 "active, display from RFB",
3247                 "active, capture and display on sink device timings",
3248                 "transition to inactive, capture and display, timing re-sync",
3249                 "reserved",
3250                 "reserved",
3251                 "sink internal error",
3252         };
3253         static const char * const panel_replay_status[] = {
3254                 "Sink device frame is locked to the Source device",
3255                 "Sink device is coasting, using the VTotal target",
3256                 "Sink device is governing the frame rate (frame rate unlock is granted)",
3257                 "Sink device in the process of re-locking with the Source device",
3258         };
3259         const char *str;
3260         int ret;
3261         u8 status, error_status;
3262         u32 idx;
3263
3264         if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3265                 seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3266                 return -ENODEV;
3267         }
3268
3269         if (connector->base.status != connector_status_connected)
3270                 return -ENODEV;
3271
3272         ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3273         if (ret)
3274                 return ret;
3275
3276         str = "unknown";
3277         if (intel_dp->psr.panel_replay_enabled) {
3278                 idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3279                 if (idx < ARRAY_SIZE(panel_replay_status))
3280                         str = panel_replay_status[idx];
3281         } else if (intel_dp->psr.enabled) {
3282                 idx = status & DP_PSR_SINK_STATE_MASK;
3283                 if (idx < ARRAY_SIZE(sink_status))
3284                         str = sink_status[idx];
3285         }
3286
3287         seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3288
3289         seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3290
3291         if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3292                             DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3293                             DP_PSR_LINK_CRC_ERROR))
3294                 seq_puts(m, ":\n");
3295         else
3296                 seq_puts(m, "\n");
3297         if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3298                 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3299         if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3300                 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3301         if (error_status & DP_PSR_LINK_CRC_ERROR)
3302                 seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3303
3304         return ret;
3305 }
3306 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3307
3308 static int i915_psr_status_show(struct seq_file *m, void *data)
3309 {
3310         struct intel_connector *connector = m->private;
3311         struct intel_dp *intel_dp = intel_attached_dp(connector);
3312
3313         return intel_psr_status(m, intel_dp);
3314 }
3315 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3316
3317 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3318 {
3319         struct drm_i915_private *i915 = to_i915(connector->base.dev);
3320         struct dentry *root = connector->base.debugfs_entry;
3321
3322         /* TODO: Add support for MST connectors as well. */
3323         if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3324              connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3325             connector->mst_port)
3326                 return;
3327
3328         debugfs_create_file("i915_psr_sink_status", 0444, root,
3329                             connector, &i915_psr_sink_status_fops);
3330
3331         if (HAS_PSR(i915) || HAS_DP20(i915))
3332                 debugfs_create_file("i915_psr_status", 0444, root,
3333                                     connector, &i915_psr_status_fops);
3334 }