1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_atomic.h"
30 #include "intel_crtc.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_psr_regs.h"
38 #include "intel_snps_phy.h"
39 #include "skl_universal_plane.h"
40
41 /**
42 * DOC: Panel Self Refresh (PSR/SRD)
43 *
44 * Since Haswell the Display controller supports Panel Self-Refresh on display
45 * panels which have a remote frame buffer (RFB) implemented according to the
46 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
47 * when the system is idle but the display is on, as it eliminates display
48 * refresh requests to DDR memory completely as long as the frame buffer for
49 * that display is unchanged.
50 *
51 * Panel Self Refresh must be supported by both Hardware (source) and
52 * Panel (sink).
53 *
54 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
55 * to power down the link and memory controller. For DSI panels the same idea
56 * is called "manual mode".
57 *
58 * The implementation uses the hardware-based PSR support which automatically
59 * enters/exits self-refresh mode. The hardware takes care of sending the
60 * required DP aux message and could even retrain the link (that part isn't
61 * enabled yet though). The hardware also keeps track of any frontbuffer
62 * changes to know when to exit self-refresh mode again. Unfortunately that
63 * part doesn't work too well, hence why the i915 PSR support uses the
64 * software frontbuffer tracking to make sure it doesn't miss a screen
65 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
66 * get called by the frontbuffer tracking code. Note that because of locking
67 * issues the self-refresh re-enable code is done from a work queue, which
68 * must be correctly synchronized/cancelled when shutting down the pipe.
69 *
70 * DC3CO (DC3 clock off)
71 *
72 * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
73 * the clock off automatically during the PSR2 idle state.
74 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
75 * entry/exit allows the HW to enter a low-power state even when page flipping
76 * periodically (for instance a 30fps video playback scenario).
77 *
78 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
79 * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
80 * frames. If no other flip occurs and the function above is executed, DC3CO is
81 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
82 * of another flip.
83 * Front buffer modifications do not trigger DC3CO activation on purpose, as it
84 * would bring a lot of complexity and most modern systems will only use page
85 * flips.
86 */
87
88 /*
89 * Description of PSR mask bits:
90 *
91 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
92 *
93 * When unmasked (nearly) all display register writes (eg. even
94 * SWF) trigger a PSR exit. Some registers are excluded from this
95 * and they have a more specific mask (described below). On icl+
96 * this bit no longer exists and is effectively always set.
97 *
98 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
99 *
100 * When unmasked (nearly) all pipe/plane register writes
101 * trigger a PSR exit. Some plane registers are excluded from this
102 * and they have a more specific mask (described below).
103 *
104 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
105 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
106 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
107 *
108 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
109 * SPR_SURF/CURBASE are not included in this and instead are
110 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
111 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
112 *
113 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
114 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
115 *
116 * When unmasked PSR is blocked as long as the sprite
117 * plane is enabled. skl+ with their universal planes no
118 * longer have a mask bit like this, and no plane being
119 * enabled blocks PSR.
120 *
121 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
122 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
123 *
124 * When unmasked CURPOS writes trigger a PSR exit. On skl+
125 * this doesn't exist, but CURPOS is included in the
126 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
127 *
128 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
129 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
130 *
131 * When unmasked PSR is blocked as long as vblank and/or vsync
132 * interrupt is unmasked in IMR *and* enabled in IER.
133 *
134 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
135 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
136 *
137 * Selects whether PSR exit generates an extra vblank before
138 * the first frame is transmitted. Also note the opposite polarity
139 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
140 * unmasked==do not generate the extra vblank).
141 *
142 * With DC states enabled the extra vblank happens after link training,
143 * with DC states disabled it happens immediately upon PSR exit trigger.
144 * No idea as of now why there is a difference. HSW/BDW (which don't
145 * even have DMC) always generate it after link training. Go figure.
146 *
147 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
148 * and thus won't latch until the first vblank. So with DC states
149 * enabled the register effectively uses the reset value during DC5
150 * exit+PSR exit sequence, and thus the bit does nothing until
151 * latched by the vblank that it was trying to prevent from being
152 * generated in the first place. So we should probably call this
153 * one a chicken/egg bit instead on skl+.
154 *
155 * In standby mode (as opposed to link-off) this makes no difference
156 * as the timing generator keeps running the whole time generating
157 * normal periodic vblanks.
158 *
159 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
160 * and doing so makes the behaviour match the skl+ reset value.
161 *
162 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
163 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
164 *
165 * On BDW without this bit set no vblanks whatsoever are
166 * generated after PSR exit. On HSW this has no apparent effect.
167 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
168 *
169 * The rest of the bits are more self-explanatory and/or
170 * irrelevant for normal operation.
171 */
172
173 static bool psr_global_enabled(struct intel_dp *intel_dp)
174 {
175 struct intel_connector *connector = intel_dp->attached_connector;
176 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
177
178 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
179 case I915_PSR_DEBUG_DEFAULT:
180 if (i915->params.enable_psr == -1)
181 return connector->panel.vbt.psr.enable;
182 return i915->params.enable_psr;
183 case I915_PSR_DEBUG_DISABLE:
184 return false;
185 default:
186 return true;
187 }
188 }
189
190 static bool psr2_global_enabled(struct intel_dp *intel_dp)
191 {
192 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
193
194 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
195 case I915_PSR_DEBUG_DISABLE:
196 case I915_PSR_DEBUG_FORCE_PSR1:
197 return false;
198 default:
199 if (i915->params.enable_psr == 1)
200 return false;
201 return true;
202 }
203 }
204
205 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
206 {
207 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
208
209 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
210 EDP_PSR_ERROR(intel_dp->psr.transcoder);
211 }
212
213 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
214 {
215 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
216
217 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
218 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
219 }
220
221 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
222 {
223 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
224
225 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
226 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
227 }
228
229 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
230 {
231 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
232
233 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
234 EDP_PSR_MASK(intel_dp->psr.transcoder);
235 }
236
237 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
238 enum transcoder cpu_transcoder)
239 {
240 if (DISPLAY_VER(dev_priv) >= 8)
241 return EDP_PSR_CTL(cpu_transcoder);
242 else
243 return HSW_SRD_CTL;
244 }
245
246 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
247 enum transcoder cpu_transcoder)
248 {
249 if (DISPLAY_VER(dev_priv) >= 8)
250 return EDP_PSR_DEBUG(cpu_transcoder);
251 else
252 return HSW_SRD_DEBUG;
253 }
254
255 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
256 enum transcoder cpu_transcoder)
257 {
258 if (DISPLAY_VER(dev_priv) >= 8)
259 return EDP_PSR_PERF_CNT(cpu_transcoder);
260 else
261 return HSW_SRD_PERF_CNT;
262 }
263
264 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
265 enum transcoder cpu_transcoder)
266 {
267 if (DISPLAY_VER(dev_priv) >= 8)
268 return EDP_PSR_STATUS(cpu_transcoder);
269 else
270 return HSW_SRD_STATUS;
271 }
272
273 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
274 enum transcoder cpu_transcoder)
275 {
276 if (DISPLAY_VER(dev_priv) >= 12)
277 return TRANS_PSR_IMR(cpu_transcoder);
278 else
279 return EDP_PSR_IMR;
280 }
281
282 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
283 enum transcoder cpu_transcoder)
284 {
285 if (DISPLAY_VER(dev_priv) >= 12)
286 return TRANS_PSR_IIR(cpu_transcoder);
287 else
288 return EDP_PSR_IIR;
289 }
290
291 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
292 enum transcoder cpu_transcoder)
293 {
294 if (DISPLAY_VER(dev_priv) >= 8)
295 return EDP_PSR_AUX_CTL(cpu_transcoder);
296 else
297 return HSW_SRD_AUX_CTL;
298 }
299
300 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
301 enum transcoder cpu_transcoder, int i)
302 {
303 if (DISPLAY_VER(dev_priv) >= 8)
304 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
305 else
306 return HSW_SRD_AUX_DATA(i);
307 }
308
309 static void psr_irq_control(struct intel_dp *intel_dp)
310 {
311 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
312 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
313 u32 mask;
314
315 mask = psr_irq_psr_error_bit_get(intel_dp);
316 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
317 mask |= psr_irq_post_exit_bit_get(intel_dp) |
318 psr_irq_pre_entry_bit_get(intel_dp);
319
320 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
321 psr_irq_mask_get(intel_dp), ~mask);
322 }
323
324 static void psr_event_print(struct drm_i915_private *i915,
325 u32 val, bool psr2_enabled)
326 {
327 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
328 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
329 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
330 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
331 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
332 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
333 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
334 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
335 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
336 if (val & PSR_EVENT_GRAPHICS_RESET)
337 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
338 if (val & PSR_EVENT_PCH_INTERRUPT)
339 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
340 if (val & PSR_EVENT_MEMORY_UP)
341 drm_dbg_kms(&i915->drm, "\tMemory up\n");
342 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
343 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
344 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
345 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
346 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
347 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
348 if (val & PSR_EVENT_REGISTER_UPDATE)
349 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
350 if (val & PSR_EVENT_HDCP_ENABLE)
351 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
352 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
353 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
354 if (val & PSR_EVENT_VBI_ENABLE)
355 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
356 if (val & PSR_EVENT_LPSP_MODE_EXIT)
357 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
358 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
359 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
360 }
361
362 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
363 {
364 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
365 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
366 ktime_t time_ns = ktime_get();
367
368 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
369 intel_dp->psr.last_entry_attempt = time_ns;
370 drm_dbg_kms(&dev_priv->drm,
371 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
372 transcoder_name(cpu_transcoder));
373 }
374
375 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
376 intel_dp->psr.last_exit = time_ns;
377 drm_dbg_kms(&dev_priv->drm,
378 "[transcoder %s] PSR exit completed\n",
379 transcoder_name(cpu_transcoder));
380
381 if (DISPLAY_VER(dev_priv) >= 9) {
382 u32 val;
383
384 val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
385
386 psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
387 }
388 }
389
390 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
391 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
392 transcoder_name(cpu_transcoder));
393
394 intel_dp->psr.irq_aux_error = true;
395
396 /*
397 * If this interrupt is not masked it will keep
398 * firing so fast that it prevents the scheduled
399 * work from running.
400 * Also, after a PSR error we don't want to arm PSR
401 * again, so we don't care about unmasking the
402 * interrupt or clearing irq_aux_error.
403 */
404 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
405 0, psr_irq_psr_error_bit_get(intel_dp));
406
407 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
408 }
409 }
410
411 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
412 {
413 u8 alpm_caps = 0;
414
415 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
416 &alpm_caps) != 1)
417 return false;
418 return alpm_caps & DP_ALPM_CAP;
419 }
420
421 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
422 {
423 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
424 u8 val = 8; /* assume the worst if we can't read the value */
425
426 if (drm_dp_dpcd_readb(&intel_dp->aux,
427 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
428 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
429 else
430 drm_dbg_kms(&i915->drm,
431 "Unable to get sink synchronization latency, assuming 8 frames\n");
432 return val;
433 }
434
435 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
436 {
437 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
438 ssize_t r;
439 u16 w;
440 u8 y;
441
442 /* If the sink doesn't have specific granularity requirements, set legacy ones */
443 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
444 /* As PSR2 HW sends full lines, we do not care about x granularity */
445 w = 4;
446 y = 4;
447 goto exit;
448 }
449
450 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
451 if (r != 2)
452 drm_dbg_kms(&i915->drm,
453 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
454 /*
455 * Spec says that if the value read is 0 the default granularity should
456 * be used instead.
457 */
458 if (r != 2 || w == 0)
459 w = 4;
460
461 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
462 if (r != 1) {
463 drm_dbg_kms(&i915->drm,
464 "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
465 y = 4;
466 }
467 if (y == 0)
468 y = 1;
469
470 exit:
471 intel_dp->psr.su_w_granularity = w;
472 intel_dp->psr.su_y_granularity = y;
473 }
474
475 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
476 {
477 struct drm_i915_private *dev_priv =
478 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
479
480 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
481 sizeof(intel_dp->psr_dpcd));
482
483 if (!intel_dp->psr_dpcd[0])
484 return;
485 drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
486 intel_dp->psr_dpcd[0]);
487
488 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
489 drm_dbg_kms(&dev_priv->drm,
490 "PSR support not currently available for this panel\n");
491 return;
492 }
493
494 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
495 drm_dbg_kms(&dev_priv->drm,
496 "Panel lacks power state control, PSR cannot be enabled\n");
497 return;
498 }
499
500 intel_dp->psr.sink_support = true;
501 intel_dp->psr.sink_sync_latency =
502 intel_dp_get_sink_sync_latency(intel_dp);
503
504 if (DISPLAY_VER(dev_priv) >= 9 &&
505 (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
506 bool y_req = intel_dp->psr_dpcd[1] &
507 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
508 bool alpm = intel_dp_get_alpm_status(intel_dp);
509
510 /*
511 * All panels that support PSR version 03h (PSR2 +
512 * Y-coordinate) can handle Y-coordinates in the VSC, but we are
513 * only sure that it is going to be used when required by the
514 * panel. This way the panel is capable of doing selective
515 * updates without an AUX frame sync.
516 *
517 * To support panels with PSR version 02h, or PSR version 03h
518 * without the Y-coordinate requirement, we would need to enable
519 * GTC first.
520 */
521 intel_dp->psr.sink_psr2_support = y_req && alpm;
522 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
523 intel_dp->psr.sink_psr2_support ? "" : "not ");
524
525 if (intel_dp->psr.sink_psr2_support) {
526 intel_dp->psr.colorimetry_support =
527 intel_dp_get_colorimetry_status(intel_dp);
528 intel_dp_get_su_granularity(intel_dp);
529 }
530 }
531 }
532
533 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
534 {
535 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
536 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
537 u32 aux_clock_divider, aux_ctl;
538 /* write DP_SET_POWER=D0 */
539 static const u8 aux_msg[] = {
540 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
541 [1] = (DP_SET_POWER >> 8) & 0xff,
542 [2] = DP_SET_POWER & 0xff,
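/* AUX request header: the length field is (number of data bytes - 1), here a single byte */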
543 [3] = 1 - 1,
544 [4] = DP_SET_POWER_D0,
545 };
546 int i;
547
548 BUILD_BUG_ON(sizeof(aux_msg) > 20);
549 for (i = 0; i < sizeof(aux_msg); i += 4)
550 intel_de_write(dev_priv,
551 psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
552 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
553
554 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
555
556 /* Start with bits set for DDI_AUX_CTL register */
557 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
558 aux_clock_divider);
559
560 /* Select only valid bits for SRD_AUX_CTL */
561 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
562 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
563 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
564 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
565
566 intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
567 aux_ctl);
568 }
569
570 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
571 {
572 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573 u8 dpcd_val = DP_PSR_ENABLE;
574
575 /* Enable ALPM at sink for psr2 */
576 if (intel_dp->psr.psr2_enabled) {
577 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
578 DP_ALPM_ENABLE |
579 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
580
581 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
582 } else {
583 if (intel_dp->psr.link_standby)
584 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
585
586 if (DISPLAY_VER(dev_priv) >= 8)
587 dpcd_val |= DP_PSR_CRC_VERIFICATION;
588 }
589
590 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
591 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
592
593 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
594
595 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
596 }
597
598 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
599 {
600 struct intel_connector *connector = intel_dp->attached_connector;
601 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
602 u32 val = 0;
603
604 if (DISPLAY_VER(dev_priv) >= 11)
605 val |= EDP_PSR_TP4_TIME_0us;
606
607 if (dev_priv->params.psr_safest_params) {
608 val |= EDP_PSR_TP1_TIME_2500us;
609 val |= EDP_PSR_TP2_TP3_TIME_2500us;
610 goto check_tp3_sel;
611 }
612
613 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
614 val |= EDP_PSR_TP1_TIME_0us;
615 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
616 val |= EDP_PSR_TP1_TIME_100us;
617 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
618 val |= EDP_PSR_TP1_TIME_500us;
619 else
620 val |= EDP_PSR_TP1_TIME_2500us;
621
622 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
623 val |= EDP_PSR_TP2_TP3_TIME_0us;
624 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
625 val |= EDP_PSR_TP2_TP3_TIME_100us;
626 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
627 val |= EDP_PSR_TP2_TP3_TIME_500us;
628 else
629 val |= EDP_PSR_TP2_TP3_TIME_2500us;
630
631 /*
632 * WA 0479: hsw,bdw
633 * "Do not skip both TP1 and TP2/TP3"
634 */
635 if (DISPLAY_VER(dev_priv) < 9 &&
636 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
637 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
638 val |= EDP_PSR_TP2_TP3_TIME_100us;
639
640 check_tp3_sel:
641 if (intel_dp_source_supports_tps3(dev_priv) &&
642 drm_dp_tps3_supported(intel_dp->dpcd))
643 val |= EDP_PSR_TP_TP1_TP3;
644 else
645 val |= EDP_PSR_TP_TP1_TP2;
646
647 return val;
648 }
649
650 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
651 {
652 struct intel_connector *connector = intel_dp->attached_connector;
653 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
654 int idle_frames;
655
656 /* Let's use 6 as the minimum to cover all known cases including the
657 * off-by-one issue that HW has in some cases.
658 */
659 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
660 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
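/* e.g. the worst-case sink sync latency of 8 frames yields max(6, 8 + 1) = 9 idle frames */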
661
662 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
663 idle_frames = 0xf;
664
665 return idle_frames;
666 }
667
668 static void hsw_activate_psr1(struct intel_dp *intel_dp)
669 {
670 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
671 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
672 u32 max_sleep_time = 0x1f;
673 u32 val = EDP_PSR_ENABLE;
674
675 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
676
677 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
678 if (IS_HASWELL(dev_priv))
679 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
680
681 if (intel_dp->psr.link_standby)
682 val |= EDP_PSR_LINK_STANDBY;
683
684 val |= intel_psr1_get_tp_time(intel_dp);
685
686 if (DISPLAY_VER(dev_priv) >= 8)
687 val |= EDP_PSR_CRC_ENABLE;
688
689 intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
690 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
691 }
692
693 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
694 {
695 struct intel_connector *connector = intel_dp->attached_connector;
696 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
697 u32 val = 0;
698
699 if (dev_priv->params.psr_safest_params)
700 return EDP_PSR2_TP2_TIME_2500us;
701
702 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
703 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
704 val |= EDP_PSR2_TP2_TIME_50us;
705 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
706 val |= EDP_PSR2_TP2_TIME_100us;
707 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
708 val |= EDP_PSR2_TP2_TIME_500us;
709 else
710 val |= EDP_PSR2_TP2_TIME_2500us;
711
712 return val;
713 }
714
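/*
 * The PSR2 block count covers either 8 or 12 lines: once either wake
 * time needs 9 or more lines, the larger 12-line block count is used
 * (see the TGL_EDP_PSR2_BLOCK_COUNT_NUM_* selection in hsw_activate_psr2()).
 */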
715 static int psr2_block_count_lines(struct intel_dp *intel_dp)
716 {
717 return intel_dp->psr.io_wake_lines < 9 &&
718 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
719 }
720
721 static int psr2_block_count(struct intel_dp *intel_dp)
722 {
723 return psr2_block_count_lines(intel_dp) / 4;
724 }
725
726 static void hsw_activate_psr2(struct intel_dp *intel_dp)
727 {
728 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
729 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
730 u32 val = EDP_PSR2_ENABLE;
731
732 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
733
734 if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
735 val |= EDP_SU_TRACK_ENABLE;
736
737 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
738 val |= EDP_Y_COORDINATE_ENABLE;
739
740 val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
741 val |= intel_psr2_get_tp_time(intel_dp);
742
743 if (DISPLAY_VER(dev_priv) >= 12) {
744 if (psr2_block_count(intel_dp) > 2)
745 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
746 else
747 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
748 }
749
750 /* Wa_22012278275:adl-p */
751 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
752 static const u8 map[] = {
753 2, /* 5 lines */
754 1, /* 6 lines */
755 0, /* 7 lines */
756 3, /* 8 lines */
757 6, /* 9 lines */
758 5, /* 10 lines */
759 4, /* 11 lines */
760 7, /* 12 lines */
761 };
762 /*
763 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
764 * the comments below for more information.
765 */
766 int tmp;
767
768 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
769 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
770
771 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
772 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
773 } else if (DISPLAY_VER(dev_priv) >= 12) {
774 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
775 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
776 } else if (DISPLAY_VER(dev_priv) >= 9) {
777 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
778 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
779 }
780
781 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
782 val |= EDP_PSR2_SU_SDP_SCANLINE;
783
784 if (intel_dp->psr.psr2_sel_fetch_enabled) {
785 u32 tmp;
786
787 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
788 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
789 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
790 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
791 }
792
793 /*
794 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
795 * recommends keeping this bit unset while PSR2 is enabled.
796 */
797 intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
798
799 intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
800 }
801
802 static bool
803 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
804 {
805 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
806 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
807 else if (DISPLAY_VER(dev_priv) >= 12)
808 return cpu_transcoder == TRANSCODER_A;
809 else if (DISPLAY_VER(dev_priv) >= 9)
810 return cpu_transcoder == TRANSCODER_EDP;
811 else
812 return false;
813 }
814
815 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
816 {
817 if (!cstate || !cstate->hw.active)
818 return 0;
819
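/* e.g. a 60 Hz mode gives DIV_ROUND_UP(1000000, 60) = 16667 us per frame */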
820 return DIV_ROUND_UP(1000 * 1000,
821 drm_mode_vrefresh(&cstate->hw.adjusted_mode));
822 }
823
824 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
825 u32 idle_frames)
826 {
827 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
828 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
829
830 intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
831 EDP_PSR2_IDLE_FRAMES_MASK,
832 EDP_PSR2_IDLE_FRAMES(idle_frames));
833 }
834
835 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
836 {
837 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
838
839 psr2_program_idle_frames(intel_dp, 0);
840 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
841 }
842
843 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
844 {
845 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
846
847 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
848 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
849 }
850
851 static void tgl_dc3co_disable_work(struct work_struct *work)
852 {
853 struct intel_dp *intel_dp =
854 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
855
856 mutex_lock(&intel_dp->psr.lock);
857 /* If delayed work is pending, it is not idle */
858 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
859 goto unlock;
860
861 tgl_psr2_disable_dc3co(intel_dp);
862 unlock:
863 mutex_unlock(&intel_dp->psr.lock);
864 }
865
866 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
867 {
868 if (!intel_dp->psr.dc3co_exitline)
869 return;
870
871 cancel_delayed_work(&intel_dp->psr.dc3co_work);
872 /* Before PSR2 exit disallow dc3co */
873 tgl_psr2_disable_dc3co(intel_dp);
874 }
875
876 static bool
877 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
878 struct intel_crtc_state *crtc_state)
879 {
880 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
881 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
882 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
883 enum port port = dig_port->base.port;
884
885 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
886 return pipe <= PIPE_B && port <= PORT_B;
887 else
888 return pipe == PIPE_A && port == PORT_A;
889 }
890
891 static void
892 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
893 struct intel_crtc_state *crtc_state)
894 {
895 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
896 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
897 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
898 u32 exit_scanlines;
899
900 /*
901 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
902 * disable DC3CO until the changed dc3co activating/deactivating sequence
903 * is applied. B.Specs:49196
904 */
905 return;
906
907 /*
908 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
909 * TODO: when the issue is addressed, this restriction should be removed.
910 */
911 if (crtc_state->enable_psr2_sel_fetch)
912 return;
913
914 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
915 return;
916
917 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
918 return;
919
920 /* Wa_16011303918:adl-p */
921 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
922 return;
923
924 /*
925 * DC3CO Exit time 200us B.Spec 49196
926 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
927 */
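/*
 * e.g. assuming a ~14.8 us line time (1080p@60 with standard blanking)
 * this works out to 14 + 1 = 15 early-exit scanlines.
 */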
928 exit_scanlines =
929 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
930
931 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
932 return;
933
934 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
935 }
936
937 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
938 struct intel_crtc_state *crtc_state)
939 {
940 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
941
942 if (!dev_priv->params.enable_psr2_sel_fetch &&
943 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
944 drm_dbg_kms(&dev_priv->drm,
945 "PSR2 sel fetch not enabled, disabled by parameter\n");
946 return false;
947 }
948
949 if (crtc_state->uapi.async_flip) {
950 drm_dbg_kms(&dev_priv->drm,
951 "PSR2 sel fetch not enabled, async flip enabled\n");
952 return false;
953 }
954
955 return crtc_state->enable_psr2_sel_fetch = true;
956 }
957
958 static bool psr2_granularity_check(struct intel_dp *intel_dp,
959 struct intel_crtc_state *crtc_state)
960 {
961 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
962 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
963 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
964 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
965 u16 y_granularity = 0;
966
967 /* PSR2 HW only sends full lines, so we only need to validate the width */
968 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
969 return false;
970
971 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
972 return false;
973
974 /* HW tracking is only aligned to 4 lines */
975 if (!crtc_state->enable_psr2_sel_fetch)
976 return intel_dp->psr.su_y_granularity == 4;
977
978 /*
979 * adl_p and mtl platforms have 1 line granularity.
980 * For other platforms with SW tracking we can adjust the y coordinates
981 * to match the sink requirement if it is a multiple of 4.
982 */
983 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
984 y_granularity = intel_dp->psr.su_y_granularity;
985 else if (intel_dp->psr.su_y_granularity <= 2)
986 y_granularity = 4;
987 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
988 y_granularity = intel_dp->psr.su_y_granularity;
989
990 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
991 return false;
992
993 if (crtc_state->dsc.compression_enable &&
994 vdsc_cfg->slice_height % y_granularity)
995 return false;
996
997 crtc_state->su_y_granularity = y_granularity;
998 return true;
999 }
1000
1001 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1002 struct intel_crtc_state *crtc_state)
1003 {
1004 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1005 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1006 u32 hblank_total, hblank_ns, req_ns;
1007
1008 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1009 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1010
1011 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
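/* e.g. 4 lanes at HBR2 (540 MHz symbol clock): ((60 / 4) + 11) * 1000 / 540 ~= 48 ns */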
1012 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1013
1014 if ((hblank_ns - req_ns) > 100)
1015 return true;
1016
1017 /* Not supported <13 / Wa_22012279113:adl-p */
1018 if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1019 return false;
1020
1021 crtc_state->req_psr2_sdp_prior_scanline = true;
1022 return true;
1023 }
1024
1025 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1026 struct intel_crtc_state *crtc_state)
1027 {
1028 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1029 int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1030 u8 max_wake_lines;
1031
1032 if (DISPLAY_VER(i915) >= 12) {
1033 io_wake_time = 42;
1034 /*
1035 * According to Bspec it's 42us, but based on testing
1036 * it is not enough -> use 45 us.
1037 */
1038 fast_wake_time = 45;
1039 max_wake_lines = 12;
1040 } else {
1041 io_wake_time = 50;
1042 fast_wake_time = 32;
1043 max_wake_lines = 8;
1044 }
1045
1046 io_wake_lines = intel_usecs_to_scanlines(
1047 &crtc_state->hw.adjusted_mode, io_wake_time);
1048 fast_wake_lines = intel_usecs_to_scanlines(
1049 &crtc_state->hw.adjusted_mode, fast_wake_time);
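/*
 * e.g. assuming a 1080p@60 mode (htotal 2200 @ 148.5 MHz, ~14.8 us per
 * line), 45 us of fast wake rounds up to 4 lines, which the 7-line
 * lower limit below then raises to 7.
 */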
1050
1051 if (io_wake_lines > max_wake_lines ||
1052 fast_wake_lines > max_wake_lines)
1053 return false;
1054
1055 if (i915->params.psr_safest_params)
1056 io_wake_lines = fast_wake_lines = max_wake_lines;
1057
1058 /* According to Bspec lower limit should be set as 7 lines. */
1059 intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1060 intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1061
1062 return true;
1063 }
1064
1065 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1066 struct intel_crtc_state *crtc_state)
1067 {
1068 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1069 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1070 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1071 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1072
1073 if (!intel_dp->psr.sink_psr2_support)
1074 return false;
1075
1076 /* JSL and EHL only support eDP 1.3 */
1077 if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1078 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1079 return false;
1080 }
1081
1082 /* Wa_16011181250 */
1083 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1084 IS_DG2(dev_priv)) {
1085 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1086 return false;
1087 }
1088
1089 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1090 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1091 return false;
1092 }
1093
1094 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1095 drm_dbg_kms(&dev_priv->drm,
1096 "PSR2 not supported in transcoder %s\n",
1097 transcoder_name(crtc_state->cpu_transcoder));
1098 return false;
1099 }
1100
1101 if (!psr2_global_enabled(intel_dp)) {
1102 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1103 return false;
1104 }
1105
1106 /*
1107 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1108 * resolution requires DSC to be enabled, priority is given to DSC
1109 * over PSR2.
1110 */
1111 if (crtc_state->dsc.compression_enable &&
1112 (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1113 drm_dbg_kms(&dev_priv->drm,
1114 "PSR2 cannot be enabled since DSC is enabled\n");
1115 return false;
1116 }
1117
1118 if (crtc_state->crc_enabled) {
1119 drm_dbg_kms(&dev_priv->drm,
1120 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1121 return false;
1122 }
1123
1124 if (DISPLAY_VER(dev_priv) >= 12) {
1125 psr_max_h = 5120;
1126 psr_max_v = 3200;
1127 max_bpp = 30;
1128 } else if (DISPLAY_VER(dev_priv) >= 10) {
1129 psr_max_h = 4096;
1130 psr_max_v = 2304;
1131 max_bpp = 24;
1132 } else if (DISPLAY_VER(dev_priv) == 9) {
1133 psr_max_h = 3640;
1134 psr_max_v = 2304;
1135 max_bpp = 24;
1136 }
1137
1138 if (crtc_state->pipe_bpp > max_bpp) {
1139 drm_dbg_kms(&dev_priv->drm,
1140 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1141 crtc_state->pipe_bpp, max_bpp);
1142 return false;
1143 }
1144
1145 /* Wa_16011303918:adl-p */
1146 if (crtc_state->vrr.enable &&
1147 IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1148 drm_dbg_kms(&dev_priv->drm,
1149 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1150 return false;
1151 }
1152
1153 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1154 drm_dbg_kms(&dev_priv->drm,
1155 "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1156 return false;
1157 }
1158
1159 if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1160 drm_dbg_kms(&dev_priv->drm,
1161 "PSR2 not enabled, Unable to use long enough wake times\n");
1162 return false;
1163 }
1164
1165 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1166 if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1167 crtc_state->hw.adjusted_mode.crtc_vblank_start <
1168 psr2_block_count_lines(intel_dp)) {
1169 drm_dbg_kms(&dev_priv->drm,
1170 "PSR2 not enabled, too short vblank time\n");
1171 return false;
1172 }
1173
1174 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1175 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1176 !HAS_PSR_HW_TRACKING(dev_priv)) {
1177 drm_dbg_kms(&dev_priv->drm,
1178 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1179 return false;
1180 }
1181 }
1182
1183 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1184 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1185 goto unsupported;
1186 }
1187
1188 if (!crtc_state->enable_psr2_sel_fetch &&
1189 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1190 drm_dbg_kms(&dev_priv->drm,
1191 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1192 crtc_hdisplay, crtc_vdisplay,
1193 psr_max_h, psr_max_v);
1194 goto unsupported;
1195 }
1196
1197 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1198 return true;
1199
1200 unsupported:
1201 crtc_state->enable_psr2_sel_fetch = false;
1202 return false;
1203 }
1204
1205 void intel_psr_compute_config(struct intel_dp *intel_dp,
1206 struct intel_crtc_state *crtc_state,
1207 struct drm_connector_state *conn_state)
1208 {
1209 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1210 const struct drm_display_mode *adjusted_mode =
1211 &crtc_state->hw.adjusted_mode;
1212 int psr_setup_time;
1213
1214 /*
1215 * Current PSR panels don't work reliably with VRR enabled,
1216 * so if VRR is enabled, do not enable PSR.
1217 */
1218 if (crtc_state->vrr.enable)
1219 return;
1220
1221 if (!CAN_PSR(intel_dp))
1222 return;
1223
1224 if (!psr_global_enabled(intel_dp)) {
1225 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1226 return;
1227 }
1228
1229 if (intel_dp->psr.sink_not_reliable) {
1230 drm_dbg_kms(&dev_priv->drm,
1231 "PSR sink implementation is not reliable\n");
1232 return;
1233 }
1234
1235 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1236 drm_dbg_kms(&dev_priv->drm,
1237 "PSR condition failed: Interlaced mode enabled\n");
1238 return;
1239 }
1240
1241 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1242 if (psr_setup_time < 0) {
1243 drm_dbg_kms(&dev_priv->drm,
1244 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1245 intel_dp->psr_dpcd[1]);
1246 return;
1247 }
1248
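/*
 * e.g. a hypothetical 330 us setup time with ~14.8 us lines would need 23
 * scanlines of vertical blanking.
 */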
1249 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1250 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1251 drm_dbg_kms(&dev_priv->drm,
1252 "PSR condition failed: PSR setup time (%d us) too long\n",
1253 psr_setup_time);
1254 return;
1255 }
1256
1257 crtc_state->has_psr = true;
1258 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1259
1260 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1261 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1262 &crtc_state->psr_vsc);
1263 }
1264
1265 void intel_psr_get_config(struct intel_encoder *encoder,
1266 struct intel_crtc_state *pipe_config)
1267 {
1268 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1269 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1270 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1271 struct intel_dp *intel_dp;
1272 u32 val;
1273
1274 if (!dig_port)
1275 return;
1276
1277 intel_dp = &dig_port->dp;
1278 if (!CAN_PSR(intel_dp))
1279 return;
1280
1281 mutex_lock(&intel_dp->psr.lock);
1282 if (!intel_dp->psr.enabled)
1283 goto unlock;
1284
1285 /*
1286 * Not possible to read the EDP_PSR/PSR2_CTL registers as they get
1287 * enabled/disabled because of frontbuffer tracking and other mechanisms.
1288 */
1289 pipe_config->has_psr = true;
1290 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1291 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1292
1293 if (!intel_dp->psr.psr2_enabled)
1294 goto unlock;
1295
1296 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1297 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1298 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1299 pipe_config->enable_psr2_sel_fetch = true;
1300 }
1301
1302 if (DISPLAY_VER(dev_priv) >= 12) {
1303 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1304 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1305 }
1306 unlock:
1307 mutex_unlock(&intel_dp->psr.lock);
1308 }
1309
1310 static void intel_psr_activate(struct intel_dp *intel_dp)
1311 {
1312 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1313 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1314
1315 drm_WARN_ON(&dev_priv->drm,
1316 transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1317 intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1318
1319 drm_WARN_ON(&dev_priv->drm,
1320 intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1321
1322 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1323
1324 lockdep_assert_held(&intel_dp->psr.lock);
1325
1326 /* psr1 and psr2 are mutually exclusive. */
1327 if (intel_dp->psr.psr2_enabled)
1328 hsw_activate_psr2(intel_dp);
1329 else
1330 hsw_activate_psr1(intel_dp);
1331
1332 intel_dp->psr.active = true;
1333 }
1334
1335 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1336 {
1337 switch (intel_dp->psr.pipe) {
1338 case PIPE_A:
1339 return LATENCY_REPORTING_REMOVED_PIPE_A;
1340 case PIPE_B:
1341 return LATENCY_REPORTING_REMOVED_PIPE_B;
1342 case PIPE_C:
1343 return LATENCY_REPORTING_REMOVED_PIPE_C;
1344 case PIPE_D:
1345 return LATENCY_REPORTING_REMOVED_PIPE_D;
1346 default:
1347 MISSING_CASE(intel_dp->psr.pipe);
1348 return 0;
1349 }
1350 }
1351
1352 /*
1353 * Wa_16013835468
1354 * Wa_14015648006
1355 */
1356 static void wm_optimization_wa(struct intel_dp *intel_dp,
1357 const struct intel_crtc_state *crtc_state)
1358 {
1359 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1360 bool set_wa_bit = false;
1361
1362 /* Wa_14015648006 */
1363 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1364 IS_DISPLAY_VER(dev_priv, 11, 13))
1365 set_wa_bit |= crtc_state->wm_level_disabled;
1366
1367 /* Wa_16013835468 */
1368 if (DISPLAY_VER(dev_priv) == 12)
1369 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1370 crtc_state->hw.adjusted_mode.crtc_vdisplay;
1371
1372 if (set_wa_bit)
1373 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1374 0, wa_16013835468_bit_get(intel_dp));
1375 else
1376 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1377 wa_16013835468_bit_get(intel_dp), 0);
1378 }
1379
1380 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1381 const struct intel_crtc_state *crtc_state)
1382 {
1383 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1384 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1385 u32 mask;
1386
1387 /*
1388 * Only HSW and BDW have PSR AUX registers that need to be set up.
1389 * SKL+ use hardcoded values for PSR AUX transactions.
1390 */
1391 if (DISPLAY_VER(dev_priv) < 9)
1392 hsw_psr_setup_aux(intel_dp);
1393
1394 /*
1395 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1396 * mask LPSP to avoid a dependency on other drivers that might block
1397 * runtime_pm and to prevent other hw tracking issues, now that we
1398 * can rely on frontbuffer tracking.
1399 */
1400 mask = EDP_PSR_DEBUG_MASK_MEMUP |
1401 EDP_PSR_DEBUG_MASK_HPD |
1402 EDP_PSR_DEBUG_MASK_LPSP |
1403 EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1404
1405 /*
1406 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1407 * registers in order to keep the CURSURFLIVE tricks working :(
1408 */
1409 if (IS_DISPLAY_VER(dev_priv, 9, 10))
1410 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1411
1412 /* allow PSR with sprite enabled */
1413 if (IS_HASWELL(dev_priv))
1414 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1415
1416 intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1417
1418 psr_irq_control(intel_dp);
1419
1420 /*
1421 * TODO: if future platforms supports DC3CO in more than one
1422 * transcoder, EXITLINE will need to be unset when disabling PSR
1423 */
1424 if (intel_dp->psr.dc3co_exitline)
1425 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1426 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1427
1428 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1429 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1430 intel_dp->psr.psr2_sel_fetch_enabled ?
1431 IGNORE_PSR2_HW_TRACKING : 0);
1432
1433 /*
1434 * Wa_16013835468
1435 * Wa_14015648006
1436 */
1437 wm_optimization_wa(intel_dp, crtc_state);
1438
1439 if (intel_dp->psr.psr2_enabled) {
1440 if (DISPLAY_VER(dev_priv) == 9)
1441 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1442 PSR2_VSC_ENABLE_PROG_HEADER |
1443 PSR2_ADD_VERTICAL_LINE_COUNT);
1444
1445 /*
1446 * Wa_16014451276:adlp,mtl[a0,b0]
1447 * All supported adlp panels have 1-based X granularity; this may
1448 * cause issues if non-supported panels are used.
1449 */
1450 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1451 intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1452 ADLP_1_BASED_X_GRANULARITY);
1453 else if (IS_ALDERLAKE_P(dev_priv))
1454 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1455 ADLP_1_BASED_X_GRANULARITY);
1456
1457 /* Wa_16012604467:adlp,mtl[a0,b0] */
1458 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1459 intel_de_rmw(dev_priv,
1460 MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1461 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1462 else if (IS_ALDERLAKE_P(dev_priv))
1463 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1464 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1465 }
1466 }
1467
1468 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1469 {
1470 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1471 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1472 u32 val;
1473
1474 /*
1475 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1476 * will still keep the error set even after the reset done in the
1477 * irq_preinstall and irq_uninstall hooks.
1478 * And enabling it in this situation causes the screen to freeze the
1479 * first time that PSR HW tries to activate, so let's keep PSR disabled
1480 * to avoid any rendering problems.
1481 */
1482 val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1483 val &= psr_irq_psr_error_bit_get(intel_dp);
1484 if (val) {
1485 intel_dp->psr.sink_not_reliable = true;
1486 drm_dbg_kms(&dev_priv->drm,
1487 "PSR interruption error set, not enabling PSR\n");
1488 return false;
1489 }
1490
1491 return true;
1492 }
1493
1494 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1495 const struct intel_crtc_state *crtc_state)
1496 {
1497 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1498 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1499 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1500 struct intel_encoder *encoder = &dig_port->base;
1501 u32 val;
1502
1503 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1504
1505 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1506 intel_dp->psr.busy_frontbuffer_bits = 0;
1507 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1508 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1509 /* DC5/DC6 requires at least 6 idle frames */
1510 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1511 intel_dp->psr.dc3co_exit_delay = val;
1512 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1513 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1514 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1515 intel_dp->psr.req_psr2_sdp_prior_scanline =
1516 crtc_state->req_psr2_sdp_prior_scanline;
1517
1518 if (!psr_interrupt_error_check(intel_dp))
1519 return;
1520
1521 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1522 intel_dp->psr.psr2_enabled ? "2" : "1");
1523 intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1524 intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1525 intel_psr_enable_sink(intel_dp);
1526 intel_psr_enable_source(intel_dp, crtc_state);
1527 intel_dp->psr.enabled = true;
1528 intel_dp->psr.paused = false;
1529
1530 intel_psr_activate(intel_dp);
1531 }
1532
1533 static void intel_psr_exit(struct intel_dp *intel_dp)
1534 {
1535 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1536 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1537 u32 val;
1538
1539 if (!intel_dp->psr.active) {
1540 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1541 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1542 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1543 }
1544
1545 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1546 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1547
1548 return;
1549 }
1550
1551 if (intel_dp->psr.psr2_enabled) {
1552 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1553
1554 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1555 EDP_PSR2_ENABLE, 0);
1556
1557 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1558 } else {
1559 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1560 EDP_PSR_ENABLE, 0);
1561
1562 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1563 }
1564 intel_dp->psr.active = false;
1565 }
1566
1567 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1568 {
1569 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1570 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1571 i915_reg_t psr_status;
1572 u32 psr_status_mask;
1573
1574 if (intel_dp->psr.psr2_enabled) {
1575 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1576 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1577 } else {
1578 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1579 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1580 }
1581
1582 /* Wait till PSR is idle */
1583 if (intel_de_wait_for_clear(dev_priv, psr_status,
1584 psr_status_mask, 2000))
1585 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1586 }
1587
1588 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1589 {
1590 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1591 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1592 enum phy phy = intel_port_to_phy(dev_priv,
1593 dp_to_dig_port(intel_dp)->base.port);
1594
1595 lockdep_assert_held(&intel_dp->psr.lock);
1596
1597 if (!intel_dp->psr.enabled)
1598 return;
1599
1600 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1601 intel_dp->psr.psr2_enabled ? "2" : "1");
1602
1603 intel_psr_exit(intel_dp);
1604 intel_psr_wait_exit_locked(intel_dp);
1605
1606 /*
1607 * Wa_16013835468
1608 * Wa_14015648006
1609 */
1610 if (DISPLAY_VER(dev_priv) >= 11)
1611 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1612 wa_16013835468_bit_get(intel_dp), 0);
1613
1614 if (intel_dp->psr.psr2_enabled) {
1615 /* Wa_16012604467:adlp,mtl[a0,b0] */
1616 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1617 intel_de_rmw(dev_priv,
1618 MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1619 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1620 else if (IS_ALDERLAKE_P(dev_priv))
1621 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1622 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1623 }
1624
1625 intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1626
1627 /* Disable PSR on Sink */
1628 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1629
1630 if (intel_dp->psr.psr2_enabled)
1631 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1632
1633 intel_dp->psr.enabled = false;
1634 intel_dp->psr.psr2_enabled = false;
1635 intel_dp->psr.psr2_sel_fetch_enabled = false;
1636 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1637 }
1638
1639 /**
1640 * intel_psr_disable - Disable PSR
1641 * @intel_dp: Intel DP
1642 * @old_crtc_state: old CRTC state
1643 *
1644 * This function needs to be called before disabling pipe.
1645 */
1646 void intel_psr_disable(struct intel_dp *intel_dp,
1647 const struct intel_crtc_state *old_crtc_state)
1648 {
1649 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1650
1651 if (!old_crtc_state->has_psr)
1652 return;
1653
1654 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1655 return;
1656
1657 mutex_lock(&intel_dp->psr.lock);
1658
1659 intel_psr_disable_locked(intel_dp);
1660
1661 mutex_unlock(&intel_dp->psr.lock);
1662 cancel_work_sync(&intel_dp->psr.work);
1663 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1664 }
1665
1666 /**
1667 * intel_psr_pause - Pause PSR
1668 * @intel_dp: Intel DP
1669 *
1670 * This function needs to be called after PSR has been enabled.
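 *
 * A minimal usage sketch (hypothetical caller, names as used in this file):
 *
 *	intel_psr_pause(intel_dp);
 *	... program registers that must not race with PSR entry/exit ...
 *	intel_psr_resume(intel_dp);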
1671 */
1672 void intel_psr_pause(struct intel_dp *intel_dp)
1673 {
1674 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1675 struct intel_psr *psr = &intel_dp->psr;
1676
1677 if (!CAN_PSR(intel_dp))
1678 return;
1679
1680 mutex_lock(&psr->lock);
1681
1682 if (!psr->enabled) {
1683 mutex_unlock(&psr->lock);
1684 return;
1685 }
1686
1687 /* If we ever hit this, we will need to add refcount to pause/resume */
1688 drm_WARN_ON(&dev_priv->drm, psr->paused);
1689
1690 intel_psr_exit(intel_dp);
1691 intel_psr_wait_exit_locked(intel_dp);
1692 psr->paused = true;
1693
1694 mutex_unlock(&psr->lock);
1695
1696 cancel_work_sync(&psr->work);
1697 cancel_delayed_work_sync(&psr->dc3co_work);
1698 }
1699
1700 /**
1701 * intel_psr_resume - Resume PSR
1702 * @intel_dp: Intel DP
1703 *
1704 * This function needs to be called after pausing PSR.
1705 */
1706 void intel_psr_resume(struct intel_dp *intel_dp)
1707 {
1708 struct intel_psr *psr = &intel_dp->psr;
1709
1710 if (!CAN_PSR(intel_dp))
1711 return;
1712
1713 mutex_lock(&psr->lock);
1714
1715 if (!psr->paused)
1716 goto unlock;
1717
1718 psr->paused = false;
1719 intel_psr_activate(intel_dp);
1720
1721 unlock:
1722 mutex_unlock(&psr->lock);
1723 }
1724
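/*
 * The PSR2 manual tracking (selective update region) control bits differ
 * between platforms: ADL-P and display version 14+ use the ADLP_* layout
 * (and need no explicit enable bit), while older platforms use the
 * original PSR2_MAN_TRK_CTL_* layout. The helpers below hide that
 * difference from the callers.
 */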
1725 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1726 {
1727 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1728 PSR2_MAN_TRK_CTL_ENABLE;
1729 }
1730
1731 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1732 {
1733 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1734 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1735 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1736 }
1737
1738 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1739 {
1740 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1741 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1742 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1743 }
1744
1745 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1746 {
1747 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1748 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1749 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1750 }
1751
1752 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1753 {
1754 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1755 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1756
1757 if (intel_dp->psr.psr2_sel_fetch_enabled)
1758 intel_de_write(dev_priv,
1759 PSR2_MAN_TRK_CTL(cpu_transcoder),
1760 man_trk_ctl_enable_bit_get(dev_priv) |
1761 man_trk_ctl_partial_frame_bit_get(dev_priv) |
1762 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1763 man_trk_ctl_continuos_full_frame(dev_priv));
1764
1765 /*
1766 * Display WA #0884: skl+
1767 * This documented WA for bxt can be safely applied
1768 * broadly so we can force HW tracking to exit PSR
1769 * instead of disabling and re-enabling.
1770 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1771 * but it makes more sense to write to the currently active
1772 * pipe.
1773 *
1774 * This workaround is not documented for platforms with display
1775 * version 10 or newer, but testing proved that it works up to
1776 * display version 13; newer platforms will need to be tested.
1777 */
1778 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1779 }
1780
1781 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1782 const struct intel_crtc_state *crtc_state)
1783 {
1784 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1785 enum pipe pipe = plane->pipe;
1786
1787 if (!crtc_state->enable_psr2_sel_fetch)
1788 return;
1789
1790 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1791 }
1792
1793 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1794 const struct intel_crtc_state *crtc_state,
1795 const struct intel_plane_state *plane_state)
1796 {
1797 struct drm_i915_private *i915 = to_i915(plane->base.dev);
1798 enum pipe pipe = plane->pipe;
1799
1800 if (!crtc_state->enable_psr2_sel_fetch)
1801 return;
1802
1803 if (plane->id == PLANE_CURSOR)
1804 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1805 plane_state->ctl);
1806 else
1807 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1808 PLANE_SEL_FETCH_CTL_ENABLE);
1809 }
1810
1811 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1812 const struct intel_crtc_state *crtc_state,
1813 const struct intel_plane_state *plane_state,
1814 int color_plane)
1815 {
1816 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1817 enum pipe pipe = plane->pipe;
1818 const struct drm_rect *clip;
1819 u32 val;
1820 int x, y;
1821
1822 if (!crtc_state->enable_psr2_sel_fetch)
1823 return;
1824
1825 if (plane->id == PLANE_CURSOR)
1826 return;
1827
1828 clip = &plane_state->psr2_sel_fetch_area;
1829
1830 val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1831 val |= plane_state->uapi.dst.x1;
1832 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1833
1834 x = plane_state->view.color_plane[color_plane].x;
1835
1836 /*
1837 * From Bspec: UV surface Start Y Position = half of Y plane Y
1838 * start position.
1839 */
1840 if (!color_plane)
1841 y = plane_state->view.color_plane[color_plane].y + clip->y1;
1842 else
1843 y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1844
1845 val = y << 16 | x;
1846
1847 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1848 val);
1849
1850 /* Sizes are 0 based */
1851 val = (drm_rect_height(clip) - 1) << 16;
1852 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1853 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1854 }
1855
1856 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1857 {
1858 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1859 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1860 struct intel_encoder *encoder;
1861
1862 if (!crtc_state->enable_psr2_sel_fetch)
1863 return;
1864
1865 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1866 crtc_state->uapi.encoder_mask) {
1867 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1868
1869 lockdep_assert_held(&intel_dp->psr.lock);
1870 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1871 return;
1872 break;
1873 }
1874
1875 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1876 crtc_state->psr2_man_track_ctl);
1877 }
1878
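/*
 * Compute the PSR2_MAN_TRK_CTL value for this commit: either request a
 * single + continuous full frame fetch (full_update), or program the
 * selective update region from @clip. Note that pre-ADL-P hardware takes
 * the SU region addresses in 4-line blocks (hence the divide by 4 below),
 * while ADL-P and display version 14+ take scanline numbers directly.
 */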
1879 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1880 struct drm_rect *clip, bool full_update)
1881 {
1882 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1883 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1884 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1885
1886 /* SF partial frame enable has to be set even on full update */
1887 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1888
1889 if (full_update) {
1890 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1891 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1892 goto exit;
1893 }
1894
1895 if (clip->y1 == -1)
1896 goto exit;
1897
1898 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1899 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1900 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1901 } else {
1902 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1903
1904 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1905 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1906 }
1907 exit:
1908 crtc_state->psr2_man_track_ctl = val;
1909 }
1910
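/*
 * Accumulate @damage_area into @overlap_damage_area: the damage is first
 * clipped against @pipe_src, then the vertical extent of the overlap area
 * is grown to cover it. y1 == -1 means the overlap area is still empty.
 */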
1911 static void clip_area_update(struct drm_rect *overlap_damage_area,
1912 struct drm_rect *damage_area,
1913 struct drm_rect *pipe_src)
1914 {
1915 if (!drm_rect_intersect(damage_area, pipe_src))
1916 return;
1917
1918 if (overlap_damage_area->y1 == -1) {
1919 overlap_damage_area->y1 = damage_area->y1;
1920 overlap_damage_area->y2 = damage_area->y2;
1921 return;
1922 }
1923
1924 if (damage_area->y1 < overlap_damage_area->y1)
1925 overlap_damage_area->y1 = damage_area->y1;
1926
1927 if (damage_area->y2 > overlap_damage_area->y2)
1928 overlap_damage_area->y2 = damage_area->y2;
1929 }
1930
1931 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1932 struct drm_rect *pipe_clip)
1933 {
1934 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1935 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1936 u16 y_alignment;
1937
1938 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1939 if (crtc_state->dsc.compression_enable &&
1940 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1941 y_alignment = vdsc_cfg->slice_height;
1942 else
1943 y_alignment = crtc_state->su_y_granularity;
1944
1945 pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1946 if (pipe_clip->y2 % y_alignment)
1947 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1948 }
1949
1950 /*
1951 * TODO: It is not clear how to handle planes with a negative position;
1952 * also, planes are not updated if they have a negative X
1953 * position, so for now do a full update in these cases.
1954 *
1955 * Plane scaling and rotation are not supported by selective fetch, and both
1956 * properties can change without a modeset, so they need to be checked at every
1957 * atomic commit.
1958 */
1959 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1960 {
1961 if (plane_state->uapi.dst.y1 < 0 ||
1962 plane_state->uapi.dst.x1 < 0 ||
1963 plane_state->scaler_id >= 0 ||
1964 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1965 return false;
1966
1967 return true;
1968 }
1969
1970 /*
1971 * Check for pipe properties that are not supported by selective fetch.
1972 *
1973 * TODO: pipe scaling causes a modeset, but skl_update_scaler_crtc() is executed
1974 * after intel_psr_compute_config(), so for now keep PSR2 selective fetch
1975 * enabled and take the full update path.
1976 */
1977 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1978 {
1979 if (crtc_state->scaler_state.scaler_id >= 0)
1980 return false;
1981
1982 return true;
1983 }
1984
1985 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1986 struct intel_crtc *crtc)
1987 {
1988 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1989 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1990 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1991 struct intel_plane_state *new_plane_state, *old_plane_state;
1992 struct intel_plane *plane;
1993 bool full_update = false;
1994 int i, ret;
1995
1996 if (!crtc_state->enable_psr2_sel_fetch)
1997 return 0;
1998
1999 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2000 full_update = true;
2001 goto skip_sel_fetch_set_loop;
2002 }
2003
2004 /*
2005 * Calculate the minimal selective fetch area of each plane and
2006 * accumulate the pipe damaged area.
2007 * In the next loop the plane selective fetch area will actually be set
2008 * using the whole pipe damaged area.
2009 */
2010 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2011 new_plane_state, i) {
2012 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2013 .x2 = INT_MAX };
2014
2015 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2016 continue;
2017
2018 if (!new_plane_state->uapi.visible &&
2019 !old_plane_state->uapi.visible)
2020 continue;
2021
2022 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2023 full_update = true;
2024 break;
2025 }
2026
2027 /*
2028 * If the visibility changed or the plane moved, mark the whole
2029 * plane area as damaged as it needs to be completely redrawn in
2030 * both the old and the new position.
2031 */
2032 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2033 !drm_rect_equals(&new_plane_state->uapi.dst,
2034 &old_plane_state->uapi.dst)) {
2035 if (old_plane_state->uapi.visible) {
2036 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2037 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2038 clip_area_update(&pipe_clip, &damaged_area,
2039 &crtc_state->pipe_src);
2040 }
2041
2042 if (new_plane_state->uapi.visible) {
2043 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2044 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2045 clip_area_update(&pipe_clip, &damaged_area,
2046 &crtc_state->pipe_src);
2047 }
2048 continue;
2049 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2050 /* If alpha changed mark the whole plane area as damaged */
2051 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2052 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2053 clip_area_update(&pipe_clip, &damaged_area,
2054 &crtc_state->pipe_src);
2055 continue;
2056 }
2057
2058 src = drm_plane_state_src(&new_plane_state->uapi);
2059 drm_rect_fp_to_int(&src, &src);
2060
2061 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2062 &new_plane_state->uapi, &damaged_area))
2063 continue;
2064
2065 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2066 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2067 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2068 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2069
2070 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2071 }
2072
2073 /*
2074 * TODO: For now we are just using full update in case
2075 * selective fetch area calculation fails. To optimize this we
2076 * should identify cases where this happens and fix the area
2077 * calculation for those.
2078 */
2079 if (pipe_clip.y1 == -1) {
2080 drm_info_once(&dev_priv->drm,
2081 "Selective fetch area calculation failed in pipe %c\n",
2082 pipe_name(crtc->pipe));
2083 full_update = true;
2084 }
2085
2086 if (full_update)
2087 goto skip_sel_fetch_set_loop;
2088
2089 /* Wa_14014971492 */
2090 if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
2091 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2092 crtc_state->splitter.enable)
2093 pipe_clip.y1 = 0;
2094
2095 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2096 if (ret)
2097 return ret;
2098
2099 intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2100
2101 /*
2102 * Now that we have the pipe damaged area, check if it intersects with
2103 * each plane; if it does, set the plane selective fetch area.
2104 */
2105 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2106 new_plane_state, i) {
2107 struct drm_rect *sel_fetch_area, inter;
2108 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2109
2110 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2111 !new_plane_state->uapi.visible)
2112 continue;
2113
2114 inter = pipe_clip;
2115 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2116 continue;
2117
2118 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2119 full_update = true;
2120 break;
2121 }
2122
2123 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2124 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2125 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2126 crtc_state->update_planes |= BIT(plane->id);
2127
2128 /*
2129 * sel_fetch_area is calculated for the UV plane. Use the
2130 * same area for the Y plane as well.
2131 */
2132 if (linked) {
2133 struct intel_plane_state *linked_new_plane_state;
2134 struct drm_rect *linked_sel_fetch_area;
2135
2136 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2137 if (IS_ERR(linked_new_plane_state))
2138 return PTR_ERR(linked_new_plane_state);
2139
2140 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2141 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2142 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2143 crtc_state->update_planes |= BIT(linked->id);
2144 }
2145 }
2146
2147 skip_sel_fetch_set_loop:
2148 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2149 return 0;
2150 }
2151
2152 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2153 struct intel_crtc *crtc)
2154 {
2155 struct drm_i915_private *i915 = to_i915(state->base.dev);
2156 const struct intel_crtc_state *old_crtc_state =
2157 intel_atomic_get_old_crtc_state(state, crtc);
2158 const struct intel_crtc_state *new_crtc_state =
2159 intel_atomic_get_new_crtc_state(state, crtc);
2160 struct intel_encoder *encoder;
2161
2162 if (!HAS_PSR(i915))
2163 return;
2164
2165 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2166 old_crtc_state->uapi.encoder_mask) {
2167 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2168 struct intel_psr *psr = &intel_dp->psr;
2169 bool needs_to_disable = false;
2170
2171 mutex_lock(&psr->lock);
2172
2173 /*
2174 * Reasons to disable:
2175 * - PSR disabled in new state
2176 * - All planes will go inactive
2177 * - Changing between PSR versions
2178 * - Display WA #1136: skl, bxt
2179 */
2180 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2181 needs_to_disable |= !new_crtc_state->has_psr;
2182 needs_to_disable |= !new_crtc_state->active_planes;
2183 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2184 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2185 new_crtc_state->wm_level_disabled;
2186
2187 if (psr->enabled && needs_to_disable)
2188 intel_psr_disable_locked(intel_dp);
2189 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2190 /* Wa_14015648006 */
2191 wm_optimization_wa(intel_dp, new_crtc_state);
2192
2193 mutex_unlock(&psr->lock);
2194 }
2195 }
2196
2197 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
2198 const struct intel_crtc_state *crtc_state)
2199 {
2200 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2201 struct intel_encoder *encoder;
2202
2203 if (!crtc_state->has_psr)
2204 return;
2205
2206 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2207 crtc_state->uapi.encoder_mask) {
2208 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2209 struct intel_psr *psr = &intel_dp->psr;
2210 bool keep_disabled = false;
2211
2212 mutex_lock(&psr->lock);
2213
2214 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2215
2216 keep_disabled |= psr->sink_not_reliable;
2217 keep_disabled |= !crtc_state->active_planes;
2218
2219 /* Display WA #1136: skl, bxt */
2220 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2221 crtc_state->wm_level_disabled;
2222
2223 if (!psr->enabled && !keep_disabled)
2224 intel_psr_enable_locked(intel_dp, crtc_state);
2225 else if (psr->enabled && !crtc_state->wm_level_disabled)
2226 /* Wa_14015648006 */
2227 wm_optimization_wa(intel_dp, crtc_state);
2228
2229 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2230 if (crtc_state->crc_enabled && psr->enabled)
2231 psr_force_hw_tracking_exit(intel_dp);
2232
2233 mutex_unlock(&psr->lock);
2234 }
2235 }
2236
2237 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2238 {
2239 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2240 struct intel_crtc_state *crtc_state;
2241 struct intel_crtc *crtc;
2242 int i;
2243
2244 if (!HAS_PSR(dev_priv))
2245 return;
2246
2247 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2248 _intel_psr_post_plane_update(state, crtc_state);
2249 }
2250
2251 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2252 {
2253 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2254 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2255
2256 /*
2257 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2258 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2259 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2260 */
2261 return intel_de_wait_for_clear(dev_priv,
2262 EDP_PSR2_STATUS(cpu_transcoder),
2263 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2264 }
2265
2266 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2267 {
2268 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2269 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2270
2271 /*
2272 * From bspec: Panel Self Refresh (BDW+)
2273 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2274 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2275 * defensive enough to cover everything.
2276 */
2277 return intel_de_wait_for_clear(dev_priv,
2278 psr_status_reg(dev_priv, cpu_transcoder),
2279 EDP_PSR_STATUS_STATE_MASK, 50);
2280 }
2281
2282 /**
2283 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2284 * @new_crtc_state: new CRTC state
2285 *
2286 * This function is expected to be called from pipe_update_start() where it is
2287 * not expected to race with PSR enable or disable.
2288 */
2289 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2290 {
2291 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2292 struct intel_encoder *encoder;
2293
2294 if (!new_crtc_state->has_psr)
2295 return;
2296
2297 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2298 new_crtc_state->uapi.encoder_mask) {
2299 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2300 int ret;
2301
2302 lockdep_assert_held(&intel_dp->psr.lock);
2303
2304 if (!intel_dp->psr.enabled)
2305 continue;
2306
2307 if (intel_dp->psr.psr2_enabled)
2308 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2309 else
2310 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2311
2312 if (ret)
2313 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2314 }
2315 }
2316
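/*
 * Wait for the PSR status state field to clear so that PSR can be
 * activated again. The PSR lock is dropped around the register wait and
 * re-taken afterwards, so callers must re-check any state they rely on;
 * returns true only if the wait succeeded and PSR is still enabled.
 */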
2317 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2318 {
2319 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2320 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2321 i915_reg_t reg;
2322 u32 mask;
2323 int err;
2324
2325 if (!intel_dp->psr.enabled)
2326 return false;
2327
2328 if (intel_dp->psr.psr2_enabled) {
2329 reg = EDP_PSR2_STATUS(cpu_transcoder);
2330 mask = EDP_PSR2_STATUS_STATE_MASK;
2331 } else {
2332 reg = psr_status_reg(dev_priv, cpu_transcoder);
2333 mask = EDP_PSR_STATUS_STATE_MASK;
2334 }
2335
2336 mutex_unlock(&intel_dp->psr.lock);
2337
2338 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2339 if (err)
2340 drm_err(&dev_priv->drm,
2341 "Timed out waiting for PSR Idle for re-enable\n");
2342
2343 /* After the unlocked wait, verify that PSR is still wanted! */
2344 mutex_lock(&intel_dp->psr.lock);
2345 return err == 0 && intel_dp->psr.enabled;
2346 }
2347
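/*
 * Force an update on all eDP connectors so that a changed PSR debug mode
 * actually takes effect: build an atomic state, mark the mode of every
 * CRTC driving an eDP connector as changed and commit it, retrying on
 * -EDEADLK as usual for atomic commits.
 */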
2348 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2349 {
2350 struct drm_connector_list_iter conn_iter;
2351 struct drm_modeset_acquire_ctx ctx;
2352 struct drm_atomic_state *state;
2353 struct drm_connector *conn;
2354 int err = 0;
2355
2356 state = drm_atomic_state_alloc(&dev_priv->drm);
2357 if (!state)
2358 return -ENOMEM;
2359
2360 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2361
2362 state->acquire_ctx = &ctx;
2363 to_intel_atomic_state(state)->internal = true;
2364
2365 retry:
2366 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2367 drm_for_each_connector_iter(conn, &conn_iter) {
2368 struct drm_connector_state *conn_state;
2369 struct drm_crtc_state *crtc_state;
2370
2371 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2372 continue;
2373
2374 conn_state = drm_atomic_get_connector_state(state, conn);
2375 if (IS_ERR(conn_state)) {
2376 err = PTR_ERR(conn_state);
2377 break;
2378 }
2379
2380 if (!conn_state->crtc)
2381 continue;
2382
2383 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2384 if (IS_ERR(crtc_state)) {
2385 err = PTR_ERR(crtc_state);
2386 break;
2387 }
2388
2389 /* Mark mode as changed to trigger a pipe->update() */
2390 crtc_state->mode_changed = true;
2391 }
2392 drm_connector_list_iter_end(&conn_iter);
2393
2394 if (err == 0)
2395 err = drm_atomic_commit(state);
2396
2397 if (err == -EDEADLK) {
2398 drm_atomic_state_clear(state);
2399 err = drm_modeset_backoff(&ctx);
2400 if (!err)
2401 goto retry;
2402 }
2403
2404 drm_modeset_drop_locks(&ctx);
2405 drm_modeset_acquire_fini(&ctx);
2406 drm_atomic_state_put(state);
2407
2408 return err;
2409 }
2410
2411 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2412 {
2413 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2414 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2415 u32 old_mode;
2416 int ret;
2417
2418 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2419 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2420 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2421 return -EINVAL;
2422 }
2423
2424 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2425 if (ret)
2426 return ret;
2427
2428 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2429 intel_dp->psr.debug = val;
2430
2431 /*
2432 * Do it right away if it's already enabled, otherwise it will be done
2433 * when enabling the source.
2434 */
2435 if (intel_dp->psr.enabled)
2436 psr_irq_control(intel_dp);
2437
2438 mutex_unlock(&intel_dp->psr.lock);
2439
2440 if (old_mode != mode)
2441 ret = intel_psr_fastset_force(dev_priv);
2442
2443 return ret;
2444 }
2445
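/*
 * Called from the PSR work when psr.irq_aux_error is set: disable PSR,
 * mark the sink as not reliable and make sure the sink is back in the
 * D0 power state.
 */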
2446 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2447 {
2448 struct intel_psr *psr = &intel_dp->psr;
2449
2450 intel_psr_disable_locked(intel_dp);
2451 psr->sink_not_reliable = true;
2452 /* let's make sure that the sink is awake */
2453 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2454 }
2455
2456 static void intel_psr_work(struct work_struct *work)
2457 {
2458 struct intel_dp *intel_dp =
2459 container_of(work, typeof(*intel_dp), psr.work);
2460
2461 mutex_lock(&intel_dp->psr.lock);
2462
2463 if (!intel_dp->psr.enabled)
2464 goto unlock;
2465
2466 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2467 intel_psr_handle_irq(intel_dp);
2468
2469 /*
2470 * We have to make sure PSR is ready for re-enable,
2471 * otherwise it stays disabled until the next full enable/disable cycle.
2472 * PSR might take some time to get fully disabled
2473 * and be ready for re-enable.
2474 */
2475 if (!__psr_wait_for_idle_locked(intel_dp))
2476 goto unlock;
2477
2478 /*
2479 * The delayed work can race with an invalidate hence we need to
2480 * recheck. Since psr_flush first clears this and then reschedules we
2481 * won't ever miss a flush when bailing out here.
2482 */
2483 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2484 goto unlock;
2485
2486 intel_psr_activate(intel_dp);
2487 unlock:
2488 mutex_unlock(&intel_dp->psr.lock);
2489 }
2490
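/*
 * Frontbuffer invalidate handling: with PSR2 selective fetch, switch the
 * hardware to continuous full frame fetches (kicking CURSURFLIVE so the
 * change takes effect) until the corresponding flush; without selective
 * fetch, simply exit PSR.
 */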
2491 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2492 {
2493 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2494 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2495
2496 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2497 u32 val;
2498
2499 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2500 /* Send one update, otherwise lag is observed on the screen */
2501 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2502 return;
2503 }
2504
2505 val = man_trk_ctl_enable_bit_get(dev_priv) |
2506 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2507 man_trk_ctl_continuos_full_frame(dev_priv);
2508 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2509 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2510 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2511 } else {
2512 intel_psr_exit(intel_dp);
2513 }
2514 }
2515
2516 /**
2517 * intel_psr_invalidate - Invalidate PSR
2518 * @dev_priv: i915 device
2519 * @frontbuffer_bits: frontbuffer plane tracking bits
2520 * @origin: which operation caused the invalidate
2521 *
2522 * Since the hardware frontbuffer tracking has gaps we need to integrate
2523 * with the software frontbuffer tracking. This function gets called every
2524 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2525 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2526 *
2527 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2528 */
2529 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2530 unsigned frontbuffer_bits, enum fb_op_origin origin)
2531 {
2532 struct intel_encoder *encoder;
2533
2534 if (origin == ORIGIN_FLIP)
2535 return;
2536
2537 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2538 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2539 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2540
2541 mutex_lock(&intel_dp->psr.lock);
2542 if (!intel_dp->psr.enabled) {
2543 mutex_unlock(&intel_dp->psr.lock);
2544 continue;
2545 }
2546
2547 pipe_frontbuffer_bits &=
2548 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2549 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2550
2551 if (pipe_frontbuffer_bits)
2552 _psr_invalidate_handle(intel_dp);
2553
2554 mutex_unlock(&intel_dp->psr.lock);
2555 }
2556 }
2557 /*
2558 * Once we completely rely on PSR2 S/W tracking in the future,
2559 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
2560 * events as well, therefore tgl_dc3co_flush_locked() will need to be
2561 * changed accordingly.
2562 */
2563 static void
2564 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2565 enum fb_op_origin origin)
2566 {
2567 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2568
2569 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2570 !intel_dp->psr.active)
2571 return;
2572
2573 /*
2574 * Modify the delay of the delayed work at every frontbuffer flush/flip
2575 * event; when the delayed work finally runs it means the display has been idle.
2576 */
2577 if (!(frontbuffer_bits &
2578 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2579 return;
2580
2581 tgl_psr2_enable_dc3co(intel_dp);
2582 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2583 intel_dp->psr.dc3co_exit_delay);
2584 }
2585
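/*
 * Frontbuffer flush handling: if continuous full frame fetch was enabled
 * by an invalidate and no frontbuffer bits are busy anymore, allow
 * selective updates again; otherwise force the hardware tracking to exit
 * PSR and, in the non-selective-fetch case, schedule the work that
 * re-activates PSR once the hardware is idle.
 */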
2586 static void _psr_flush_handle(struct intel_dp *intel_dp)
2587 {
2588 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2589 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2590
2591 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2592 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2593 /* can we turn CFF off? */
2594 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2595 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2596 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2597 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2598 man_trk_ctl_continuos_full_frame(dev_priv);
2599
2600 /*
2601 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2602 * updates. Still keep the CFF bit enabled as we don't have a proper
2603 * SU configuration in case an update is sent for any reason after
2604 * the SFF bit gets cleared by the HW on the next vblank.
2605 */
2606 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2607 val);
2608 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2609 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2610 }
2611 } else {
2612 /*
2613 * continuous full frame is disabled, only a single full
2614 * frame is required
2615 */
2616 psr_force_hw_tracking_exit(intel_dp);
2617 }
2618 } else {
2619 psr_force_hw_tracking_exit(intel_dp);
2620
2621 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2622 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2623 }
2624 }
2625
2626 /**
2627 * intel_psr_flush - Flush PSR
2628 * @dev_priv: i915 device
2629 * @frontbuffer_bits: frontbuffer plane tracking bits
2630 * @origin: which operation caused the flush
2631 *
2632 * Since the hardware frontbuffer tracking has gaps we need to integrate
2633 * with the software frontbuffer tracking. This function gets called every
2634 * time frontbuffer rendering has completed and flushed out to memory. PSR
2635 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2636 *
2637 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2638 */
2639 void intel_psr_flush(struct drm_i915_private *dev_priv,
2640 unsigned frontbuffer_bits, enum fb_op_origin origin)
2641 {
2642 struct intel_encoder *encoder;
2643
2644 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2645 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2646 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2647
2648 mutex_lock(&intel_dp->psr.lock);
2649 if (!intel_dp->psr.enabled) {
2650 mutex_unlock(&intel_dp->psr.lock);
2651 continue;
2652 }
2653
2654 pipe_frontbuffer_bits &=
2655 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2656 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2657
2658 /*
2659 * If PSR is paused by an explicit intel_psr_pause() call,
2660 * we have to ensure that PSR is not activated until
2661 * intel_psr_resume() is called.
2662 */
2663 if (intel_dp->psr.paused)
2664 goto unlock;
2665
2666 if (origin == ORIGIN_FLIP ||
2667 (origin == ORIGIN_CURSOR_UPDATE &&
2668 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2669 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2670 goto unlock;
2671 }
2672
2673 if (pipe_frontbuffer_bits == 0)
2674 goto unlock;
2675
2676 /* By definition flush = invalidate + flush */
2677 _psr_flush_handle(intel_dp);
2678 unlock:
2679 mutex_unlock(&intel_dp->psr.lock);
2680 }
2681 }
2682
2683 /**
2684 * intel_psr_init - Init basic PSR work and mutex.
2685 * @intel_dp: Intel DP
2686 *
2687 * This function is called after the connector has been initialized
2688 * (connector initialization handles the reading of the connector capabilities)
2689 * and it initializes basic PSR state for each DP encoder.
2690 */
2691 void intel_psr_init(struct intel_dp *intel_dp)
2692 {
2693 struct intel_connector *connector = intel_dp->attached_connector;
2694 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2695 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2696
2697 if (!HAS_PSR(dev_priv))
2698 return;
2699
2700 /*
2701 * The HSW spec explicitly says PSR is tied to port A.
2702 * BDW+ platforms have an instance of PSR registers per transcoder, but
2703 * BDW, GEN9 and GEN11 are not validated by the HW team on any transcoder
2704 * other than the eDP one.
2705 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
2706 * so let's keep it hardcoded to PORT_A for those platforms.
2707 * GEN12, however, supports an instance of PSR registers per transcoder.
2708 */
2709 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2710 drm_dbg_kms(&dev_priv->drm,
2711 "PSR condition failed: Port not supported\n");
2712 return;
2713 }
2714
2715 intel_dp->psr.source_support = true;
2716
2717 /* Set link_standby vs. link_off defaults */
2718 if (DISPLAY_VER(dev_priv) < 12)
2719 /* For platforms up to TGL, respect the VBT setting again */
2720 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2721
2722 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2723 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2724 mutex_init(&intel_dp->psr.lock);
2725 }
2726
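/*
 * Read the sink's PSR status and PSR error status DPCD registers.
 * Returns 0 on success with @status reduced to the sink state field,
 * otherwise the raw DPCD read result.
 */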
2727 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2728 u8 *status, u8 *error_status)
2729 {
2730 struct drm_dp_aux *aux = &intel_dp->aux;
2731 int ret;
2732
2733 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2734 if (ret != 1)
2735 return ret;
2736
2737 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2738 if (ret != 1)
2739 return ret;
2740
2741 *status = *status & DP_PSR_SINK_STATE_MASK;
2742
2743 return 0;
2744 }
2745
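/*
 * ALPM is only used together with PSR2: check the sink's ALPM status and,
 * on a lock timeout error, disable PSR, mark the sink as not reliable and
 * clear the error bit on the sink.
 */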
2746 static void psr_alpm_check(struct intel_dp *intel_dp)
2747 {
2748 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2749 struct drm_dp_aux *aux = &intel_dp->aux;
2750 struct intel_psr *psr = &intel_dp->psr;
2751 u8 val;
2752 int r;
2753
2754 if (!psr->psr2_enabled)
2755 return;
2756
2757 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2758 if (r != 1) {
2759 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2760 return;
2761 }
2762
2763 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2764 intel_psr_disable_locked(intel_dp);
2765 psr->sink_not_reliable = true;
2766 drm_dbg_kms(&dev_priv->drm,
2767 "ALPM lock timeout error, disabling PSR\n");
2768
2769 /* Clearing error */
2770 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2771 }
2772 }
2773
2774 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2775 {
2776 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2777 struct intel_psr *psr = &intel_dp->psr;
2778 u8 val;
2779 int r;
2780
2781 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2782 if (r != 1) {
2783 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2784 return;
2785 }
2786
2787 if (val & DP_PSR_CAPS_CHANGE) {
2788 intel_psr_disable_locked(intel_dp);
2789 psr->sink_not_reliable = true;
2790 drm_dbg_kms(&dev_priv->drm,
2791 "Sink PSR capability changed, disabling PSR\n");
2792
2793 /* Clearing it */
2794 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2795 }
2796 }
2797
2798 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2799 {
2800 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2801 struct intel_psr *psr = &intel_dp->psr;
2802 u8 status, error_status;
2803 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2804 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2805 DP_PSR_LINK_CRC_ERROR;
2806
2807 if (!CAN_PSR(intel_dp))
2808 return;
2809
2810 mutex_lock(&psr->lock);
2811
2812 if (!psr->enabled)
2813 goto exit;
2814
2815 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2816 drm_err(&dev_priv->drm,
2817 "Error reading PSR status or error status\n");
2818 goto exit;
2819 }
2820
2821 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2822 intel_psr_disable_locked(intel_dp);
2823 psr->sink_not_reliable = true;
2824 }
2825
2826 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2827 drm_dbg_kms(&dev_priv->drm,
2828 "PSR sink internal error, disabling PSR\n");
2829 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2830 drm_dbg_kms(&dev_priv->drm,
2831 "PSR RFB storage error, disabling PSR\n");
2832 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2833 drm_dbg_kms(&dev_priv->drm,
2834 "PSR VSC SDP uncorrectable error, disabling PSR\n");
2835 if (error_status & DP_PSR_LINK_CRC_ERROR)
2836 drm_dbg_kms(&dev_priv->drm,
2837 "PSR Link CRC error, disabling PSR\n");
2838
2839 if (error_status & ~errors)
2840 drm_err(&dev_priv->drm,
2841 "PSR_ERROR_STATUS unhandled errors %x\n",
2842 error_status & ~errors);
2843 /* clear status register */
2844 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2845
2846 psr_alpm_check(intel_dp);
2847 psr_capability_changed_check(intel_dp);
2848
2849 exit:
2850 mutex_unlock(&psr->lock);
2851 }
2852
2853 bool intel_psr_enabled(struct intel_dp *intel_dp)
2854 {
2855 bool ret;
2856
2857 if (!CAN_PSR(intel_dp))
2858 return false;
2859
2860 mutex_lock(&intel_dp->psr.lock);
2861 ret = intel_dp->psr.enabled;
2862 mutex_unlock(&intel_dp->psr.lock);
2863
2864 return ret;
2865 }
2866
2867 /**
2868 * intel_psr_lock - grab PSR lock
2869 * @crtc_state: the crtc state
2870 *
2871 * This is initially meant to be used around the CRTC update, when
2872 * vblank-sensitive registers are updated and we need to grab the lock
2873 * beforehand to avoid vblank evasion.
2874 */
2875 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2876 {
2877 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2878 struct intel_encoder *encoder;
2879
2880 if (!crtc_state->has_psr)
2881 return;
2882
2883 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2884 crtc_state->uapi.encoder_mask) {
2885 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2886
2887 mutex_lock(&intel_dp->psr.lock);
2888 break;
2889 }
2890 }
2891
2892 /**
2893 * intel_psr_unlock - release PSR lock
2894 * @crtc_state: the crtc state
2895 *
2896 * Release the PSR lock that was held during pipe update.
2897 */
2898 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2899 {
2900 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2901 struct intel_encoder *encoder;
2902
2903 if (!crtc_state->has_psr)
2904 return;
2905
2906 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2907 crtc_state->uapi.encoder_mask) {
2908 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2909
2910 mutex_unlock(&intel_dp->psr.lock);
2911 break;
2912 }
2913 }
2914
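/*
 * debugfs helper: decode the live state field of the source PSR1/PSR2
 * status register into a human readable name.
 */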
2915 static void
2916 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2917 {
2918 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2919 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2920 const char *status = "unknown";
2921 u32 val, status_val;
2922
2923 if (intel_dp->psr.psr2_enabled) {
2924 static const char * const live_status[] = {
2925 "IDLE",
2926 "CAPTURE",
2927 "CAPTURE_FS",
2928 "SLEEP",
2929 "BUFON_FW",
2930 "ML_UP",
2931 "SU_STANDBY",
2932 "FAST_SLEEP",
2933 "DEEP_SLEEP",
2934 "BUF_ON",
2935 "TG_ON"
2936 };
2937 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2938 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2939 if (status_val < ARRAY_SIZE(live_status))
2940 status = live_status[status_val];
2941 } else {
2942 static const char * const live_status[] = {
2943 "IDLE",
2944 "SRDONACK",
2945 "SRDENT",
2946 "BUFOFF",
2947 "BUFON",
2948 "AUXACK",
2949 "SRDOFFACK",
2950 "SRDENT_ON",
2951 };
2952 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
2953 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2954 if (status_val < ARRAY_SIZE(live_status))
2955 status = live_status[status_val];
2956 }
2957
2958 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2959 }
2960
2961 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2962 {
2963 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2964 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2965 struct intel_psr *psr = &intel_dp->psr;
2966 intel_wakeref_t wakeref;
2967 const char *status;
2968 bool enabled;
2969 u32 val;
2970
2971 seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2972 if (psr->sink_support)
2973 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2974 seq_puts(m, "\n");
2975
2976 if (!psr->sink_support)
2977 return 0;
2978
2979 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2980 mutex_lock(&psr->lock);
2981
2982 if (psr->enabled)
2983 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2984 else
2985 status = "disabled";
2986 seq_printf(m, "PSR mode: %s\n", status);
2987
2988 if (!psr->enabled) {
2989 seq_printf(m, "PSR sink not reliable: %s\n",
2990 str_yes_no(psr->sink_not_reliable));
2991
2992 goto unlock;
2993 }
2994
2995 if (psr->psr2_enabled) {
2996 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
2997 enabled = val & EDP_PSR2_ENABLE;
2998 } else {
2999 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3000 enabled = val & EDP_PSR_ENABLE;
3001 }
3002 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
3003 str_enabled_disabled(enabled), val);
3004 psr_source_status(intel_dp, m);
3005 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3006 psr->busy_frontbuffer_bits);
3007
3008 /*
3009 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3010 */
3011 val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3012 seq_printf(m, "Performance counter: %u\n",
3013 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3014
3015 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3016 seq_printf(m, "Last attempted entry at: %lld\n",
3017 psr->last_entry_attempt);
3018 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3019 }
3020
3021 if (psr->psr2_enabled) {
3022 u32 su_frames_val[3];
3023 int frame;
3024
3025 /*
3026 * Read all 3 registers beforehand to minimize the chance of crossing a
3027 * frame boundary between register reads
3028 */
3029 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3030 val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3031 su_frames_val[frame / 3] = val;
3032 }
3033
3034 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3035
3036 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3037 u32 su_blocks;
3038
3039 su_blocks = su_frames_val[frame / 3] &
3040 PSR2_SU_STATUS_MASK(frame);
3041 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3042 seq_printf(m, "%d\t%d\n", frame, su_blocks);
3043 }
3044
3045 seq_printf(m, "PSR2 selective fetch: %s\n",
3046 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3047 }
3048
3049 unlock:
3050 mutex_unlock(&psr->lock);
3051 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3052
3053 return 0;
3054 }
3055
3056 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3057 {
3058 struct drm_i915_private *dev_priv = m->private;
3059 struct intel_dp *intel_dp = NULL;
3060 struct intel_encoder *encoder;
3061
3062 if (!HAS_PSR(dev_priv))
3063 return -ENODEV;
3064
3065 /* Find the first EDP which supports PSR */
3066 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3067 intel_dp = enc_to_intel_dp(encoder);
3068 break;
3069 }
3070
3071 if (!intel_dp)
3072 return -ENODEV;
3073
3074 return intel_psr_status(m, intel_dp);
3075 }
3076 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3077
3078 static int
3079 i915_edp_psr_debug_set(void *data, u64 val)
3080 {
3081 struct drm_i915_private *dev_priv = data;
3082 struct intel_encoder *encoder;
3083 intel_wakeref_t wakeref;
3084 int ret = -ENODEV;
3085
3086 if (!HAS_PSR(dev_priv))
3087 return ret;
3088
3089 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3090 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3091
3092 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3093
3094 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3095
3096 // TODO: split to each transcoder's PSR debug state
3097 ret = intel_psr_debug_set(intel_dp, val);
3098
3099 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3100 }
3101
3102 return ret;
3103 }
3104
3105 static int
3106 i915_edp_psr_debug_get(void *data, u64 *val)
3107 {
3108 struct drm_i915_private *dev_priv = data;
3109 struct intel_encoder *encoder;
3110
3111 if (!HAS_PSR(dev_priv))
3112 return -ENODEV;
3113
3114 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3115 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3116
3117 // TODO: split to each transcoder's PSR debug state
3118 *val = READ_ONCE(intel_dp->psr.debug);
3119 return 0;
3120 }
3121
3122 return -ENODEV;
3123 }
3124
3125 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3126 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3127 "%llu\n");
3128
3129 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3130 {
3131 struct drm_minor *minor = i915->drm.primary;
3132
3133 debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3134 i915, &i915_edp_psr_debug_fops);
3135
3136 debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3137 i915, &i915_edp_psr_status_fops);
3138 }
3139
3140 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3141 {
3142 struct intel_connector *connector = m->private;
3143 struct intel_dp *intel_dp = intel_attached_dp(connector);
3144 static const char * const sink_status[] = {
3145 "inactive",
3146 "transition to active, capture and display",
3147 "active, display from RFB",
3148 "active, capture and display on sink device timings",
3149 "transition to inactive, capture and display, timing re-sync",
3150 "reserved",
3151 "reserved",
3152 "sink internal error",
3153 };
3154 const char *str;
3155 int ret;
3156 u8 val;
3157
3158 if (!CAN_PSR(intel_dp)) {
3159 seq_puts(m, "PSR Unsupported\n");
3160 return -ENODEV;
3161 }
3162
3163 if (connector->base.status != connector_status_connected)
3164 return -ENODEV;
3165
3166 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
3167 if (ret != 1)
3168 return ret < 0 ? ret : -EIO;
3169
3170 val &= DP_PSR_SINK_STATE_MASK;
3171 if (val < ARRAY_SIZE(sink_status))
3172 str = sink_status[val];
3173 else
3174 str = "unknown";
3175
3176 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
3177
3178 return 0;
3179 }
3180 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3181
3182 static int i915_psr_status_show(struct seq_file *m, void *data)
3183 {
3184 struct intel_connector *connector = m->private;
3185 struct intel_dp *intel_dp = intel_attached_dp(connector);
3186
3187 return intel_psr_status(m, intel_dp);
3188 }
3189 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3190
3191 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3192 {
3193 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3194 struct dentry *root = connector->base.debugfs_entry;
3195
3196 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3197 return;
3198
3199 debugfs_create_file("i915_psr_sink_status", 0444, root,
3200 connector, &i915_psr_sink_status_fops);
3201
3202 if (HAS_PSR(i915))
3203 debugfs_create_file("i915_psr_status", 0444, root,
3204 connector, &i915_psr_status_fops);
3205 }
3206