/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * while the system is idle but the display is on, as it completely eliminates
 * display refresh requests to DDR memory as long as the frame buffer for that
 * display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
	u32 debug_mask, mask;

	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8) {
		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
			EDP_PSR_ERROR(TRANSCODER_B) |
			EDP_PSR_ERROR(TRANSCODER_C);

		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
	}

	if (debug)
		mask |= debug_mask;

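	/*
	 * A set bit in EDP_PSR_IMR masks the corresponding interrupt, so
	 * write the complement of everything we want to keep unmasked.
	 */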
	WRITE_ONCE(dev_priv->psr.debug, debug);
	I915_WRITE(EDP_PSR_IMR, ~mask);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		/* FIXME: Exit PSR and link train manually when this happens. */
		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
				      transcoder_name(cpu_transcoder));

		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}
	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

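	/*
	 * In the SDP header below, HB2 carries the VSC revision and HB3 the
	 * number of valid data bytes; the values follow the spec tables
	 * referenced in each branch.
	 */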
	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	u32 aux_clock_divider, aux_ctl;
	int i;
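	/*
	 * aux_msg below is a native AUX write of DP_SET_POWER_D0 to the
	 * DP_SET_POWER register; per the AUX header format, byte 3 carries
	 * the payload length minus one, hence the "1 - 1".
	 */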
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
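	/*
	 * The hardware provides 20 bytes of AUX_DATA registers;
	 * intel_dp_pack_aux() packs up to four message bytes into each
	 * 32-bit register.
	 */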
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2;
	}

	if (dev_priv->psr.link_standby)
		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
	if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
		dpcd_val |= DP_PSR_CRC_VERIFICATION;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* A sink_sync_latency of 8 means the source has to wait for more
	 * than 8 frames; we'll go with 9 frames for now.
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

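	/* Round the VBT wakeup times up to the next value the hardware can
	 * actually be programmed with.
	 */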
	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

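	/* Program one frame more than the sink's reported resync latency as
	 * the number of frames to send before selective update.
	 */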
	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	I915_WRITE(EDP_PSR2_CTL, val);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by display design transcoder EDP is tied to port A
	 * we can safely escape based on port A alone.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

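	/*
	 * The sink needs psr_setup_time to get ready for PSR entry, which
	 * has to fit within the vertical blank; otherwise PSR can never be
	 * entered with this mode.
	 */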
	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled) {
		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));

		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
				   | PSR2_ADD_VERTICAL_LINE_COUNT);
		else
			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per spec: avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid a dependency on other
		 * drivers that might block runtime_pm, besides preventing
		 * other HW tracking issues, now that we can rely on
		 * frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP);
	}
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void
intel_psr_disable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.psr2_enabled) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_enabled)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	intel_psr_disable_source(intel_dp);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	intel_psr_disable_locked(intel_dp);
	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}

int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg;
	u32 mask;

	if (!new_crtc_state->has_psr)
		return 0;

	/*
	 * The sole user right now is intel_pipe_update_start(),
	 * which won't race with psr_enable/disable, which is
	 * where psr2_enabled is written to. So, we don't need
	 * to acquire the psr.lock. More importantly, we want the
	 * latency inside intel_pipe_update_start() to be as low
	 * as possible, so no need to acquire psr.lock when it is
	 * not needed and will induce latencies in the atomic
	 * update path.
	 */
	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/*
	 * Max time for PSR to idle = inverse of the refresh rate +
	 * 6 ms of exit training time + 1.5 ms of aux channel
	 * handshake. 50 ms is defensive enough to cover everything.
	 */
	return intel_wait_for_register(dev_priv, reg, mask,
				       EDP_PSR_STATUS_STATE_IDLE, 50);
}

static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp;
	i915_reg_t reg;
	u32 mask;
	int err;

	intel_dp = dev_priv->psr.enabled;
	if (!intel_dp)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

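	/*
	 * Drop the lock while waiting so that frontbuffer invalidate/flush
	 * callers are not blocked for up to the 50 ms timeout below.
	 */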
	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	/*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.enabled);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits) {
		if (dev_priv->psr.psr2_enabled) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
			 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
			 * but it makes more sense to write to the currently
			 * active pipe.
			 */
			I915_WRITE(CURSURFLIVE(pipe), 0);
		}
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1) {
		i915_modparams.enable_psr = dev_priv->vbt.psr.enable;

		/* Per platform default: all disabled. */
		i915_modparams.enable_psr = 0;
	}

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (psr->enabled != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors)
		intel_psr_disable_locked(intel_dp);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);

	/* TODO: handle PSR2 errors */
exit:
	mutex_unlock(&psr->lock);
}