1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/circ_buf.h>
32 #include <linux/cpuidle.h>
33 #include <linux/slab.h>
34 #include <linux/sysrq.h>
35 
36 #include <drm/drm_drv.h>
37 #include <drm/drm_irq.h>
38 #include <drm/i915_drm.h>
39 
40 #include "display/intel_display_types.h"
41 #include "display/intel_fifo_underrun.h"
42 #include "display/intel_hotplug.h"
43 #include "display/intel_lpe_audio.h"
44 #include "display/intel_psr.h"
45 
46 #include "gt/intel_gt.h"
47 #include "gt/intel_gt_irq.h"
48 #include "gt/intel_gt_pm_irq.h"
49 
50 #include "i915_drv.h"
51 #include "i915_irq.h"
52 #include "i915_trace.h"
53 #include "intel_pm.h"
54 
55 /**
56  * DOC: interrupt handling
57  *
58  * These functions provide the basic support for enabling and disabling the
59  * interrupt handling support. There's a lot more functionality in i915_irq.c
60  * and related files, but that will be described in separate chapters.
61  */
62 
63 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
64 
/*
 * Per-platform hotplug (HPD) pin tables.
 *
 * Each table maps an hpd_pin to the platform-specific hotplug
 * trigger/enable/status bit(s) in the relevant hotplug register.
 * Pins that are not listed stay zero, i.e. not wired up on that
 * platform.
 */

/* ILK/SNB: DP-A hotplug lives in the north display (DE) registers. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB/HSW: same idea as ILK, different bit position. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW+: DP-A hotplug moved to the DE port interrupt register. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

/* IBX south display (PCH) hotplug bits. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

/* CPT/PPT PCH hotplug bits. */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

/* SPT PCH: adds port A and E on top of the CPT layout. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

/* Gen2-4 (non-PCH): enable bits in PORT_HOTPLUG_EN. */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

/* G4x: status bits (SDVO status bits differ from pre-G4x). */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* Pre-G4x status bits. */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* Gen11: each TC port triggers on either its TC or its TBT bit. */
static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

/* Gen12: like gen11 but shifted pins and two extra TC ports. */
static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
};

/* ICP PCH: two combo DDI ports plus four TC ports. */
static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* MCC PCH: subset of ICP with a single TC port. */
static const u32 hpd_mcc[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
};

/* TGP PCH: three DDI ports and six TC ports. */
static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
	[HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
	[HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
	[HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
};
177 
/*
 * gen3_irq_reset - quiesce a gen3+ style IMR/IER/IIR register triplet.
 * @uncore: MMIO accessor
 * @imr: interrupt mask register
 * @iir: interrupt identity (status) register
 * @ier: interrupt enable register
 *
 * Masks everything, disables delivery and clears any latched events.
 * The posting reads flush each write to the hardware before the next
 * step, so the ordering here is deliberate.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	/* Mask all sources first so nothing new can latch in IIR. */
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}
192 
/*
 * gen2_irq_reset - quiesce the 16-bit gen2 IMR/IER/IIR registers.
 * @uncore: MMIO accessor
 *
 * Same sequence as gen3_irq_reset(), but gen2 has a single, fixed set
 * of 16-bit wide interrupt registers.
 */
void gen2_irq_reset(struct intel_uncore *uncore)
{
	/* Mask all sources first so nothing new can latch in IIR. */
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
206 
207 /*
208  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
209  */
/*
 * Verify that an IIR register reads back as zero; if it does not,
 * WARN and forcibly clear it (twice, since IIR can queue two events).
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
224 
/*
 * 16-bit gen2 counterpart of gen3_assert_iir_is_zero(): WARN if GEN2_IIR
 * is non-zero and forcibly clear it (twice, as IIR can queue two events).
 */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
239 
/*
 * gen3_irq_init - program a gen3+ style IMR/IER/IIR triplet.
 * @uncore: MMIO accessor
 * @imr: interrupt mask register
 * @imr_val: value to program into @imr
 * @ier: interrupt enable register
 * @ier_val: value to program into @ier
 * @iir: interrupt identity register, asserted to be clear first
 *
 * IIR is checked before enabling/unmasking so no stale event fires the
 * moment interrupts come on; the posting read flushes the IMR write.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}
251 
/*
 * gen2_irq_init - program the 16-bit gen2 IMR/IER registers.
 * @uncore: MMIO accessor
 * @imr_val: value to program into GEN2_IMR
 * @ier_val: value to program into GEN2_IER
 *
 * Mirrors gen3_irq_init(): assert IIR is clear, then enable and unmask.
 */
void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
261 
262 /* For display hotplug interrupt */
263 static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private * dev_priv,u32 mask,u32 bits)264 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
265 				     u32 mask,
266 				     u32 bits)
267 {
268 	u32 val;
269 
270 	lockdep_assert_held(&dev_priv->irq_lock);
271 	WARN_ON(bits & ~mask);
272 
273 	val = I915_READ(PORT_HOTPLUG_EN);
274 	val &= ~mask;
275 	val |= bits;
276 	I915_WRITE(PORT_HOTPLUG_EN, val);
277 }
278 
279 /**
280  * i915_hotplug_interrupt_update - update hotplug interrupt enable
281  * @dev_priv: driver private
282  * @mask: bits to update
283  * @bits: bits to enable
284  * NOTE: the HPD enable bits are modified both inside and outside
285  * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
287  * function is usually not called from a context where the lock is
288  * held already, this function acquires the lock itself. A non-locking
289  * version is also available.
290  */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	/* Locking wrapper around the _locked variant; see kernel-doc above. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
299 
300 /**
301  * ilk_update_display_irq - update DEIMR
302  * @dev_priv: driver private
303  * @interrupt_mask: mask of interrupt bits to update
304  * @enabled_irq_mask: mask of interrupt bits to enable
305  */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabling bits outside the updated mask would be silently lost. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	/*
	 * DEIMR is a *mask* register: 0 = enabled. Update the cached copy
	 * and set the masked (disabled) bits for everything in
	 * interrupt_mask that is not being enabled.
	 */
	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Skip the MMIO write (and posting read) if nothing changed. */
	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
329 
gen6_pm_iir(struct drm_i915_private * dev_priv)330 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
331 {
332 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
333 
334 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
335 }
336 
/*
 * gen11_reset_rps_interrupts - drain pending gen11+ GTPM (RPS) events.
 * @dev_priv: i915 device
 *
 * Clears the GTPM IIR repeatedly until no event remains latched, then
 * forgets any cached pm_iir state, all under the GT irq_lock.
 */
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = &dev_priv->gt;

	spin_lock_irq(&gt->irq_lock);

	/* Loop: each pass clears one latched event; stop when IIR is clean. */
	while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&gt->irq_lock);
}
350 
/*
 * gen6_reset_rps_interrupts - clear pending gen6-10 RPS PM events.
 * @dev_priv: i915 device
 *
 * Resets the PM IIR bits for the RPS events and drops the cached
 * pm_iir state, under the GT irq_lock.
 */
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = &dev_priv->gt;

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}
360 
/*
 * gen6_enable_rps_interrupts - unmask the RPS up/down PM interrupts.
 * @dev_priv: i915 device
 *
 * No-op if already enabled. Before unmasking, verifies that no stale
 * PM event is still latched, since such an event would fire as soon as
 * the interrupt is enabled.
 */
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = &dev_priv->gt;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* Lockless fast path for the common already-enabled case. */
	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	/* Complain about (and on gen11, clear) any stale latched event. */
	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);

	spin_unlock_irq(&gt->irq_lock);
}
382 
/*
 * Strip the bits that must be zero ("mbz") in GEN6_PMINTRMSK on this
 * platform from @mask before it is written to the register.
 */
u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
{
	u32 mbz = i915->gt_pm.rps.pm_intrmsk_mbz;

	return mask & ~mbz;
}
387 
/*
 * gen6_disable_rps_interrupts - mask RPS interrupts and flush handlers.
 * @dev_priv: i915 device
 *
 * Teardown ordering matters: mark disabled and mask under the lock,
 * then synchronize with any in-flight interrupt, then cancel the RPS
 * worker, and only then reset the (now quiescent) IIR state.
 */
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_gt *gt = &dev_priv->gt;

	/* Lockless fast path for the common already-disabled case. */
	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&gt->irq_lock);
	rps->interrupts_enabled = false;

	/* Mask everything at the PMINTRMSK level as well. */
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&gt->irq_lock);
	/* Wait for any interrupt handler still running to finish. */
	intel_synchronize_irq(dev_priv);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}
417 
/*
 * gen9_reset_guc_interrupts - clear pending gen9/10 GuC PM events.
 * @guc: the GuC whose events to clear
 *
 * Requires a runtime-pm wakeref since this touches PM IIR registers.
 */
void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}
428 
/*
 * gen9_enable_guc_interrupts - unmask the gen9/10 GuC-to-host events.
 * @guc: the GuC to enable interrupts for
 *
 * No-op if already enabled. WARNs if a stale GuC event is still
 * latched in PM IIR, as it would fire immediately after unmasking.
 */
void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore,
					       gen6_pm_iir(gt->i915)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}
445 
/*
 * gen9_disable_guc_interrupts - mask gen9/10 GuC events and drain.
 * @guc: the GuC to disable interrupts for
 *
 * Masks under the lock, synchronizes with in-flight handlers, then
 * clears anything that latched in the meantime.
 */
void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	/* Wait for any interrupt handler still running to finish. */
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}
462 
/*
 * gen11_reset_guc_interrupts - clear any latched gen11+ GuC event.
 * @guc: the GuC whose IIR state to clear
 */
void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}
471 
/*
 * gen11_enable_guc_interrupts - unmask the gen11+ GuC-to-host event.
 * @guc: the GuC to enable interrupts for
 *
 * No-op if already enabled. Checks for (and WARNs about) a stale
 * latched event before programming the SG enable/mask registers.
 */
void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		/* Enable the event, then unmask exactly that event. */
		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}
487 
/*
 * gen11_disable_guc_interrupts - mask gen11+ GuC events and drain.
 * @guc: the GuC to disable interrupts for
 *
 * Masks everything and disables the event source, synchronizes with
 * in-flight handlers, then clears anything that latched meanwhile.
 */
void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	/* Wait for any interrupt handler still running to finish. */
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}
503 
504 /**
505  * bdw_update_port_irq - update DE port interrupt
506  * @dev_priv: driver private
507  * @interrupt_mask: mask of interrupt bits to update
508  * @enabled_irq_mask: mask of interrupt bits to enable
509  */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabling bits outside the updated mask would be silently lost. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	/*
	 * IMR semantics: 0 = enabled. Set the mask bit for everything in
	 * interrupt_mask that is not being enabled.
	 */
	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Skip the MMIO write (and posting read) if nothing changed. */
	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}
535 
536 /**
537  * bdw_update_pipe_irq - update DE pipe interrupt
538  * @dev_priv: driver private
539  * @pipe: pipe whose interrupt to update
540  * @interrupt_mask: mask of interrupt bits to update
541  * @enabled_irq_mask: mask of interrupt bits to enable
542  */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabling bits outside the updated mask would be silently lost. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	/*
	 * Work from the cached per-pipe IMR copy; 0 = enabled. Set the
	 * mask bit for everything in interrupt_mask not being enabled.
	 */
	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Skip the MMIO write (and posting read) if nothing changed. */
	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}
567 
568 /**
569  * ibx_display_interrupt_update - update SDEIMR
570  * @dev_priv: driver private
571  * @interrupt_mask: mask of interrupt bits to update
572  * @enabled_irq_mask: mask of interrupt bits to enable
573  */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr;

	/*
	 * Validate before touching the hardware, matching
	 * ilk_update_display_irq()/bdw_update_port_irq(): previously the
	 * SDEIMR read happened before the lockdep/irqs-enabled checks, so
	 * the early-return path still performed an MMIO read.
	 */
	lockdep_assert_held(&dev_priv->irq_lock);

	/* Enabling bits outside the updated mask would be silently lost. */
	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	/*
	 * SDEIMR semantics: 0 = enabled. Set the mask bit for everything
	 * in interrupt_mask that is not being enabled.
	 */
	sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
592 
/*
 * i915_pipestat_enable_mask - derive PIPESTAT enable bits from status bits.
 * @dev_priv: i915 device
 * @pipe: pipe whose PIPESTAT mask to compute
 *
 * For most events the enable bit sits 16 positions above the status
 * bit, so the default is a simple shift of the cached status mask.
 * On gen5+ (the VLV/CHV-style layout) a few events deviate from that
 * pattern and are patched up explicitly below. Returns 0 on an
 * unsupported status bit.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	/* Default: enable bit = status bit << 16. */
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* These enable bits do not follow the <<16 pattern; fix them up. */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
633 
/*
 * i915_enable_pipestat - enable PIPESTAT events on a pipe.
 * @dev_priv: i915 device
 * @pipe: target pipe
 * @status_mask: PIPESTAT status bits to enable
 *
 * Caller must hold irq_lock. Writing the status bits back together
 * with the enable bits also acks any already-latched events.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	/* Nothing to do if all requested bits are already enabled. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
656 
/*
 * i915_disable_pipestat - disable PIPESTAT events on a pipe.
 * @dev_priv: i915 device
 * @pipe: target pipe
 * @status_mask: PIPESTAT status bits to disable
 *
 * Caller must hold irq_lock. Mirror image of i915_enable_pipestat().
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	/* Nothing to do if none of the requested bits are enabled. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
679 
i915_has_asle(struct drm_i915_private * dev_priv)680 static bool i915_has_asle(struct drm_i915_private *dev_priv)
681 {
682 	if (!dev_priv->opregion.asle)
683 		return false;
684 
685 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
686 }
687 
688 /**
689  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
690  * @dev_priv: i915 device private
691  */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	/* Bail out silently when the platform has no usable ASLE. */
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Legacy backlight events arrive on pipe B; gen4+ also on pipe A. */
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
706 
707 /*
708  * This timing diagram depicts the video signal in and
709  * around the vertical blanking period.
710  *
711  * Assumptions about the fictitious mode used in this example:
712  *  vblank_start >= 3
713  *  vsync_start = vblank_start + 1
714  *  vsync_end = vblank_start + 2
715  *  vtotal = vblank_start + 3
716  *
717  *           start of vblank:
718  *           latch double buffered registers
719  *           increment frame counter (ctg+)
720  *           generate start of vblank interrupt (gen4+)
721  *           |
722  *           |          frame start:
723  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
724  *           |          may be shifted forward 1-3 extra lines via PIPECONF
725  *           |          |
726  *           |          |  start of vsync:
727  *           |          |  generate vsync interrupt
728  *           |          |  |
729  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
730  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
731  * ----va---> <-----------------vb--------------------> <--------va-------------
732  *       |          |       <----vs----->                     |
733  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
734  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
735  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
736  *       |          |                                         |
737  *       last visible pixel                                   first visible pixel
738  *                  |                                         increment frame counter (gen3/4)
739  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
740  *
741  * x  = horizontal active
742  * _  = horizontal blanking
743  * hs = horizontal sync
744  * va = vertical active
745  * vb = vertical blanking
746  * vs = vertical sync
747  * vbs = vblank_start (number)
748  *
749  * Summary:
750  * - most events happen at the start of horizontal sync
751  * - frame start happens at the start of horizontal blank, 1-4 lines
752  *   (depending on PIPECONF settings) after the start of vblank
753  * - gen3/4 pixel and frame counter are synchronized with the start
754  *   of horizontal active on the first line of vertical active
755  */
756 
757 /* Called from drm generic code, passed a 'crtc', which
758  * we use as a pipe index
759  */
/*
 * i915_get_vblank_counter - software-cooked vblank counter for gen3/4.
 * @crtc: the CRTC (used as pipe index) to read the counter for
 *
 * The hardware frame counter on these platforms increments at start of
 * active, not start of vblank, so this combines the frame counter with
 * the pixel counter to produce a counter that effectively increments
 * at vblank start. Returns a 24-bit counter value, or 0 when no
 * working frame counter is available (see comment below).
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	/* Interlaced modes count in half-frames; round up to a full one. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	/* Low register holds both the frame-low bits and the pixel counter. */
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
825 
g4x_get_vblank_counter(struct drm_crtc * crtc)826 u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
827 {
828 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
829 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
830 
831 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
832 }
833 
834 /*
835  * On certain encoders on certain platforms, pipe
836  * scanline register will not work to get the scanline,
837  * since the timings are driven from the PORT or issues
838  * with scanline register updates.
839  * This function will use Framestamp and current
840  * timestamp registers to calculate the scanline.
841  */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		/* Re-read the frame timestamp; retry if it changed. */
		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	/*
	 * Elapsed time since vblank start, converted to scanlines:
	 * clock is presumably in kHz, hence the 1000 * htotal divisor
	 * (TODO confirm units against the register definitions).
	 */
	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	/* Clamp to the last line, then rebase from vblank start. */
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
884 
885 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
/*
 * Return the current scanline of @crtc, adjusted by scanline_offset,
 * or -1 if the CRTC is inactive. Uses raw (_FW) register reads; see
 * the comment above about forcewake.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	/* Some encoders need the timestamp-based fallback; see above. */
	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	/* Interlaced modes scan half the lines per field. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	/* DSL line-number field width differs between gen2 and gen3+. */
	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squak at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		/* Poll up to 100us for a non-zero (i.e. real) readback. */
		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
944 
/*
 * i915_get_crtc_scanoutpos - sample the current scanout position
 * @dev: drm device
 * @pipe: pipe to query
 * @in_vblank_irq: unused here; part of the drm helper signature
 * @vpos/@hpos: out parameters for vertical/horizontal position
 * @stime/@etime: optional system timestamps bracketing the query
 * @mode: adjusted hardware mode for the pipe
 *
 * Returns true on success. On return, *vpos is negative while inside
 * vblank (counting up towards 0 at vbl_end) and positive outside it —
 * see the adjustment at the bottom of this function.
 */
bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			      bool in_vblank_irq, int *vpos, int *hpos,
			      ktime_t *stime, ktime_t *etime,
			      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	/*
	 * Pre-gen5/non-g4x (except gen2) platforms use the pixel counter
	 * instead of the scanline counter; everything else reads PIPEDSL.
	 */
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Halve the vertical timings for one interlaced field. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		/* Scanline counter has no horizontal resolution. */
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
1061 
intel_get_crtc_scanline(struct intel_crtc * crtc)1062 int intel_get_crtc_scanline(struct intel_crtc *crtc)
1063 {
1064 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1065 	unsigned long irqflags;
1066 	int position;
1067 
1068 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1069 	position = __intel_get_crtc_scanline(crtc);
1070 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1071 
1072 	return position;
1073 }
1074 
ironlake_rps_change_irq_handler(struct drm_i915_private * dev_priv)1075 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1076 {
1077 	struct intel_uncore *uncore = &dev_priv->uncore;
1078 	u32 busy_up, busy_down, max_avg, min_avg;
1079 	u8 new_delay;
1080 
1081 	spin_lock(&mchdev_lock);
1082 
1083 	intel_uncore_write16(uncore,
1084 			     MEMINTRSTS,
1085 			     intel_uncore_read(uncore, MEMINTRSTS));
1086 
1087 	new_delay = dev_priv->ips.cur_delay;
1088 
1089 	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
1090 	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
1091 	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
1092 	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
1093 	min_avg = intel_uncore_read(uncore, RCBMINAVG);
1094 
1095 	/* Handle RCS change request from hw */
1096 	if (busy_up > max_avg) {
1097 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1098 			new_delay = dev_priv->ips.cur_delay - 1;
1099 		if (new_delay < dev_priv->ips.max_delay)
1100 			new_delay = dev_priv->ips.max_delay;
1101 	} else if (busy_down < min_avg) {
1102 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1103 			new_delay = dev_priv->ips.cur_delay + 1;
1104 		if (new_delay > dev_priv->ips.min_delay)
1105 			new_delay = dev_priv->ips.min_delay;
1106 	}
1107 
1108 	if (ironlake_set_drps(dev_priv, new_delay))
1109 		dev_priv->ips.cur_delay = new_delay;
1110 
1111 	spin_unlock(&mchdev_lock);
1112 
1113 	return;
1114 }
1115 
/* Snapshot the render/media C0 residency counters plus a raw timestamp. */
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
1123 
/*
 * Discard the last evaluation-interval sample so the next vlv_wa_c0_ei()
 * call starts a fresh interval (prev->ktime == 0 skips the comparison).
 */
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}
1128 
/*
 * Evaluation-interval workaround ("wa") for vlv/chv: derive RPS up/down
 * threshold events in software from the C0 residency counters whenever
 * the hardware EI-expired interrupt fires. Returns the extra event bits
 * to OR into pm_iir (0 if no EI expired or no threshold crossed).
 */
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	/* ktime == 0 means the previous sample was reset; just record now. */
	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		/* Compare scaled busyness against the current power-mode thresholds. */
		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
1168 
/*
 * gen6_pm_rps_work - bottom half for RPS (GPU frequency) interrupts.
 *
 * Reads the accumulated pm_iir bits (stashed by the IRQ handlers under
 * gt->irq_lock), decides a new frequency based on up/down threshold and
 * timeout events plus any client waitboost, applies it via intel_set_rps(),
 * and finally unmasks the RPS interrupts again.
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_gt *gt = &dev_priv->gt;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	/* Snapshot and clear the pending events under the irq lock. */
	spin_lock_irq(&gt->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&rps->lock);

	/* vlv/chv derive threshold events in software; no-op elsewhere. */
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	/* A waitboost may exceed the user's softlimit, up to the hw max. */
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		/* Accelerate: double the step while we keep going up. */
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		/* Idle timeout: drop straight to the efficient/min frequency. */
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		/* Accelerate downwards symmetrically. */
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/*
	 * Limit deboosting and boosting to keep ourselves at the extremes
	 * when in the respective power modes (i.e. slowly decrease frequencies
	 * while in the HIGH_POWER zone and slowly increase frequencies while
	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
	 * to the next level quickly, and conversely if busy we expect to
	 * hit a waitboost and rapidly switch into max power.
	 */
	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
	    (adj > 0 && rps->power.mode == LOW_POWER))
		rps->last_adj = 0;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&rps->lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&gt->irq_lock);
	if (rps->interrupts_enabled)
		gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
	spin_unlock_irq(&gt->irq_lock);
}
1266 
1267 
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 *
 * Reads the L3CDERRST1 error registers for every slice flagged in
 * l3_parity.which_slice, emits a KOBJ_CHANGE uevent with row/bank/subbank
 * details for each, then re-enables the parity interrupt.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	/* Process each slice that flagged a parity error. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--; /* ffs() is 1-based */
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Clear the error and re-arm detection for this slice. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore the original DOP clock gating setting. */
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
1349 
gen11_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1350 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1351 {
1352 	switch (pin) {
1353 	case HPD_PORT_C:
1354 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1355 	case HPD_PORT_D:
1356 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1357 	case HPD_PORT_E:
1358 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1359 	case HPD_PORT_F:
1360 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1361 	default:
1362 		return false;
1363 	}
1364 }
1365 
gen12_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1366 static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1367 {
1368 	switch (pin) {
1369 	case HPD_PORT_D:
1370 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1371 	case HPD_PORT_E:
1372 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1373 	case HPD_PORT_F:
1374 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1375 	case HPD_PORT_G:
1376 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1377 	case HPD_PORT_H:
1378 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1379 	case HPD_PORT_I:
1380 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
1381 	default:
1382 		return false;
1383 	}
1384 }
1385 
bxt_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1386 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1387 {
1388 	switch (pin) {
1389 	case HPD_PORT_A:
1390 		return val & PORTA_HOTPLUG_LONG_DETECT;
1391 	case HPD_PORT_B:
1392 		return val & PORTB_HOTPLUG_LONG_DETECT;
1393 	case HPD_PORT_C:
1394 		return val & PORTC_HOTPLUG_LONG_DETECT;
1395 	default:
1396 		return false;
1397 	}
1398 }
1399 
icp_ddi_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1400 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1401 {
1402 	switch (pin) {
1403 	case HPD_PORT_A:
1404 		return val & ICP_DDIA_HPD_LONG_DETECT;
1405 	case HPD_PORT_B:
1406 		return val & ICP_DDIB_HPD_LONG_DETECT;
1407 	case HPD_PORT_C:
1408 		return val & TGP_DDIC_HPD_LONG_DETECT;
1409 	default:
1410 		return false;
1411 	}
1412 }
1413 
icp_tc_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1414 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1415 {
1416 	switch (pin) {
1417 	case HPD_PORT_C:
1418 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1419 	case HPD_PORT_D:
1420 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1421 	case HPD_PORT_E:
1422 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1423 	case HPD_PORT_F:
1424 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1425 	default:
1426 		return false;
1427 	}
1428 }
1429 
tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1430 static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1431 {
1432 	switch (pin) {
1433 	case HPD_PORT_A:
1434 		return val & ICP_DDIA_HPD_LONG_DETECT;
1435 	case HPD_PORT_B:
1436 		return val & ICP_DDIB_HPD_LONG_DETECT;
1437 	case HPD_PORT_C:
1438 		return val & TGP_DDIC_HPD_LONG_DETECT;
1439 	default:
1440 		return false;
1441 	}
1442 }
1443 
tgp_tc_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1444 static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1445 {
1446 	switch (pin) {
1447 	case HPD_PORT_D:
1448 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1449 	case HPD_PORT_E:
1450 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1451 	case HPD_PORT_F:
1452 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1453 	case HPD_PORT_G:
1454 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1455 	case HPD_PORT_H:
1456 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1457 	case HPD_PORT_I:
1458 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
1459 	default:
1460 		return false;
1461 	}
1462 }
1463 
spt_port_hotplug2_long_detect(enum hpd_pin pin,u32 val)1464 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1465 {
1466 	switch (pin) {
1467 	case HPD_PORT_E:
1468 		return val & PORTE_HOTPLUG_LONG_DETECT;
1469 	default:
1470 		return false;
1471 	}
1472 }
1473 
spt_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1474 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1475 {
1476 	switch (pin) {
1477 	case HPD_PORT_A:
1478 		return val & PORTA_HOTPLUG_LONG_DETECT;
1479 	case HPD_PORT_B:
1480 		return val & PORTB_HOTPLUG_LONG_DETECT;
1481 	case HPD_PORT_C:
1482 		return val & PORTC_HOTPLUG_LONG_DETECT;
1483 	case HPD_PORT_D:
1484 		return val & PORTD_HOTPLUG_LONG_DETECT;
1485 	default:
1486 		return false;
1487 	}
1488 }
1489 
ilk_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1490 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1491 {
1492 	switch (pin) {
1493 	case HPD_PORT_A:
1494 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1495 	default:
1496 		return false;
1497 	}
1498 }
1499 
pch_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1500 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1501 {
1502 	switch (pin) {
1503 	case HPD_PORT_B:
1504 		return val & PORTB_HOTPLUG_LONG_DETECT;
1505 	case HPD_PORT_C:
1506 		return val & PORTC_HOTPLUG_LONG_DETECT;
1507 	case HPD_PORT_D:
1508 		return val & PORTD_HOTPLUG_LONG_DETECT;
1509 	default:
1510 		return false;
1511 	}
1512 }
1513 
i9xx_port_hotplug_long_detect(enum hpd_pin pin,u32 val)1514 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1515 {
1516 	switch (pin) {
1517 	case HPD_PORT_B:
1518 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1519 	case HPD_PORT_C:
1520 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1521 	case HPD_PORT_D:
1522 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1523 	default:
1524 		return false;
1525 	}
1526 }
1527 
1528 /*
1529  * Get a bit mask of pins that have triggered, and which ones may be long.
1530  * This can be called multiple times with the same masks to accumulate
1531  * hotplug detection results from several registers.
1532  *
1533  * Note that the caller is expected to zero out the masks initially.
1534  */
intel_get_hpd_pins(struct drm_i915_private * dev_priv,u32 * pin_mask,u32 * long_mask,u32 hotplug_trigger,u32 dig_hotplug_reg,const u32 hpd[HPD_NUM_PINS],bool long_pulse_detect (enum hpd_pin pin,u32 val))1535 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1536 			       u32 *pin_mask, u32 *long_mask,
1537 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1538 			       const u32 hpd[HPD_NUM_PINS],
1539 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1540 {
1541 	enum hpd_pin pin;
1542 
1543 	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1544 
1545 	for_each_hpd_pin(pin) {
1546 		if ((hpd[pin] & hotplug_trigger) == 0)
1547 			continue;
1548 
1549 		*pin_mask |= BIT(pin);
1550 
1551 		if (long_pulse_detect(pin, dig_hotplug_reg))
1552 			*long_mask |= BIT(pin);
1553 	}
1554 
1555 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1556 			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1557 
1558 }
1559 
/* Wake all waiters sleeping on the GMBUS wait queue. */
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1564 
/*
 * DP AUX done interrupt: wakes the same queue as GMBUS — AUX waiters
 * apparently share gmbus_wait_queue (NOTE(review): confirm against the
 * AUX transfer wait code).
 */
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
1569 
#if defined(CONFIG_DEBUG_FS)
/*
 * Push one CRC sample for @pipe into the DRM CRC capture machinery,
 * skipping the first sample(s) after enabling since they are unreliable.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
/* Without debugfs there is no CRC consumer; compile to a no-op. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif
1612 
1613 
/* HSW+ exposes a single CRC result register; the other four slots are 0. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
1621 
/* IVB/HSW-era hardware provides five CRC result registers per pipe. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
1632 
i9xx_pipe_crc_irq_handler(struct drm_i915_private * dev_priv,enum pipe pipe)1633 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1634 				      enum pipe pipe)
1635 {
1636 	u32 res1, res2;
1637 
1638 	if (INTEL_GEN(dev_priv) >= 3)
1639 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1640 	else
1641 		res1 = 0;
1642 
1643 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1644 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1645 	else
1646 		res2 = 0;
1647 
1648 	display_pipe_crc_irq_handler(dev_priv, pipe,
1649 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1650 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1651 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1652 				     res1, res2);
1653 }
1654 
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &i915->gt_pm.rps;
	const u32 events = i915->pm_rps_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	/* Mask the events until gen6_pm_rps_work() has processed them. */
	gen6_gt_pm_mask_irq(gt, events);

	if (!rps->interrupts_enabled)
		return;

	/* Stash the events for the worker (runs with forcewake available). */
	rps->pm_iir |= events;
	schedule_work(&rps->work);
}
1677 
/*
 * Gen6-gen7 PM interrupt handler: queue RPS events to the worker and,
 * below gen8, also service the VEBOX user/error interrupts inline.
 */
void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_gt *gt = &dev_priv->gt;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&gt->irq_lock);
		/* Mask the events until gen6_pm_rps_work() has run. */
		gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&gt->irq_lock);
	}

	/* Gen8+ routes the VEBOX interrupts elsewhere. */
	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
1702 
/*
 * Clear all PIPESTAT status bits on every pipe and forget the software
 * interrupt mask. Used when (re)initialising the display interrupts.
 */
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Writing the status bits back acks/clears them. */
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}
1715 
/*
 * Collect and ack the per-pipe PIPESTAT status bits for a pending IIR.
 * Fills @pipe_stats with the status bits we care about for each pipe;
 * the actual event handling is done by the *_pipestat_irq_handler()s.
 */
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		/* Only look at the enabled status bits when IIR flagged this pipe. */
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
1780 
/* Fan out the per-pipe status bits collected by i9xx_pipestat_irq_ack(). */
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 status = pipe_stats[pipe];

		if (status & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (status & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (status & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}
1797 
/*
 * Fan out the per-pipe status bits collected by i9xx_pipestat_irq_ack(),
 * plus the legacy backlight/ASLE event.
 */
static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 status = pipe_stats[pipe];

		if (status & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (status & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (status & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (status & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}
1821 
/*
 * Dispatch already-acked PIPESTAT events on gen4 (i965): vblank
 * (start-of-vblank variant), legacy backlight, pipe CRC, FIFO
 * underrun, ASLE and GMBUS.
 */
static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;
	bool blc_event = false;

	for_each_pipe(dev_priv, pipe) {
		u32 stats = pipe_stats[pipe];

		if (stats & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (stats & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (stats & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (stats & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* Backlight and OpRegion ASLE events share one handler. */
	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	/* GMBUS status is reported on the first pipe's PIPESTAT. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
1848 
/*
 * Dispatch already-acked PIPESTAT events on VLV/CHV: vblank, pipe
 * CRC, FIFO underrun and GMBUS.
 */
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 stats = pipe_stats[pipe];

		if (stats & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (stats & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (stats & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* GMBUS status is reported on the first pipe's PIPESTAT. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
1868 
/*
 * Read and clear PORT_HOTPLUG_STAT, returning the accumulated hotplug
 * (and, on g4x/vlv/chv, DP AUX) status bits that were pending.
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	/* g4x/vlv/chv also latch DP AUX events in this register. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		/* Nothing (left) pending - done. */
		if (tmp == 0)
			return hotplug_status;

		/* Accumulate, then write back to clear the latched bits. */
		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	/* Gave up after 10 attempts; the status keeps re-latching. */
	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}
1906 
/*
 * Decode the PORT_HOTPLUG_STAT bits returned by i9xx_hpd_irq_ack()
 * and forward them as HPD pin events (plus DP AUX on g4x/vlv/chv).
 */
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   trigger, trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   trigger, trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
1939 
/*
 * Top-level interrupt handler for Valleyview: samples GT, PM and
 * display IIRs with the master interrupt disabled, acks them in the
 * required order, then dispatches to the per-source handlers.
 * Returns IRQ_HANDLED if anything was pending.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		/* Not ours - bail out without claiming the interrupt. */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		/* Ack GT/PM now; the handlers run after re-enabling below. */
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore the interrupt enables saved/cleared above. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
2024 
/*
 * Top-level interrupt handler for Cherryview: like valleyview_irq_handler()
 * but the GT side goes through the gen8 master-control/GT-IIR scheme.
 * Returns IRQ_HANDLED if anything was pending.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 gt_iir[4];
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		/* Not ours - bail out without claiming the interrupt. */
		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		/* Ack GT interrupts into gt_iir[]; handled after re-enable. */
		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/* Restore the interrupt enables saved/cleared above. */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
2104 
/*
 * Ack and decode PCH (IBX/CPT-style) hotplug events. @hotplug_trigger
 * holds the SDEIIR hotplug bits; @hpd maps trigger bits to HPD pins.
 */
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		/* Avoid clearing latched status we weren't asked to handle. */
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	/* Write back acks/clears the (possibly masked) status bits. */
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2136 
/*
 * Service SDEIIR events for an IBX PCH: hotplug, DP AUX, GMBUS,
 * audio notifications, FDI debug output, poison and transcoder
 * FIFO underruns.
 */
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		/*
		 * port_name() yields a character ('A' + port), so print it
		 * with %c (matching cpt_irq_handler()), not %d which would
		 * log the raw ASCII code.
		 */
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	/* FDI RX errors: dump the per-pipe IIR for debugging. */
	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}
2184 
/*
 * Decode GEN7_ERR_INT: poison, per-pipe FIFO underruns and pipe
 * CRC-done events, then write back to clear the handled bits.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB and HSW+ use different CRC result handling. */
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	/* Ack everything we sampled above. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}
2207 
cpt_serr_int_handler(struct drm_i915_private * dev_priv)2208 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2209 {
2210 	u32 serr_int = I915_READ(SERR_INT);
2211 	enum pipe pipe;
2212 
2213 	if (serr_int & SERR_INT_POISON)
2214 		DRM_ERROR("PCH poison interrupt\n");
2215 
2216 	for_each_pipe(dev_priv, pipe)
2217 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2218 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2219 
2220 	I915_WRITE(SERR_INT, serr_int);
2221 }
2222 
/*
 * Service SDEIIR events for a CPT/PPT PCH: hotplug, DP AUX, GMBUS,
 * audio content-protection notifications, FDI debug output and the
 * south error interrupt.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hpd_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	int pipe;

	ibx_hpd_irq_handler(dev_priv, hpd_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);

		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	/* FDI RX errors: dump the per-pipe IIR for debugging. */
	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
2258 
/*
 * Service SDEIIR events for an ICP-class PCH: DDI and Type-C hotplug
 * plus GMBUS. @pins maps the trigger bits to HPD pins.
 */
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
			    const u32 *pins)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 ddi_trigger, tc_trigger;

	if (HAS_PCH_MCC(dev_priv)) {
		/* Mule Creek Canyon: TGP-style DDI bits, no Type-C ports. */
		ddi_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_trigger = 0;
	} else {
		ddi_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_trigger = pch_iir & SDE_TC_MASK_ICP;
	}

	if (ddi_trigger) {
		u32 hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);

		/* Write back to clear the latched status bits. */
		I915_WRITE(SHOTPLUG_CTL_DDI, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_trigger, hotplug_reg, pins,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_trigger) {
		u32 hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);

		I915_WRITE(SHOTPLUG_CTL_TC, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_trigger, hotplug_reg, pins,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}
2304 
/*
 * Service SDEIIR events for a TGP PCH: DDI and Type-C hotplug plus
 * GMBUS, using the Tiger Lake pin tables.
 */
static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 ddi_trigger = pch_iir & SDE_DDI_MASK_TGP;
	u32 tc_trigger = pch_iir & SDE_TC_MASK_TGP;

	if (ddi_trigger) {
		u32 hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);

		/* Write back to clear the latched status bits. */
		I915_WRITE(SHOTPLUG_CTL_DDI, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_trigger, hotplug_reg, hpd_tgp,
				   tgp_ddi_port_hotplug_long_detect);
	}

	if (tc_trigger) {
		u32 hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);

		I915_WRITE(SHOTPLUG_CTL_TC, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_trigger, hotplug_reg, hpd_tgp,
				   tgp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}
2341 
/*
 * Service SDEIIR events for an SPT/KBP PCH: hotplug from both
 * hotplug status registers (port E lives in PCH_PORT_HOTPLUG2)
 * plus GMBUS.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;

	if (hotplug_trigger) {
		u32 hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);

		/* Write back to clear the latched status bits. */
		I915_WRITE(PCH_PORT_HOTPLUG, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);

		I915_WRITE(PCH_PORT_HOTPLUG2, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
2377 
/*
 * Ack and decode CPU-side digital port hotplug events on ILK+.
 * @hotplug_trigger are the DEIIR hotplug bits; @hpd maps them to pins.
 */
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);

	/* Write back to ack/clear the latched per-port status. */
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2393 
/*
 * Handle display engine (DEIIR) events on Ironlake/Sandybridge:
 * DP-A hotplug, AUX, GSE/ASLE, poison, per-pipe vblank/underrun/CRC,
 * chained PCH events, and (gen5 only) PCU/RPS change events.
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}
2439 
/*
 * Handle display engine (DEIIR) events on Ivybridge/Haswell:
 * DP-A hotplug, error interrupts, eDP PSR, AUX, GSE/ASLE, per-pipe
 * vblank, and chained PCH events.
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		/* Handle the sampled bits, then ack them. */
		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(&dev_priv->drm, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2480 
2481 /*
2482  * To handle irqs with the minimum potential races with fresh interrupts, we:
2483  * 1 - Disable Master Interrupt Control.
2484  * 2 - Find the source(s) of the interrupt.
2485  * 3 - Clear the Interrupt Identity bits (IIR).
2486  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2487  * 5 - Re-enable Master Interrupt Control.
2488  */
/*
 * Top-level interrupt handler for ILK/SNB/IVB/HSW. See the comment
 * above for the disable-master / find / clear / process / re-enable
 * protocol this follows.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		else
			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	/* PM interrupts only exist on gen6+. */
	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	/* Re-enable master and south interrupts. */
	I915_WRITE(DEIER, de_ier);
	if (!HAS_PCH_NOP(dev_priv))
		I915_WRITE(SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
2555 
/*
 * Ack and decode Broxton hotplug events. @hotplug_trigger are the
 * DE port IIR hotplug bits; @hpd maps them to HPD pins.
 */
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);

	/* Write back to ack/clear the latched per-port status. */
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2571 
/*
 * Ack and decode gen11+ DE HPD events from the Type-C and Thunderbolt
 * hotplug control registers, picking the per-generation pin table.
 */
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
	const u32 *hpd;
	long_pulse_detect_func long_pulse_detect;

	/* Gen12 has its own pin table and pulse decoding. */
	if (INTEL_GEN(dev_priv) >= 12) {
		hpd = hpd_gen12;
		long_pulse_detect = gen12_port_hotplug_long_detect;
	} else {
		hpd = hpd_gen11;
		long_pulse_detect = gen11_port_hotplug_long_detect;
	}

	if (trigger_tc) {
		u32 hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);

		/* Write back to clear the latched status bits. */
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
				   hotplug_reg, hpd, long_pulse_detect);
	}

	if (trigger_tbt) {
		u32 hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);

		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
				   hotplug_reg, hpd, long_pulse_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}
2613 
gen8_de_port_aux_mask(struct drm_i915_private * dev_priv)2614 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2615 {
2616 	u32 mask;
2617 
2618 	if (INTEL_GEN(dev_priv) >= 12)
2619 		/* TODO: Add AUX entries for USBC */
2620 		return TGL_DE_PORT_AUX_DDIA |
2621 			TGL_DE_PORT_AUX_DDIB |
2622 			TGL_DE_PORT_AUX_DDIC;
2623 
2624 	mask = GEN8_AUX_CHANNEL_A;
2625 	if (INTEL_GEN(dev_priv) >= 9)
2626 		mask |= GEN9_AUX_CHANNEL_B |
2627 			GEN9_AUX_CHANNEL_C |
2628 			GEN9_AUX_CHANNEL_D;
2629 
2630 	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2631 		mask |= CNL_AUX_CHANNEL_F;
2632 
2633 	if (IS_GEN(dev_priv, 11))
2634 		mask |= ICL_AUX_CHANNEL_E;
2635 
2636 	return mask;
2637 }
2638 
gen8_de_pipe_fault_mask(struct drm_i915_private * dev_priv)2639 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2640 {
2641 	if (INTEL_GEN(dev_priv) >= 9)
2642 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2643 	else
2644 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2645 }
2646 
/*
 * Handle GEN8_DE_MISC_IIR events: GSE (OpRegion ASLE) and eDP PSR.
 * Logs an error if none of the known bits was set.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		/* Handle the sampled PSR bits, then ack them. */
		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
		found = true;
	}

	if (!found)
		DRM_ERROR("Unexpected DE Misc interrupt\n");
}
2668 
/*
 * Handle all gen8+ display engine interrupts indicated in @master_ctl:
 * DE MISC, DE HPD (gen11+), DE PORT (AUX/hotplug/GMBUS), the per-pipe
 * IIRs, and chained PCH (SDE) interrupts. Each source is acked by
 * writing its IIR back before its handler runs.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Hotplug routing differs between BXT and BDW. */
			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			/* Pick the south handler for the PCH generation. */
			if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
				tgp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
				icp_irq_handler(dev_priv, iir, hpd_mcc);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir, hpd_icp);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
2804 
/*
 * Disable the GEN8 top-level (master) interrupt and return a snapshot of
 * the pending source indications.  Uses the raw register accessors since
 * this runs in the hard-irq path where uncore tracking is not wanted.
 */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
2817 
/* Re-enable the GEN8 master interrupt once source processing is done. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
2822 
/*
 * Top-level interrupt handler for GEN8/GEN9 platforms: disable the master
 * interrupt, ack GT sources, dispatch display-engine sources, re-enable
 * the master, and only then run the (already acked) GT handlers.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;
	u32 gt_iir[4];

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Spurious/shared interrupt: nothing pending for us */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt */
	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	/* GT IIRs were sampled above; handle them with the master re-enabled */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

	return IRQ_HANDLED;
}
2855 
/*
 * If the master control reports a GU_MISC interrupt, read and clear its
 * IIR and return the pending bits (0 when nothing was flagged).
 */
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}
2871 
/* Process previously acked GU_MISC bits; only the GSE (ASLE) event matters. */
static void
gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(gt->i915);
}
2878 
/*
 * Disable the GEN11 master interrupt and return a snapshot of the pending
 * source indications (see the gen8 equivalent for the rationale).
 */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
2891 
/* Re-enable the GEN11 master interrupt once source processing is done. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
2896 
/*
 * Top-level interrupt handler for GEN11+: disable the master interrupt,
 * let the GT code find/clear/process its sources, dispatch display
 * interrupts via the gen8 DE handler, ack GU_MISC, re-enable the master,
 * then handle the GU_MISC events.
 */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		/* Spurious/shared interrupt: nothing pending for us */
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, clear, then process each source of interrupt. */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ) {
		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

		disable_rpm_wakeref_asserts(&i915->runtime_pm);
		/*
		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
		 * for the display related bits.
		 */
		gen8_de_irq_handler(i915, disp_ctl);
		enable_rpm_wakeref_asserts(&i915->runtime_pm);
	}

	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}
2938 
2939 /* Called from drm generic code, passed 'crtc' which
2940  * we use as a pipe index
2941  */
/*
 * drm enable_vblank hook for old (i8xx-style) platforms: unmask the
 * vblank pipestat event for the crtc's pipe.  Always succeeds.
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	/* irq_lock protects the pipestat bookkeeping */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2954 
/*
 * i945GM: vblank interrupts can't wake the CPU from C3, so on the first
 * enable schedule the worker that raises a PM QoS request keeping the CPU
 * out of C3 (see i945gm_vblank_work_func), then do the normal enable.
 */
int i945gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * NOTE(review): the counter increment is not under a lock here -
	 * presumably serialized by the drm vblank core; confirm before
	 * relying on it from other contexts.
	 */
	if (dev_priv->i945gm_vblank.enabled++ == 0)
		schedule_work(&dev_priv->i945gm_vblank.work);

	return i8xx_enable_vblank(crtc);
}
2964 
/*
 * drm enable_vblank hook for gen4-style platforms: these use the
 * "start of vblank" pipestat event rather than the plain vblank one.
 */
int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2978 
/*
 * drm enable_vblank hook for ILK-BDW display engines: unmask the DE pipe
 * vblank bit (IVB+ uses a different bit layout than ILK/SNB).
 */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
2999 
/*
 * drm enable_vblank hook for GEN8+ display engines: unmask the per-pipe
 * vblank interrupt via the DE pipe IMR/IER.
 */
int bdw_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
3018 
3019 /* Called from drm generic code, passed 'crtc' which
3020  * we use as a pipe index
3021  */
/*
 * drm disable_vblank hook for old (i8xx-style) platforms: mask the
 * vblank pipestat event again.  Counterpart of i8xx_enable_vblank().
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
3032 
/*
 * i945GM: disable the vblank interrupt and, on the last disable, schedule
 * the worker so the C3-blocking PM QoS request is dropped again.
 */
void i945gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	if (--dev_priv->i945gm_vblank.enabled == 0)
		schedule_work(&dev_priv->i945gm_vblank.work);
}
3042 
/*
 * drm disable_vblank hook for gen4-style platforms.
 * Counterpart of i965_enable_vblank().
 */
void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
3054 
/*
 * drm disable_vblank hook for ILK-BDW display engines.
 * Counterpart of ilk_enable_vblank(); same IVB bit-layout split.
 */
void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
3067 
/*
 * drm disable_vblank hook for GEN8+ display engines.
 * Counterpart of bdw_enable_vblank().
 */
void bdw_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
3078 
/*
 * Worker toggling the i945GM PM QoS request: while any vblank interrupt
 * is enabled, cap the allowed CPU wakeup latency just below the C3 exit
 * latency so cpuidle never enters C3; otherwise restore the default.
 */
static void i945gm_vblank_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, i945gm_vblank.work);

	/*
	 * Vblank interrupts fail to wake up the device from C3,
	 * hence we want to prevent C3 usage while vblank interrupts
	 * are enabled.
	 */
	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
			      dev_priv->i945gm_vblank.c3_disable_latency :
			      PM_QOS_DEFAULT_VALUE);
}
3094 
cstate_disable_latency(const char * name)3095 static int cstate_disable_latency(const char *name)
3096 {
3097 	const struct cpuidle_driver *drv;
3098 	int i;
3099 
3100 	drv = cpuidle_get_driver();
3101 	if (!drv)
3102 		return 0;
3103 
3104 	for (i = 0; i < drv->state_count; i++) {
3105 		const struct cpuidle_state *state = &drv->states[i];
3106 
3107 		if (!strcmp(state->name, name))
3108 			return state->exit_latency ?
3109 				state->exit_latency - 1 : 0;
3110 	}
3111 
3112 	return 0;
3113 }
3114 
/*
 * One-time setup of the i945GM vblank/C3 workaround: initialize the
 * worker, cache the latency bound for disabling C3, and register an
 * (initially inactive) PM QoS CPU latency request.
 */
static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
{
	INIT_WORK(&dev_priv->i945gm_vblank.work,
		  i945gm_vblank_work_func);

	dev_priv->i945gm_vblank.c3_disable_latency =
		cstate_disable_latency("C3");
	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
			   PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}
3126 
/* Teardown counterpart of i945gm_vblank_work_init(). */
static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
{
	/* flush any in-flight worker before removing its QoS request */
	cancel_work_sync(&dev_priv->i945gm_vblank.work);
	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
}
3132 
/* Reset the south display engine (PCH) interrupt registers. */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Nothing to do when the PCH display is not present/usable */
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	/* CPT/LPT additionally latch errors in SERR_INT; clear those too */
	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}
3145 
3146 /*
3147  * SDEIER is also touched by the interrupt handler to work around missed PCH
3148  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3149  * instead we unconditionally enable all PCH interrupt sources here, but then
3150  * only unmask them as needed with SDEIMR.
3151  *
3152  * This function needs to be called before interrupts are enabled.
3153  */
static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	/* reset should have left SDEIER cleared; see comment above */
	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
3163 
/*
 * Reset all VLV/CHV display interrupt state: GT/display invalidation
 * status, hotplug enables and status, pipestats, and the main VLV IMR/
 * IER/IIR trio.  Leaves irq_mask fully masked.
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* CHV has extra status bits in DPINVGTT */
	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	/* writing the read-back value clears the sticky status bits */
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}
3181 
/*
 * Enable the VLV/CHV display interrupts: CRC pipestats on every pipe,
 * GMBUS on pipe A, and the pipe-event/LPE-audio/port interrupts in the
 * main VLV IER (pipe C only exists on CHV).
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* must be called on a freshly reset state (irq_mask all masked) */
	WARN_ON(dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
3212 
3213 /* drm_dma.h hooks
3214 */
/*
 * drm irq_reset hook for ILK-HSW: reset the display engine, error, PSR
 * (HSW only), GT, and PCH interrupt registers.
 */
static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	/* gen7 latches errors in GEN7_ERR_INT; clear them */
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}
3232 
/*
 * drm irq_reset hook for VLV: kill the master IER first, then reset GT
 * and (if currently enabled) the display interrupt state under irq_lock.
 */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3245 
/*
 * drm irq_reset hook for GEN8/GEN9: disable the master interrupt, then
 * reset GT, PSR, per-pipe (only for powered-up pipes, whose registers
 * are accessible), DE port/misc, PCU, and PCH interrupt registers.
 */
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	int pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* pipe registers are only accessible while their power well is on */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}
3270 
/*
 * drm irq_reset hook for GEN11+: like gen8_irq_reset() plus the display
 * interrupt control, DE HPD, and GU_MISC register sets; the PCH (south)
 * registers are reset for ICP-class PCHs.
 */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	int pipe;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* pipe registers are only accessible while their power well is on */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}
3299 
/*
 * Re-program the DE pipe interrupt registers for the pipes in @pipe_mask
 * after their power well has been turned on (the registers lose their
 * contents while the well is off).  No-op if driver irqs are disabled.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
3322 
/*
 * Reset the DE pipe interrupt registers for the pipes in @pipe_mask
 * before their power well is turned off, and wait for any in-flight
 * handler so no display irq processing races the power-down.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
3344 
/*
 * drm irq_reset hook for CHV: disable the GEN8-style master interrupt,
 * reset GT and PCU registers, then reset the VLV-style display state
 * under irq_lock if display interrupts are currently enabled.
 */
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3361 
intel_hpd_enabled_irqs(struct drm_i915_private * dev_priv,const u32 hpd[HPD_NUM_PINS])3362 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3363 				  const u32 hpd[HPD_NUM_PINS])
3364 {
3365 	struct intel_encoder *encoder;
3366 	u32 enabled_irqs = 0;
3367 
3368 	for_each_intel_encoder(&dev_priv->drm, encoder)
3369 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3370 			enabled_irqs |= hpd[encoder->hpd_pin];
3371 
3372 	return enabled_irqs;
3373 }
3374 
/* Program the PCH hotplug-detect enable and pulse-duration bits. */
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3399 
ibx_hpd_irq_setup(struct drm_i915_private * dev_priv)3400 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3401 {
3402 	u32 hotplug_irqs, enabled_irqs;
3403 
3404 	if (HAS_PCH_IBX(dev_priv)) {
3405 		hotplug_irqs = SDE_HOTPLUG_MASK;
3406 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3407 	} else {
3408 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3409 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3410 	}
3411 
3412 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3413 
3414 	ibx_hpd_detection_setup(dev_priv);
3415 }
3416 
/*
 * Set the hotplug-detect enable bits in the south DDI and (optionally)
 * Type-C control registers; a zero @tc_hotplug_enable_mask skips the TC
 * register entirely (used on PCHs without TC ports).
 */
static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 ddi_hotplug_enable_mask,
				    u32 tc_hotplug_enable_mask)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ddi_hotplug_enable_mask;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	if (tc_hotplug_enable_mask) {
		hotplug = I915_READ(SHOTPLUG_CTL_TC);
		hotplug |= tc_hotplug_enable_mask;
		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
	}
}
3433 
/* Ice Lake PCH: unmask DDI + Type-C hotplug irqs and enable detection. */
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
				ICP_TC_HPD_ENABLE_MASK);
}
3446 
mcc_hpd_irq_setup(struct drm_i915_private * dev_priv)3447 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3448 {
3449 	u32 hotplug_irqs, enabled_irqs;
3450 
3451 	hotplug_irqs = SDE_DDI_MASK_TGP;
3452 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);
3453 
3454 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3455 
3456 	icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
3457 }
3458 
/* Tiger Lake PCH: unmask DDI + Type-C hotplug irqs and enable detection. */
static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
				TGP_TC_HPD_ENABLE_MASK);
}
3471 
/*
 * Enable hotplug detection on the four north Type-C pins, for both the
 * TC (DP-alt/legacy) and TBT (Thunderbolt) hotplug control registers.
 */
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}
3490 
/*
 * GEN11+ hotplug irq setup: unmask the north TC/TBT hotplug interrupts
 * in the DE HPD IMR, enable detection, then chain into the appropriate
 * south (PCH) setup.
 */
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	const u32 *hpd;
	u32 val;

	/* gen12 remaps the pin table but shares the register layout */
	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		tgp_hpd_irq_setup(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}
3513 
/* Enable hotplug detection on ports A-E of SPT/KBP/CNP-class PCHs. */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* port E lives in the second hotplug control register */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
3538 
/* SPT-class PCH: unmask south hotplug irqs and enable detection. */
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}
3550 
/* Program the north (CPU-side) port A hotplug-detect control. */
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
3566 
/*
 * ILK-BDW hotplug irq setup: unmask the north DP-A hotplug interrupt
 * (register layout differs per generation), enable north detection,
 * then chain into the south (PCH) setup.
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}
3592 
/*
 * Enable DDI A-C hotplug detection on BXT/GLK and program the per-port
 * invert bits from the VBT (board-specific AOB wiring may invert HPD).
 */
static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3623 
/* Detection-only variant: consider every BXT hotplug pin for inversion. */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}
3628 
bxt_hpd_irq_setup(struct drm_i915_private * dev_priv)3629 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3630 {
3631 	u32 hotplug_irqs, enabled_irqs;
3632 
3633 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3634 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3635 
3636 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3637 
3638 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3639 }
3640 
/*
 * Unmask the baseline south (PCH) interrupts (GMBUS/AUX, plus POISON on
 * IBX) and program hotplug detection.  IER was already set wide open in
 * ibx_irq_pre_postinstall(); only SDEIMR is touched here.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}
3664 
/*
 * Interrupt postinstall for ILK/SNB/IVB/HSW: program the display engine
 * IMR/IER, the gen5 GT interrupts and the PCH interrupts.
 *
 * display_mask holds the bits that stay unmasked in IMR; extra_mask holds
 * bits that are only enabled in IER (vblank/underrun style events that are
 * turned on/off at runtime via the IMR).
 */
static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		/* IVB/HSW use the _IVB bit definitions. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		/* PSR IIR must be clear before enabling the PSR interrupt. */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	/*
	 * NOTE(review): PCH pre-postinstall is run before the DE init —
	 * presumably an ordering requirement of the SDE registers; confirm
	 * against ibx_irq_pre_postinstall().
	 */
	ibx_irq_pre_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev_priv);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}
3715 
/*
 * Mark the VLV/CHV display irqs as wanted, and if interrupts are already
 * installed, actually (re)program the display irq registers. Caller must
 * hold irq_lock.
 */
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	/* Already enabled - nothing to do. */
	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		/* Reset first so we program from a known-clean state. */
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}
3730 
/*
 * Counterpart of valleyview_enable_display_irqs(): mark the display irqs
 * as unwanted and mask/clear them if interrupts are installed. Caller must
 * hold irq_lock.
 */
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}
3743 
3744 
/*
 * Interrupt postinstall for VLV: enable GT irqs, then the display irqs
 * (only if the display power domain currently allows it), and finally
 * the master interrupt enable.
 */
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	/* Posting read to flush the master enable before irqs can fire. */
	POSTING_READ(VLV_MASTER_IER);
}
3757 
/*
 * Program the gen8+ display engine interrupts: per-pipe IMR/IER, DE port
 * (AUX/hotplug/GMBUS), DE misc (GSE/PSR), and on gen11+ the dedicated
 * DE HPD block.
 *
 * The *_masked variables are the bits left unmasked in the IMR; the
 * *_enables variables additionally contain bits only set in the IER
 * (e.g. vblank/underrun, toggled at runtime through the IMR).
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	/* GSE lives in DE_MISC only up to gen10 (gen11 moved it to GU_MISC). */
	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		/* BXT/GLK have GMBUS in the DE port block instead of the PCH. */
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	/* PSR IIR must be clear before enabling the PSR interrupt. */
	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* Only touch pipe registers whose power well is currently on;
		 * powered-down pipes get programmed when the well comes up. */
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}
3827 
/*
 * Top-level gen8/9/10 interrupt postinstall: PCH (if present), GT,
 * display engine, then finally flip the master interrupt enable.
 */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	/* Master enable comes last so nothing fires half-configured. */
	gen8_master_intr_enable(dev_priv->uncore.regs);
}
3841 
/*
 * South display (ICP+ PCH) interrupt postinstall: enable GMBUS and the
 * appropriate per-PCH hotplug detection logic.
 */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	/* IER is expected to be 0 after reset; enable everything, relying
	 * on IMR for masking. */
	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_TGP(dev_priv))
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
					TGP_TC_HPD_ENABLE_MASK);
	else if (HAS_PCH_MCC(dev_priv))
		/* MCC (EHL) has no TC hotplug pins. */
		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
	else
		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
					ICP_TC_HPD_ENABLE_MASK);
}
3862 
/*
 * Top-level gen11+ interrupt postinstall: PCH, GT, display engine,
 * GU misc (which carries GSE on gen11), then display and master enables.
 */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	/* Master enable last; posting read flushes it to hardware. */
	gen11_master_intr_enable(uncore->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);
}
3881 
/*
 * Interrupt postinstall for CHV: gen8-style GT irqs plus VLV-style
 * display irqs (gated on the display power state), then master enable.
 */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}
3894 
/* Reset all gen2 interrupt state: pipestats plus the GEN2 IMR/IER/IIR. */
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
}
3903 
/*
 * Interrupt postinstall for gen2: program the (16-bit) error mask, the
 * always-on interrupt bits, and enable CRC-done pipestat events.
 */
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	/* Unmask only the error conditions we care to report. */
	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3935 
/*
 * Acknowledge a gen2 master error interrupt. Returns the raw EIR value in
 * @eir and the bits that could not be cleared (stuck) in @eir_stuck; stuck
 * bits get masked in EMR so they stop re-raising the interrupt.
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	/* Write-1-to-clear whatever was set. */
	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	/* Anything still set after the clear attempt is stuck. */
	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
3965 
/* Report a gen2 master error; stuck bits have already been masked by
 * i8xx_error_irq_ack(). */
static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}
3974 
/*
 * 32-bit counterpart of i8xx_error_irq_ack(): ack the EIR, report stuck
 * bits in @eir_stuck and mask them in EMR.
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	/* Anything still set after the clear attempt is stuck. */
	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}
4002 
/* Report a gen3+ master error; stuck bits were masked during the ack. */
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}
4011 
/*
 * Top-level interrupt service routine for gen2: ack pipestat and error
 * state, clear the IIR, then dispatch user and error interrupts. The
 * do { } while (0) makes a single pass; the early break handles a
 * spurious (shared-line) interrupt.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Clear IIR only after the sources were acked above. */
		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4056 
/* Reset gen3 interrupt state: hotplug (if present), pipestats and the
 * GEN2-style IMR/IER/IIR. */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* PORT_HOTPLUG_STAT is write-1-to-clear. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
4070 
/*
 * Interrupt postinstall for gen3: program the error mask, the always-on
 * interrupt bits (plus hotplug where supported), CRC-done pipestats and
 * the ASLE (opregion backlight) event.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
4111 
/*
 * Top-level interrupt service routine for gen3. Same shape as the gen2
 * handler but with 32-bit IIR/EIR and optional hotplug handling; the
 * single-pass do { } while (0) breaks out early on a spurious interrupt.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Clear IIR only after the sources were acked above. */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4164 
/* Reset gen4 interrupt state: hotplug, pipestats and GEN2-style
 * IMR/IER/IIR (hotplug is always present on gen4). */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* PORT_HOTPLUG_STAT is write-1-to-clear. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}
4176 
/*
 * Interrupt postinstall for gen4 (i965/g4x): error mask (with extra G4x
 * error sources), always-on interrupt bits (incl. display port/hotplug),
 * GMBUS and CRC pipestats, and the ASLE event.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* G4x has a second (video/BSD) ring with its own user interrupt. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
4229 
/*
 * Hotplug setup for the GMCH platforms: build the hotplug-enable bits for
 * the connected ports and program the CRT detection parameters. Caller
 * must hold irq_lock.
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
4254 
/*
 * Top-level interrupt service routine for gen4. Like the gen3 handler,
 * with the addition of the BSD (video) ring user interrupt; the
 * single-pass do { } while (0) breaks out early on a spurious interrupt.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Clear IIR only after the sources were acked above. */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4309 
4310 /**
4311  * intel_irq_init - initializes irq support
4312  * @dev_priv: i915 device instance
4313  *
4314  * This function initializes all the irq support including work items, timers
4315  * and all the vtables. It does not setup the interrupt itself though.
4316  */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	/* I945GM uses a timer-based vblank workaround. */
	if (IS_I945GM(dev_priv))
		i945gm_vblank_work_init(dev_priv);

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	/* We share the register with other engine */
	if (INTEL_GEN(dev_priv) > 9)
		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB/IVB/HSW can hard hang on a looping batchbuffer if
	 * GEN6_PM_UP_EI_EXPIRED is masked, and VLV/CHV may do the same.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	/* Select the per-platform hotplug irq setup vfunc. */
	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_MCC(dev_priv))
			/* EHL doesn't need most of gen11_hpd_irq_setup */
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}
4403 
4404 /**
4405  * intel_irq_fini - deinitializes IRQ support
4406  * @i915: i915 device instance
4407  *
4408  * This function deinitializes all the IRQ support.
4409  */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	/* Tear down the I945GM vblank workaround timer. */
	if (IS_I945GM(i915))
		i945gm_vblank_work_fini(i915);

	/* Free any L3 remap tables allocated by the parity work. */
	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}
4420 
intel_irq_handler(struct drm_i915_private * dev_priv)4421 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4422 {
4423 	if (HAS_GMCH(dev_priv)) {
4424 		if (IS_CHERRYVIEW(dev_priv))
4425 			return cherryview_irq_handler;
4426 		else if (IS_VALLEYVIEW(dev_priv))
4427 			return valleyview_irq_handler;
4428 		else if (IS_GEN(dev_priv, 4))
4429 			return i965_irq_handler;
4430 		else if (IS_GEN(dev_priv, 3))
4431 			return i915_irq_handler;
4432 		else
4433 			return i8xx_irq_handler;
4434 	} else {
4435 		if (INTEL_GEN(dev_priv) >= 11)
4436 			return gen11_irq_handler;
4437 		else if (INTEL_GEN(dev_priv) >= 8)
4438 			return gen8_irq_handler;
4439 		else
4440 			return ironlake_irq_handler;
4441 	}
4442 }
4443 
/*
 * Dispatch to the platform-specific interrupt reset routine, mirroring
 * the selection logic of intel_irq_handler().
 */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ironlake_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
4466 
/*
 * Dispatch to the platform-specific interrupt postinstall routine,
 * mirroring the selection logic of intel_irq_handler().
 */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ironlake_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
4489 
4490 /**
4491  * intel_irq_install - enables the hardware interrupt
4492  * @dev_priv: i915 device instance
4493  *
4494  * This function enables the hardware interrupt handling, but leaves the hotplug
4495  * handling still disabled. It is called after intel_irq_init().
4496  *
4497  * In the driver load and resume code we need working interrupts in a few places
4498  * but don't want to deal with the hassle of concurrent probe and hotplug
4499  * workers. Hence the split into this two-stage approach.
4500  */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	/* Bring the hardware to a known (all masked/cleared) state first. */
	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* Roll back the "enabled" marker set above. */
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
4528 
4529 /**
 * intel_irq_uninstall - finalizes all irq handling
4531  * @dev_priv: i915 device instance
4532  *
4533  * This stops interrupt and hotplug handling and unregisters and frees all
4534  * resources acquired in the init functions.
4535  */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver load
	 * error handling due to intel_modeset_cleanup()
	 * calling us out of sequence. Would be nice if
	 * it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	/* Mask/clear everything before handing the line back. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
4558 
4559 /**
4560  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4561  * @dev_priv: i915 device instance
4562  *
4563  * This function is used to disable interrupts at runtime, both in the runtime
4564  * pm and the system suspend/resume code.
4565  */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait for any in-flight handler to finish before returning. */
	intel_synchronize_irq(dev_priv);
}
4572 
4573 /**
4574  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4575  * @dev_priv: i915 device instance
4576  *
4577  * This function is used to enable interrupts at runtime, both in the runtime
4578  * pm and the system suspend/resume code.
4579  */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Mark enabled first; the postinstall hooks check this flag. */
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
4586 
/* Return true if driver interrupt handling is currently installed. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}
4595 
/* Wait for any currently executing handler of our irq line to complete. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}
4600