/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
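/*
 * Per-platform tables mapping each HPD pin to its hotplug trigger bits
 * in the relevant hotplug interrupt register.
 */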
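
/*
 * Together, the reset/init helpers above implement the usual IRQ
 * programming sequence: reset (mask everything, zero IER, clear IIR
 * twice) at preinstall/uninstall time, then init (assert IIR is already
 * clear, program IER, then IMR) at postinstall time.
 */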
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent the read-modify-write cycles
 * from interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->display.opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank =
		&dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank =
		&dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

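	/*
	 * DEIMR is a mask register: a 1 bit masks (disables) an interrupt,
	 * a 0 bit unmasks it. E.g. interrupt_mask = 0x3 with
	 * enabled_irq_mask = 0x1 clears mask bit 0 and sets mask bit 1,
	 * while bits outside interrupt_mask keep their previous value.
	 */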
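	/*
	 * PIPESTAT packs the per-event enable bits in the high 16 bits,
	 * directly above the corresponding status bits in the low 16 bits,
	 * hence the << 16 above.
	 */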
	/*
	 * To avoid the race condition where we might cross into the next
	 * vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR reads,
	 * make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR during the
	 * same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, since the timings are driven from
 * the PORT or there are issues with scanline register updates.
 * This function will use the framestamp and current timestamp registers
 * to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;

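	/*
	 * The frame timestamp is sampled at the start of vblank, so the
	 * scanlines elapsed since then are counted from vblank_start:
	 * offset by vblank_start and wrap at vtotal to get the scanline.
	 */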
	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
1455 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1456
1457 switch (pipe) {
1458 default:
1459 case PIPE_A:
1460 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1461 break;
1462 case PIPE_B:
1463 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1464 break;
1465 case PIPE_C:
1466 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1467 break;
1468 }
1469 if (iir & iir_bit)
1470 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1471
1472 if (!status_mask)
1473 continue;
1474
1475 reg = PIPESTAT(pipe);
1476 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1477 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1478
1479 /*
1480 * Clear the PIPE*STAT regs before the IIR
1481 *
1482 * Toggle the enable bits to make sure we get an
1483 * edge in the ISR pipe event bit if we don't clear
1484 * all the enabled status bits. Otherwise the edge
1485 * triggered IIR on i965/g4x wouldn't notice that
1486 * an interrupt is still pending.
1487 */
1488 if (pipe_stats[pipe]) {
1489 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1490 intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1491 }
1492 }
1493 spin_unlock(&dev_priv->irq_lock);
1494 }
1495
i8xx_pipestat_irq_handler(struct drm_i915_private * dev_priv,u16 iir,u32 pipe_stats[I915_MAX_PIPES])1496 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1497 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1498 {
1499 enum pipe pipe;
1500
1501 for_each_pipe(dev_priv, pipe) {
1502 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1503 intel_handle_vblank(dev_priv, pipe);
1504
1505 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1506 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1507
1508 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1509 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1510 }
1511 }
1512
i915_pipestat_irq_handler(struct drm_i915_private * dev_priv,u32 iir,u32 pipe_stats[I915_MAX_PIPES])1513 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1514 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1515 {
1516 bool blc_event = false;
1517 enum pipe pipe;
1518
1519 for_each_pipe(dev_priv, pipe) {
1520 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1521 intel_handle_vblank(dev_priv, pipe);
1522
1523 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1524 blc_event = true;
1525
1526 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1527 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1528
1529 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1530 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1531 }
1532
1533 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1534 intel_opregion_asle_intr(dev_priv);
1535 }
1536
i965_pipestat_irq_handler(struct drm_i915_private * dev_priv,u32 iir,u32 pipe_stats[I915_MAX_PIPES])1537 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1538 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1539 {
1540 bool blc_event = false;
1541 enum pipe pipe;
1542
1543 for_each_pipe(dev_priv, pipe) {
1544 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1545 intel_handle_vblank(dev_priv, pipe);
1546
1547 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1548 blc_event = true;
1549
1550 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1551 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1552
1553 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1554 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1555 }
1556
1557 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1558 intel_opregion_asle_intr(dev_priv);
1559
1560 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1561 gmbus_irq_handler(dev_priv);
1562 }
1563
valleyview_pipestat_irq_handler(struct drm_i915_private * dev_priv,u32 pipe_stats[I915_MAX_PIPES])1564 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1565 u32 pipe_stats[I915_MAX_PIPES])
1566 {
1567 enum pipe pipe;
1568
1569 for_each_pipe(dev_priv, pipe) {
1570 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1571 intel_handle_vblank(dev_priv, pipe);
1572
1573 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1574 flip_done_handler(dev_priv, pipe);
1575
1576 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1577 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1578
1579 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1580 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1581 }
1582
1583 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1584 gmbus_irq_handler(dev_priv);
1585 }
1586
i9xx_hpd_irq_ack(struct drm_i915_private * dev_priv)1587 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1588 {
1589 u32 hotplug_status = 0, hotplug_status_mask;
1590 int i;
1591
1592 if (IS_G4X(dev_priv) ||
1593 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1594 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1595 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1596 else
1597 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1598
1599 /*
1600 * We absolutely have to clear all the pending interrupt
1601 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1602 * interrupt bit won't have an edge, and the i965/g4x
1603 * edge triggered IIR will not notice that an interrupt
1604 * is still pending. We can't use PORT_HOTPLUG_EN to
1605 * guarantee the edge as the act of toggling the enable
1606 * bits can itself generate a new hotplug interrupt :(
1607 */
1608 for (i = 0; i < 10; i++) {
1609 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1610
1611 if (tmp == 0)
1612 return hotplug_status;
1613
1614 hotplug_status |= tmp;
1615 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1616 }
1617
1618 drm_WARN_ONCE(&dev_priv->drm, 1,
1619 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1620 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1621
1622 return hotplug_status;
1623 }
1624
1625 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1626 u32 hotplug_status)
1627 {
1628 u32 pin_mask = 0, long_mask = 0;
1629 u32 hotplug_trigger;
1630
1631 if (IS_G4X(dev_priv) ||
1632 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1633 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1634 else
1635 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1636
1637 if (hotplug_trigger) {
1638 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1639 hotplug_trigger, hotplug_trigger,
1640 dev_priv->display.hotplug.hpd,
1641 i9xx_port_hotplug_long_detect);
1642
1643 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1644 }
1645
1646 if ((IS_G4X(dev_priv) ||
1647 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1648 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1649 dp_aux_irq_handler(dev_priv);
1650 }
1651
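/*
 * Top-level interrupt handler for VLV: the master enable and VLV_IER
 * are dropped around the ack phase to guarantee a fresh 0->1 edge (see
 * the theory comment below), then the GT, PM, hotplug and pipe sources
 * are processed from the sampled IIR values.
 */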
1652 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1653 {
1654 struct drm_i915_private *dev_priv = arg;
1655 irqreturn_t ret = IRQ_NONE;
1656
1657 if (!intel_irqs_enabled(dev_priv))
1658 return IRQ_NONE;
1659
1660 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1661 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1662
1663 do {
1664 u32 iir, gt_iir, pm_iir;
1665 u32 pipe_stats[I915_MAX_PIPES] = {};
1666 u32 hotplug_status = 0;
1667 u32 ier = 0;
1668
1669 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1670 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1671 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1672
1673 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1674 break;
1675
1676 ret = IRQ_HANDLED;
1677
1678 /*
1679 * Theory on interrupt generation, based on empirical evidence:
1680 *
1681 * x = ((VLV_IIR & VLV_IER) ||
1682 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1683 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1684 *
1685 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1686 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1687 * guarantee the CPU interrupt will be raised again even if we
1688 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1689 * bits this time around.
1690 */
1691 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1692 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1693 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1694
1695 if (gt_iir)
1696 intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1697 if (pm_iir)
1698 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1699
1700 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1701 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1702
1703 /* Call regardless, as some status bits might not be
1704 * signalled in iir */
1705 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1706
1707 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1708 I915_LPE_PIPE_B_INTERRUPT))
1709 intel_lpe_audio_irq_handler(dev_priv);
1710
1711 /*
1712 * VLV_IIR is single buffered, and reflects the level
1713 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1714 */
1715 if (iir)
1716 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1717
1718 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1719 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1720
1721 if (gt_iir)
1722 gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1723 if (pm_iir)
1724 gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1725
1726 if (hotplug_status)
1727 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1728
1729 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1730 } while (0);
1731
1732 pmu_irq_stats(dev_priv, ret);
1733
1734 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1735
1736 return ret;
1737 }
1738
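/*
 * Top-level interrupt handler for CHV: same scheme as the VLV handler
 * above, except the master control lives in GEN8_MASTER_IRQ and the GT
 * sources are routed through the GEN8 GT interrupt code.
 */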
1739 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1740 {
1741 struct drm_i915_private *dev_priv = arg;
1742 irqreturn_t ret = IRQ_NONE;
1743
1744 if (!intel_irqs_enabled(dev_priv))
1745 return IRQ_NONE;
1746
1747 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1748 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1749
1750 do {
1751 u32 master_ctl, iir;
1752 u32 pipe_stats[I915_MAX_PIPES] = {};
1753 u32 hotplug_status = 0;
1754 u32 ier = 0;
1755
1756 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1757 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1758
1759 if (master_ctl == 0 && iir == 0)
1760 break;
1761
1762 ret = IRQ_HANDLED;
1763
1764 /*
1765 * Theory on interrupt generation, based on empirical evidence:
1766 *
1767 * x = ((VLV_IIR & VLV_IER) ||
1768 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1769 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1770 *
1771 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1772 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1773 * guarantee the CPU interrupt will be raised again even if we
1774 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1775 * bits this time around.
1776 */
1777 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1778 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1779 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1780
1781 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1782
1783 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1784 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1785
1786 /* Call regardless, as some status bits might not be
1787 * signalled in iir */
1788 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1789
1790 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1791 I915_LPE_PIPE_B_INTERRUPT |
1792 I915_LPE_PIPE_C_INTERRUPT))
1793 intel_lpe_audio_irq_handler(dev_priv);
1794
1795 /*
1796 * VLV_IIR is single buffered, and reflects the level
1797 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1798 */
1799 if (iir)
1800 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1801
1802 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1803 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1804
1805 if (hotplug_status)
1806 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1807
1808 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1809 } while (0);
1810
1811 pmu_irq_stats(dev_priv, ret);
1812
1813 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1814
1815 return ret;
1816 }
1817
1818 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1819 u32 hotplug_trigger)
1820 {
1821 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1822
1823 /*
1824 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1825 * unless we touch the hotplug register, even if hotplug_trigger is
1826 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1827 * errors.
1828 */
1829 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1830 if (!hotplug_trigger) {
1831 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1832 PORTD_HOTPLUG_STATUS_MASK |
1833 PORTC_HOTPLUG_STATUS_MASK |
1834 PORTB_HOTPLUG_STATUS_MASK;
1835 dig_hotplug_reg &= ~mask;
1836 }
1837
1838 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1839 if (!hotplug_trigger)
1840 return;
1841
1842 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1843 hotplug_trigger, dig_hotplug_reg,
1844 dev_priv->display.hotplug.pch_hpd,
1845 pch_port_hotplug_long_detect);
1846
1847 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1848 }
1849
1850 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1851 {
1852 enum pipe pipe;
1853 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1854
1855 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1856
1857 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1858 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1859 SDE_AUDIO_POWER_SHIFT);
1860 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1861 port_name(port));
1862 }
1863
1864 if (pch_iir & SDE_AUX_MASK)
1865 dp_aux_irq_handler(dev_priv);
1866
1867 if (pch_iir & SDE_GMBUS)
1868 gmbus_irq_handler(dev_priv);
1869
1870 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1871 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1872
1873 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1874 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1875
1876 if (pch_iir & SDE_POISON)
1877 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1878
1879 if (pch_iir & SDE_FDI_MASK) {
1880 for_each_pipe(dev_priv, pipe)
1881 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1882 pipe_name(pipe),
1883 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1884 }
1885
1886 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1887 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1888
1889 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1890 drm_dbg(&dev_priv->drm,
1891 "PCH transcoder CRC error interrupt\n");
1892
1893 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1894 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1895
1896 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1897 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1898 }
1899
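/*
 * Decode GEN7_ERR_INT: poison, per-pipe FIFO underruns and pipe CRC
 * done events, then ack the register by writing back what was read.
 */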
1900 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1901 {
1902 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1903 enum pipe pipe;
1904
1905 if (err_int & ERR_INT_POISON)
1906 drm_err(&dev_priv->drm, "Poison interrupt\n");
1907
1908 for_each_pipe(dev_priv, pipe) {
1909 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1910 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1911
1912 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1913 if (IS_IVYBRIDGE(dev_priv))
1914 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1915 else
1916 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1917 }
1918 }
1919
1920 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1921 }
1922
1923 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1924 {
1925 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1926 enum pipe pipe;
1927
1928 if (serr_int & SERR_INT_POISON)
1929 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1930
1931 for_each_pipe(dev_priv, pipe)
1932 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1933 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1934
1935 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1936 }
1937
1938 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1939 {
1940 enum pipe pipe;
1941 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1942
1943 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1944
1945 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1946 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1947 SDE_AUDIO_POWER_SHIFT_CPT);
1948 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1949 port_name(port));
1950 }
1951
1952 if (pch_iir & SDE_AUX_MASK_CPT)
1953 dp_aux_irq_handler(dev_priv);
1954
1955 if (pch_iir & SDE_GMBUS_CPT)
1956 gmbus_irq_handler(dev_priv);
1957
1958 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1959 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1960
1961 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1962 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1963
1964 if (pch_iir & SDE_FDI_MASK_CPT) {
1965 for_each_pipe(dev_priv, pipe)
1966 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1967 pipe_name(pipe),
1968 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1969 }
1970
1971 if (pch_iir & SDE_ERROR_CPT)
1972 cpt_serr_int_handler(dev_priv);
1973 }
1974
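/*
 * South display interrupts for ICP+ PCHs: decode the DDI and Type-C
 * hotplug triggers, acking the respective SHOTPLUG_CTL register, and
 * handle GMBUS.
 */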
1975 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1976 {
1977 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1978 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1979 u32 pin_mask = 0, long_mask = 0;
1980
1981 if (ddi_hotplug_trigger) {
1982 u32 dig_hotplug_reg;
1983
1984 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
1985 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1986
1987 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1988 ddi_hotplug_trigger, dig_hotplug_reg,
1989 dev_priv->display.hotplug.pch_hpd,
1990 icp_ddi_port_hotplug_long_detect);
1991 }
1992
1993 if (tc_hotplug_trigger) {
1994 u32 dig_hotplug_reg;
1995
1996 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
1997 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
1998
1999 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2000 tc_hotplug_trigger, dig_hotplug_reg,
2001 dev_priv->display.hotplug.pch_hpd,
2002 icp_tc_port_hotplug_long_detect);
2003 }
2004
2005 if (pin_mask)
2006 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2007
2008 if (pch_iir & SDE_GMBUS_ICP)
2009 gmbus_irq_handler(dev_priv);
2010 }
2011
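/*
 * South display interrupts for SPT/KBP: ports A-D are reported in
 * PCH_PORT_HOTPLUG while port E has its own PCH_PORT_HOTPLUG2
 * register, hence the two separate trigger masks.
 */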
2012 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2013 {
2014 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2015 ~SDE_PORTE_HOTPLUG_SPT;
2016 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2017 u32 pin_mask = 0, long_mask = 0;
2018
2019 if (hotplug_trigger) {
2020 u32 dig_hotplug_reg;
2021
2022 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2023 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2024
2025 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2026 hotplug_trigger, dig_hotplug_reg,
2027 dev_priv->display.hotplug.pch_hpd,
2028 spt_port_hotplug_long_detect);
2029 }
2030
2031 if (hotplug2_trigger) {
2032 u32 dig_hotplug_reg;
2033
2034 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
2035 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2036
2037 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2038 hotplug2_trigger, dig_hotplug_reg,
2039 dev_priv->display.hotplug.pch_hpd,
2040 spt_port_hotplug2_long_detect);
2041 }
2042
2043 if (pin_mask)
2044 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2045
2046 if (pch_iir & SDE_GMBUS_CPT)
2047 gmbus_irq_handler(dev_priv);
2048 }
2049
2050 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2051 u32 hotplug_trigger)
2052 {
2053 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2054
2055 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
2056 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2057
2058 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2059 hotplug_trigger, dig_hotplug_reg,
2060 dev_priv->display.hotplug.hpd,
2061 ilk_port_hotplug_long_detect);
2062
2063 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2064 }
2065
2066 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2067 u32 de_iir)
2068 {
2069 enum pipe pipe;
2070 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2071
2072 if (hotplug_trigger)
2073 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2074
2075 if (de_iir & DE_AUX_CHANNEL_A)
2076 dp_aux_irq_handler(dev_priv);
2077
2078 if (de_iir & DE_GSE)
2079 intel_opregion_asle_intr(dev_priv);
2080
2081 if (de_iir & DE_POISON)
2082 drm_err(&dev_priv->drm, "Poison interrupt\n");
2083
2084 for_each_pipe(dev_priv, pipe) {
2085 if (de_iir & DE_PIPE_VBLANK(pipe))
2086 intel_handle_vblank(dev_priv, pipe);
2087
2088 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2089 flip_done_handler(dev_priv, pipe);
2090
2091 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2092 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2093
2094 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2095 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2096 }
2097
2098 /* check event from PCH */
2099 if (de_iir & DE_PCH_EVENT) {
2100 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2101
2102 if (HAS_PCH_CPT(dev_priv))
2103 cpt_irq_handler(dev_priv, pch_iir);
2104 else
2105 ibx_irq_handler(dev_priv, pch_iir);
2106
2107 /* clear the PCH hotplug event before clearing the CPU irq */
2108 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2109 }
2110
2111 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2112 gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2113 }
2114
2115 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2116 u32 de_iir)
2117 {
2118 enum pipe pipe;
2119 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2120
2121 if (hotplug_trigger)
2122 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2123
2124 if (de_iir & DE_ERR_INT_IVB)
2125 ivb_err_int_handler(dev_priv);
2126
2127 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2128 dp_aux_irq_handler(dev_priv);
2129
2130 if (de_iir & DE_GSE_IVB)
2131 intel_opregion_asle_intr(dev_priv);
2132
2133 for_each_pipe(dev_priv, pipe) {
2134 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2135 intel_handle_vblank(dev_priv, pipe);
2136
2137 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2138 flip_done_handler(dev_priv, pipe);
2139 }
2140
2141 /* check event from PCH */
2142 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2143 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2144
2145 cpt_irq_handler(dev_priv, pch_iir);
2146
2147 /* clear the PCH hotplug event before clearing the CPU irq */
2148 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2149 }
2150 }
2151
2152 /*
2153 * To handle irqs with the minimum potential for races with fresh interrupts, we:
2154 * 1 - Disable Master Interrupt Control.
2155 * 2 - Find the source(s) of the interrupt.
2156 * 3 - Clear the Interrupt Identity bits (IIR).
2157 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2158 * 5 - Re-enable Master Interrupt Control.
2159 */
2160 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2161 {
2162 struct drm_i915_private *i915 = arg;
2163 void __iomem * const regs = i915->uncore.regs;
2164 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2165 irqreturn_t ret = IRQ_NONE;
2166
2167 if (unlikely(!intel_irqs_enabled(i915)))
2168 return IRQ_NONE;
2169
2170 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2171 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2172
2173 /* disable master interrupt before clearing iir */
2174 de_ier = raw_reg_read(regs, DEIER);
2175 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2176
2177 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2178 * interrupts will be stored on its back queue, and then we'll be
2179 * able to process them after we restore SDEIER (as soon as we restore
2180 * it, we'll get an interrupt if SDEIIR still has something to process
2181 * due to its back queue). */
2182 if (!HAS_PCH_NOP(i915)) {
2183 sde_ier = raw_reg_read(regs, SDEIER);
2184 raw_reg_write(regs, SDEIER, 0);
2185 }
2186
2187 /* Find, clear, then process each source of interrupt */
2188
2189 gt_iir = raw_reg_read(regs, GTIIR);
2190 if (gt_iir) {
2191 raw_reg_write(regs, GTIIR, gt_iir);
2192 if (GRAPHICS_VER(i915) >= 6)
2193 gen6_gt_irq_handler(to_gt(i915), gt_iir);
2194 else
2195 gen5_gt_irq_handler(to_gt(i915), gt_iir);
2196 ret = IRQ_HANDLED;
2197 }
2198
2199 de_iir = raw_reg_read(regs, DEIIR);
2200 if (de_iir) {
2201 raw_reg_write(regs, DEIIR, de_iir);
2202 if (DISPLAY_VER(i915) >= 7)
2203 ivb_display_irq_handler(i915, de_iir);
2204 else
2205 ilk_display_irq_handler(i915, de_iir);
2206 ret = IRQ_HANDLED;
2207 }
2208
2209 if (GRAPHICS_VER(i915) >= 6) {
2210 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2211 if (pm_iir) {
2212 raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2213 gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2214 ret = IRQ_HANDLED;
2215 }
2216 }
2217
2218 raw_reg_write(regs, DEIER, de_ier);
2219 if (sde_ier)
2220 raw_reg_write(regs, SDEIER, sde_ier);
2221
2222 pmu_irq_stats(i915, ret);
2223
2224 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2225 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2226
2227 return ret;
2228 }
2229
2230 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2231 u32 hotplug_trigger)
2232 {
2233 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2234
2235 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2236 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2237
2238 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2239 hotplug_trigger, dig_hotplug_reg,
2240 dev_priv->display.hotplug.hpd,
2241 bxt_port_hotplug_long_detect);
2242
2243 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2244 }
2245
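/*
 * GEN11+ DE HPD interrupts: Type-C and Thunderbolt hotplug each have
 * their own trigger mask and hotplug control register, but share the
 * same long pulse detection.
 */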
2246 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2247 {
2248 u32 pin_mask = 0, long_mask = 0;
2249 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2250 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2251
2252 if (trigger_tc) {
2253 u32 dig_hotplug_reg;
2254
2255 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
2256 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2257
2258 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2259 trigger_tc, dig_hotplug_reg,
2260 dev_priv->display.hotplug.hpd,
2261 gen11_port_hotplug_long_detect);
2262 }
2263
2264 if (trigger_tbt) {
2265 u32 dig_hotplug_reg;
2266
2267 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
2268 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2269
2270 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2271 trigger_tbt, dig_hotplug_reg,
2272 dev_priv->display.hotplug.hpd,
2273 gen11_port_hotplug_long_detect);
2274 }
2275
2276 if (pin_mask)
2277 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2278 else
2279 drm_err(&dev_priv->drm,
2280 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2281 }
2282
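/* AUX channel bits in the DE port IIR for the running platform. */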
2283 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2284 {
2285 u32 mask;
2286
2287 if (DISPLAY_VER(dev_priv) >= 13)
2288 return TGL_DE_PORT_AUX_DDIA |
2289 TGL_DE_PORT_AUX_DDIB |
2290 TGL_DE_PORT_AUX_DDIC |
2291 XELPD_DE_PORT_AUX_DDID |
2292 XELPD_DE_PORT_AUX_DDIE |
2293 TGL_DE_PORT_AUX_USBC1 |
2294 TGL_DE_PORT_AUX_USBC2 |
2295 TGL_DE_PORT_AUX_USBC3 |
2296 TGL_DE_PORT_AUX_USBC4;
2297 else if (DISPLAY_VER(dev_priv) >= 12)
2298 return TGL_DE_PORT_AUX_DDIA |
2299 TGL_DE_PORT_AUX_DDIB |
2300 TGL_DE_PORT_AUX_DDIC |
2301 TGL_DE_PORT_AUX_USBC1 |
2302 TGL_DE_PORT_AUX_USBC2 |
2303 TGL_DE_PORT_AUX_USBC3 |
2304 TGL_DE_PORT_AUX_USBC4 |
2305 TGL_DE_PORT_AUX_USBC5 |
2306 TGL_DE_PORT_AUX_USBC6;
2307
2308
2309 mask = GEN8_AUX_CHANNEL_A;
2310 if (DISPLAY_VER(dev_priv) >= 9)
2311 mask |= GEN9_AUX_CHANNEL_B |
2312 GEN9_AUX_CHANNEL_C |
2313 GEN9_AUX_CHANNEL_D;
2314
2315 if (DISPLAY_VER(dev_priv) == 11) {
2316 mask |= ICL_AUX_CHANNEL_F;
2317 mask |= ICL_AUX_CHANNEL_E;
2318 }
2319
2320 return mask;
2321 }
2322
2323 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2324 {
2325 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2326 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2327 else if (DISPLAY_VER(dev_priv) >= 11)
2328 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2329 else if (DISPLAY_VER(dev_priv) >= 9)
2330 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2331 else
2332 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2333 }
2334
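/* DE misc interrupts: opregion ASLE (GSE) and eDP PSR events. */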
2335 static void
2336 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2337 {
2338 bool found = false;
2339
2340 if (iir & GEN8_DE_MISC_GSE) {
2341 intel_opregion_asle_intr(dev_priv);
2342 found = true;
2343 }
2344
2345 if (iir & GEN8_DE_EDP_PSR) {
2346 struct intel_encoder *encoder;
2347 u32 psr_iir;
2348 i915_reg_t iir_reg;
2349
2350 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2351 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2352
2353 if (DISPLAY_VER(dev_priv) >= 12)
2354 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2355 else
2356 iir_reg = EDP_PSR_IIR;
2357
2358 psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2359 intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2360
2361 if (psr_iir)
2362 found = true;
2363
2364 intel_psr_irq_handler(intel_dp, psr_iir);
2365
2366 /* prior to GEN12 there is only one EDP PSR */
2367 if (DISPLAY_VER(dev_priv) < 12)
2368 break;
2369 }
2370 }
2371
2372 if (!found)
2373 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2374 }
2375
2376 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2377 u32 te_trigger)
2378 {
2379 enum pipe pipe = INVALID_PIPE;
2380 enum transcoder dsi_trans;
2381 enum port port;
2382 u32 val, tmp;
2383
2384 /*
2385 * In case of dual link, TE comes from DSI_1;
2386 * this checks whether dual link is enabled
2387 */
2388 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2389 val &= PORT_SYNC_MODE_ENABLE;
2390
2391 /*
2392 * If dual link is enabled, read the DSI_0
2393 * transcoder registers
2394 */
2395 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2396 PORT_A : PORT_B;
2397 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2398
2399 /* Check if DSI configured in command mode */
2400 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2401 val = val & OP_MODE_MASK;
2402
2403 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2404 drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
2405 return;
2406 }
2407
2408 /* Get PIPE for handling VBLANK event */
2409 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2410 switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2411 case TRANS_DDI_EDP_INPUT_A_ON:
2412 pipe = PIPE_A;
2413 break;
2414 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2415 pipe = PIPE_B;
2416 break;
2417 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2418 pipe = PIPE_C;
2419 break;
2420 default:
2421 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2422 return;
2423 }
2424
2425 intel_handle_vblank(dev_priv, pipe);
2426
2427 /* clear TE in dsi IIR */
2428 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2429 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2430 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2431 }
2432
2433 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2434 {
2435 if (DISPLAY_VER(i915) >= 9)
2436 return GEN9_PIPE_PLANE1_FLIP_DONE;
2437 else
2438 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2439 }
2440
2441 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2442 {
2443 u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2444
2445 if (DISPLAY_VER(dev_priv) >= 13)
2446 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2447 XELPD_PIPE_HARD_UNDERRUN;
2448
2449 return mask;
2450 }
2451
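/*
 * Main GEN8+ display interrupt handler: walk the DE misc, HPD, port,
 * per-pipe and PCH sources indicated in master_ctl, acking each IIR
 * before processing it.
 */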
2452 static irqreturn_t
2453 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2454 {
2455 irqreturn_t ret = IRQ_NONE;
2456 u32 iir;
2457 enum pipe pipe;
2458
2459 drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2460
2461 if (master_ctl & GEN8_DE_MISC_IRQ) {
2462 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2463 if (iir) {
2464 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2465 ret = IRQ_HANDLED;
2466 gen8_de_misc_irq_handler(dev_priv, iir);
2467 } else {
2468 drm_err(&dev_priv->drm,
2469 "The master control interrupt lied (DE MISC)!\n");
2470 }
2471 }
2472
2473 if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2474 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2475 if (iir) {
2476 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2477 ret = IRQ_HANDLED;
2478 gen11_hpd_irq_handler(dev_priv, iir);
2479 } else {
2480 drm_err(&dev_priv->drm,
2481 "The master control interrupt lied, (DE HPD)!\n");
2482 }
2483 }
2484
2485 if (master_ctl & GEN8_DE_PORT_IRQ) {
2486 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2487 if (iir) {
2488 bool found = false;
2489
2490 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2491 ret = IRQ_HANDLED;
2492
2493 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2494 dp_aux_irq_handler(dev_priv);
2495 found = true;
2496 }
2497
2498 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2499 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2500
2501 if (hotplug_trigger) {
2502 bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2503 found = true;
2504 }
2505 } else if (IS_BROADWELL(dev_priv)) {
2506 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2507
2508 if (hotplug_trigger) {
2509 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2510 found = true;
2511 }
2512 }
2513
2514 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2515 (iir & BXT_DE_PORT_GMBUS)) {
2516 gmbus_irq_handler(dev_priv);
2517 found = true;
2518 }
2519
2520 if (DISPLAY_VER(dev_priv) >= 11) {
2521 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2522
2523 if (te_trigger) {
2524 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2525 found = true;
2526 }
2527 }
2528
2529 if (!found)
2530 drm_err(&dev_priv->drm,
2531 "Unexpected DE Port interrupt\n");
2532 } else {
2533 drm_err(&dev_priv->drm,
2534 "The master control interrupt lied (DE PORT)!\n");
2535 }
2536 }
2537
2538 for_each_pipe(dev_priv, pipe) {
2539 u32 fault_errors;
2540
2541 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2542 continue;
2543
2544 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2545 if (!iir) {
2546 drm_err(&dev_priv->drm,
2547 "The master control interrupt lied (DE PIPE)!\n");
2548 continue;
2549 }
2550
2551 ret = IRQ_HANDLED;
2552 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2553
2554 if (iir & GEN8_PIPE_VBLANK)
2555 intel_handle_vblank(dev_priv, pipe);
2556
2557 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2558 flip_done_handler(dev_priv, pipe);
2559
2560 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2561 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2562
2563 if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2564 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2565
2566 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2567 if (fault_errors)
2568 drm_err(&dev_priv->drm,
2569 "Fault errors on pipe %c: 0x%08x\n",
2570 pipe_name(pipe),
2571 fault_errors);
2572 }
2573
2574 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2575 master_ctl & GEN8_DE_PCH_IRQ) {
2576 /*
2577 * FIXME(BDW): Assume for now that the new interrupt handling
2578 * scheme also closed the SDE interrupt handling race we've seen
2579 * on older pch-split platforms. But this needs testing.
2580 */
2581 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2582 if (iir) {
2583 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2584 ret = IRQ_HANDLED;
2585
2586 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2587 icp_irq_handler(dev_priv, iir);
2588 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2589 spt_irq_handler(dev_priv, iir);
2590 else
2591 cpt_irq_handler(dev_priv, iir);
2592 } else {
2593 /*
2594 * Like on previous PCH there seems to be something
2595 * fishy going on with forwarding PCH interrupts.
2596 */
2597 drm_dbg(&dev_priv->drm,
2598 "The master control interrupt lied (SDE)!\n");
2599 }
2600 }
2601
2602 return ret;
2603 }
2604
2605 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2606 {
2607 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2608
2609 /*
2610 * Now with master disabled, get a sample of level indications
2611 * for this interrupt. Indications will be cleared on related acks.
2612 * New indications can and will light up during processing,
2613 * and will generate a new interrupt after enabling the master.
2614 */
2615 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2616 }
2617
2618 static inline void gen8_master_intr_enable(void __iomem * const regs)
2619 {
2620 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2621 }
2622
2623 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2624 {
2625 struct drm_i915_private *dev_priv = arg;
2626 void __iomem * const regs = dev_priv->uncore.regs;
2627 u32 master_ctl;
2628
2629 if (!intel_irqs_enabled(dev_priv))
2630 return IRQ_NONE;
2631
2632 master_ctl = gen8_master_intr_disable(regs);
2633 if (!master_ctl) {
2634 gen8_master_intr_enable(regs);
2635 return IRQ_NONE;
2636 }
2637
2638 /* Find, queue (onto bottom-halves), then clear each source */
2639 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2640
2641 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2642 if (master_ctl & ~GEN8_GT_IRQS) {
2643 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2644 gen8_de_irq_handler(dev_priv, master_ctl);
2645 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2646 }
2647
2648 gen8_master_intr_enable(regs);
2649
2650 pmu_irq_stats(dev_priv, IRQ_HANDLED);
2651
2652 return IRQ_HANDLED;
2653 }
2654
2655 static u32
2656 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2657 {
2658 void __iomem * const regs = i915->uncore.regs;
2659 u32 iir;
2660
2661 if (!(master_ctl & GEN11_GU_MISC_IRQ))
2662 return 0;
2663
2664 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2665 if (likely(iir))
2666 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2667
2668 return iir;
2669 }
2670
2671 static void
2672 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2673 {
2674 if (iir & GEN11_GU_MISC_GSE)
2675 intel_opregion_asle_intr(i915);
2676 }
2677
2678 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2679 {
2680 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2681
2682 /*
2683 * Now with master disabled, get a sample of level indications
2684 * for this interrupt. Indications will be cleared on related acks.
2685 * New indications can and will light up during processing,
2686 * and will generate a new interrupt after enabling the master.
2687 */
2688 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2689 }
2690
2691 static inline void gen11_master_intr_enable(void __iomem * const regs)
2692 {
2693 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2694 }
2695
2696 static void
2697 gen11_display_irq_handler(struct drm_i915_private *i915)
2698 {
2699 void __iomem * const regs = i915->uncore.regs;
2700 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2701
2702 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2703 /*
2704 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2705 * for the display related bits.
2706 */
2707 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2708 gen8_de_irq_handler(i915, disp_ctl);
2709 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2710 GEN11_DISPLAY_IRQ_ENABLE);
2711
2712 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2713 }
2714
2715 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2716 {
2717 struct drm_i915_private *i915 = arg;
2718 void __iomem * const regs = i915->uncore.regs;
2719 struct intel_gt *gt = to_gt(i915);
2720 u32 master_ctl;
2721 u32 gu_misc_iir;
2722
2723 if (!intel_irqs_enabled(i915))
2724 return IRQ_NONE;
2725
2726 master_ctl = gen11_master_intr_disable(regs);
2727 if (!master_ctl) {
2728 gen11_master_intr_enable(regs);
2729 return IRQ_NONE;
2730 }
2731
2732 /* Find, queue (onto bottom-halves), then clear each source */
2733 gen11_gt_irq_handler(gt, master_ctl);
2734
2735 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2736 if (master_ctl & GEN11_DISPLAY_IRQ)
2737 gen11_display_irq_handler(i915);
2738
2739 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2740
2741 gen11_master_intr_enable(regs);
2742
2743 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2744
2745 pmu_irq_stats(i915, IRQ_HANDLED);
2746
2747 return IRQ_HANDLED;
2748 }
2749
2750 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2751 {
2752 u32 val;
2753
2754 /* First disable interrupts */
2755 raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2756
2757 /* Get the indication levels and ack the master unit */
2758 val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2759 if (unlikely(!val))
2760 return 0;
2761
2762 raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2763
2764 return val;
2765 }
2766
2767 static inline void dg1_master_intr_enable(void __iomem * const regs)
2768 {
2769 raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2770 }
2771
2772 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2773 {
2774 struct drm_i915_private * const i915 = arg;
2775 struct intel_gt *gt = to_gt(i915);
2776 void __iomem * const regs = gt->uncore->regs;
2777 u32 master_tile_ctl, master_ctl;
2778 u32 gu_misc_iir;
2779
2780 if (!intel_irqs_enabled(i915))
2781 return IRQ_NONE;
2782
2783 master_tile_ctl = dg1_master_intr_disable(regs);
2784 if (!master_tile_ctl) {
2785 dg1_master_intr_enable(regs);
2786 return IRQ_NONE;
2787 }
2788
2789 /* FIXME: we only support tile 0 for now. */
2790 if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2791 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2792 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2793 } else {
2794 drm_err(&i915->drm, "Tile not supported: 0x%08x\n", master_tile_ctl);
2795 dg1_master_intr_enable(regs);
2796 return IRQ_NONE;
2797 }
2798
2799 gen11_gt_irq_handler(gt, master_ctl);
2800
2801 if (master_ctl & GEN11_DISPLAY_IRQ)
2802 gen11_display_irq_handler(i915);
2803
2804 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2805
2806 dg1_master_intr_enable(regs);
2807
2808 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2809
2810 pmu_irq_stats(i915, IRQ_HANDLED);
2811
2812 return IRQ_HANDLED;
2813 }
2814
2815 /* Called from drm generic code, passed 'crtc' which
2816 * we use as a pipe index
2817 */
2818 int i8xx_enable_vblank(struct drm_crtc *crtc)
2819 {
2820 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2821 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2822 unsigned long irqflags;
2823
2824 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2825 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2826 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2827
2828 return 0;
2829 }
2830
2831 int i915gm_enable_vblank(struct drm_crtc *crtc)
2832 {
2833 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2834
2835 /*
2836 * Vblank interrupts fail to wake the device up from C2+.
2837 * Disabling render clock gating during C-states avoids
2838 * the problem. There is a small power cost so we do this
2839 * only when vblank interrupts are actually enabled.
2840 */
2841 if (dev_priv->vblank_enabled++ == 0)
2842 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2843
2844 return i8xx_enable_vblank(crtc);
2845 }
2846
2847 int i965_enable_vblank(struct drm_crtc *crtc)
2848 {
2849 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2850 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2851 unsigned long irqflags;
2852
2853 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2854 i915_enable_pipestat(dev_priv, pipe,
2855 PIPE_START_VBLANK_INTERRUPT_STATUS);
2856 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2857
2858 return 0;
2859 }
2860
2861 int ilk_enable_vblank(struct drm_crtc *crtc)
2862 {
2863 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2864 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2865 unsigned long irqflags;
2866 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2867 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2868
2869 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2870 ilk_enable_display_irq(dev_priv, bit);
2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2872
2873 /* Even if there is no DMC, the frame counter can get stuck when
2874 * PSR is active, as no frames are generated.
2875 */
2876 if (HAS_PSR(dev_priv))
2877 drm_crtc_vblank_restore(crtc);
2878
2879 return 0;
2880 }
2881
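/*
 * Mask or unmask the DSI TE event used for vblank signalling in
 * command mode; returns false when the crtc does not use DSI TE so the
 * caller can fall back to regular pipe vblank interrupts.
 */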
2882 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2883 bool enable)
2884 {
2885 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2886 enum port port;
2887 u32 tmp;
2888
2889 if (!(intel_crtc->mode_flags &
2890 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2891 return false;
2892
2893 /* for dual link cases we consider TE from slave */
2894 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2895 port = PORT_B;
2896 else
2897 port = PORT_A;
2898
2899 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2900 if (enable)
2901 tmp &= ~DSI_TE_EVENT;
2902 else
2903 tmp |= DSI_TE_EVENT;
2904
2905 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2906
2907 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2908 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2909
2910 return true;
2911 }
2912
2913 int bdw_enable_vblank(struct drm_crtc *_crtc)
2914 {
2915 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2916 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2917 enum pipe pipe = crtc->pipe;
2918 unsigned long irqflags;
2919
2920 if (gen11_dsi_configure_te(crtc, true))
2921 return 0;
2922
2923 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2924 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2925 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2926
2927 /* Even if there is no DMC, the frame counter can get stuck when
2928 * PSR is active, as no frames are generated, so check only for PSR.
2929 */
2930 if (HAS_PSR(dev_priv))
2931 drm_crtc_vblank_restore(&crtc->base);
2932
2933 return 0;
2934 }
2935
2936 /* Called from drm generic code, passed 'crtc' which
2937 * we use as a pipe index
2938 */
2939 void i8xx_disable_vblank(struct drm_crtc *crtc)
2940 {
2941 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2942 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2943 unsigned long irqflags;
2944
2945 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2946 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2947 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2948 }
2949
2950 void i915gm_disable_vblank(struct drm_crtc *crtc)
2951 {
2952 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2953
2954 i8xx_disable_vblank(crtc);
2955
2956 if (--dev_priv->vblank_enabled == 0)
2957 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2958 }
2959
2960 void i965_disable_vblank(struct drm_crtc *crtc)
2961 {
2962 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2963 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2964 unsigned long irqflags;
2965
2966 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2967 i915_disable_pipestat(dev_priv, pipe,
2968 PIPE_START_VBLANK_INTERRUPT_STATUS);
2969 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2970 }
2971
2972 void ilk_disable_vblank(struct drm_crtc *crtc)
2973 {
2974 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2975 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2976 unsigned long irqflags;
2977 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2978 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2979
2980 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2981 ilk_disable_display_irq(dev_priv, bit);
2982 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2983 }
2984
2985 void bdw_disable_vblank(struct drm_crtc *_crtc)
2986 {
2987 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2988 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2989 enum pipe pipe = crtc->pipe;
2990 unsigned long irqflags;
2991
2992 if (gen11_dsi_configure_te(crtc, false))
2993 return;
2994
2995 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2996 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2997 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2998 }
2999
3000 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3001 {
3002 struct intel_uncore *uncore = &dev_priv->uncore;
3003
3004 if (HAS_PCH_NOP(dev_priv))
3005 return;
3006
3007 GEN3_IRQ_RESET(uncore, SDE);
3008
3009 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3010 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
3011 }
3012
3013 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3014 {
3015 struct intel_uncore *uncore = &dev_priv->uncore;
3016
3017 if (IS_CHERRYVIEW(dev_priv))
3018 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3019 else
3020 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
3021
3022 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3023 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
3024
3025 i9xx_pipestat_irq_reset(dev_priv);
3026
3027 GEN3_IRQ_RESET(uncore, VLV_);
3028 dev_priv->irq_mask = ~0u;
3029 }
3030
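/*
 * Enable the display interrupts used on VLV/CHV: per-pipe CRC and
 * event interrupts, GMBUS on pipe A, display port and LPE audio, with
 * the pipe C variants added on CHV.
 */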
3031 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3032 {
3033 struct intel_uncore *uncore = &dev_priv->uncore;
3034
3035 u32 pipestat_mask;
3036 u32 enable_mask;
3037 enum pipe pipe;
3038
3039 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3040
3041 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3042 for_each_pipe(dev_priv, pipe)
3043 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3044
3045 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3046 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3047 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3048 I915_LPE_PIPE_A_INTERRUPT |
3049 I915_LPE_PIPE_B_INTERRUPT;
3050
3051 if (IS_CHERRYVIEW(dev_priv))
3052 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3053 I915_LPE_PIPE_C_INTERRUPT;
3054
3055 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3056
3057 dev_priv->irq_mask = ~enable_mask;
3058
3059 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3060 }
3061
3062 /* drm_dma.h hooks */
3063
3064 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3065 {
3066 struct intel_uncore *uncore = &dev_priv->uncore;
3067
3068 GEN3_IRQ_RESET(uncore, DE);
3069 dev_priv->irq_mask = ~0u;
3070
3071 if (GRAPHICS_VER(dev_priv) == 7)
3072 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3073
3074 if (IS_HASWELL(dev_priv)) {
3075 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3076 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3077 }
3078
3079 gen5_gt_irq_reset(to_gt(dev_priv));
3080
3081 ibx_irq_reset(dev_priv);
3082 }
3083
3084 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3085 {
3086 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3087 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3088
3089 gen5_gt_irq_reset(to_gt(dev_priv));
3090
3091 spin_lock_irq(&dev_priv->irq_lock);
3092 if (dev_priv->display_irqs_enabled)
3093 vlv_display_irq_reset(dev_priv);
3094 spin_unlock_irq(&dev_priv->irq_lock);
3095 }
3096
3097 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3098 {
3099 struct intel_uncore *uncore = &dev_priv->uncore;
3100 enum pipe pipe;
3101
3102 if (!HAS_DISPLAY(dev_priv))
3103 return;
3104
3105 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3106 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3107
3108 for_each_pipe(dev_priv, pipe)
3109 if (intel_display_power_is_enabled(dev_priv,
3110 POWER_DOMAIN_PIPE(pipe)))
3111 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3112
3113 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3114 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3115 }
3116
3117 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3118 {
3119 struct intel_uncore *uncore = &dev_priv->uncore;
3120
3121 gen8_master_intr_disable(dev_priv->uncore.regs);
3122
3123 gen8_gt_irq_reset(to_gt(dev_priv));
3124 gen8_display_irq_reset(dev_priv);
3125 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3126
3127 if (HAS_PCH_SPLIT(dev_priv))
3128 ibx_irq_reset(dev_priv);
3129
3130 }
3131
3132 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3133 {
3134 struct intel_uncore *uncore = &dev_priv->uncore;
3135 enum pipe pipe;
3136 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3137 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3138
3139 if (!HAS_DISPLAY(dev_priv))
3140 return;
3141
3142 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3143
3144 if (DISPLAY_VER(dev_priv) >= 12) {
3145 enum transcoder trans;
3146
3147 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3148 enum intel_display_power_domain domain;
3149
3150 domain = POWER_DOMAIN_TRANSCODER(trans);
3151 if (!intel_display_power_is_enabled(dev_priv, domain))
3152 continue;
3153
3154 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3155 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3156 }
3157 } else {
3158 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3159 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3160 }
3161
3162 for_each_pipe(dev_priv, pipe)
3163 if (intel_display_power_is_enabled(dev_priv,
3164 POWER_DOMAIN_PIPE(pipe)))
3165 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3166
3167 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3168 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3169 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3170
3171 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3172 GEN3_IRQ_RESET(uncore, SDE);
3173 }
3174
3175 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3176 {
3177 struct intel_gt *gt = to_gt(dev_priv);
3178 struct intel_uncore *uncore = gt->uncore;
3179
3180 gen11_master_intr_disable(dev_priv->uncore.regs);
3181
3182 gen11_gt_irq_reset(gt);
3183 gen11_display_irq_reset(dev_priv);
3184
3185 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3186 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3187 }
3188
3189 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
3190 {
3191 struct intel_gt *gt = to_gt(dev_priv);
3192 struct intel_uncore *uncore = gt->uncore;
3193
3194 dg1_master_intr_disable(dev_priv->uncore.regs);
3195
3196 gen11_gt_irq_reset(gt);
3197 gen11_display_irq_reset(dev_priv);
3198
3199 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3200 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3201 }
3202
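/**
 * gen8_irq_power_well_post_enable - re-enable pipe interrupts after a power well is enabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupt registers need to be re-initialized
 */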
3203 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3204 u8 pipe_mask)
3205 {
3206 struct intel_uncore *uncore = &dev_priv->uncore;
3207 u32 extra_ier = GEN8_PIPE_VBLANK |
3208 gen8_de_pipe_underrun_mask(dev_priv) |
3209 gen8_de_pipe_flip_done_mask(dev_priv);
3210 enum pipe pipe;
3211
3212 spin_lock_irq(&dev_priv->irq_lock);
3213
3214 if (!intel_irqs_enabled(dev_priv)) {
3215 spin_unlock_irq(&dev_priv->irq_lock);
3216 return;
3217 }
3218
3219 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3220 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3221 dev_priv->de_irq_mask[pipe],
3222 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3223
3224 spin_unlock_irq(&dev_priv->irq_lock);
3225 }
3226
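/**
 * gen8_irq_power_well_pre_disable - disable pipe interrupts before a power well is disabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupt registers need to be reset
 */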
3227 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3228 u8 pipe_mask)
3229 {
3230 struct intel_uncore *uncore = &dev_priv->uncore;
3231 enum pipe pipe;
3232
3233 spin_lock_irq(&dev_priv->irq_lock);
3234
3235 if (!intel_irqs_enabled(dev_priv)) {
3236 spin_unlock_irq(&dev_priv->irq_lock);
3237 return;
3238 }
3239
3240 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3241 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3242
3243 spin_unlock_irq(&dev_priv->irq_lock);
3244
3245 /* make sure we're done processing display irqs */
3246 intel_synchronize_irq(dev_priv);
3247 }
3248
3249 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3250 {
3251 struct intel_uncore *uncore = &dev_priv->uncore;
3252
3253 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
3254 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3255
3256 gen8_gt_irq_reset(to_gt(dev_priv));
3257
3258 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3259
3260 spin_lock_irq(&dev_priv->irq_lock);
3261 if (dev_priv->display_irqs_enabled)
3262 vlv_display_irq_reset(dev_priv);
3263 spin_unlock_irq(&dev_priv->irq_lock);
3264 }
3265
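/* HPD enable bits to set in PCH_PORT_HOTPLUG for a given pin. */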
3266 static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3267 enum hpd_pin pin)
3268 {
3269 switch (pin) {
3270 case HPD_PORT_A:
3271 /*
3272 * When CPU and PCH are on the same package, port A
3273 * HPD must be enabled in both north and south.
3274 */
3275 return HAS_PCH_LPT_LP(i915) ?
3276 PORTA_HOTPLUG_ENABLE : 0;
3277 case HPD_PORT_B:
3278 return PORTB_HOTPLUG_ENABLE |
3279 PORTB_PULSE_DURATION_2ms;
3280 case HPD_PORT_C:
3281 return PORTC_HOTPLUG_ENABLE |
3282 PORTC_PULSE_DURATION_2ms;
3283 case HPD_PORT_D:
3284 return PORTD_HOTPLUG_ENABLE |
3285 PORTD_PULSE_DURATION_2ms;
3286 default:
3287 return 0;
3288 }
3289 }
3290
3291 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3292 {
3293 u32 hotplug;
3294
3295 /*
3296 * Enable digital hotplug on the PCH, and configure the DP short pulse
3297 * duration to 2ms (which is the minimum in the Display Port spec).
3298 * The pulse duration bits are reserved on LPT+.
3299 */
3300 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3301 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3302 PORTB_HOTPLUG_ENABLE |
3303 PORTC_HOTPLUG_ENABLE |
3304 PORTD_HOTPLUG_ENABLE |
3305 PORTB_PULSE_DURATION_MASK |
3306 PORTC_PULSE_DURATION_MASK |
3307 PORTD_PULSE_DURATION_MASK);
3308 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3309 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3310 }
3311
3312 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3313 {
3314 u32 hotplug_irqs, enabled_irqs;
3315
3316 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3317 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3318
3319 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3320
3321 ibx_hpd_detection_setup(dev_priv);
3322 }
3323
3324 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3325 enum hpd_pin pin)
3326 {
3327 switch (pin) {
3328 case HPD_PORT_A:
3329 case HPD_PORT_B:
3330 case HPD_PORT_C:
3331 case HPD_PORT_D:
3332 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3333 default:
3334 return 0;
3335 }
3336 }
3337
3338 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3339 enum hpd_pin pin)
3340 {
3341 switch (pin) {
3342 case HPD_PORT_TC1:
3343 case HPD_PORT_TC2:
3344 case HPD_PORT_TC3:
3345 case HPD_PORT_TC4:
3346 case HPD_PORT_TC5:
3347 case HPD_PORT_TC6:
3348 return ICP_TC_HPD_ENABLE(pin);
3349 default:
3350 return 0;
3351 }
3352 }
3353
3354 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3355 {
3356 u32 hotplug;
3357
3358 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
3359 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3360 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3361 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3362 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
3363 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3364 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
3365 }
3366
3367 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3368 {
3369 u32 hotplug;
3370
3371 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
3372 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3373 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3374 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3375 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3376 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3377 ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
3378 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3379 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
3380 }
3381
3382 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3383 {
3384 u32 hotplug_irqs, enabled_irqs;
3385
3386 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3387 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3388
3389 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3390 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3391
3392 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3393
3394 icp_ddi_hpd_detection_setup(dev_priv);
3395 icp_tc_hpd_detection_setup(dev_priv);
3396 }
3397
3398 static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3399 enum hpd_pin pin)
3400 {
3401 switch (pin) {
3402 case HPD_PORT_TC1:
3403 case HPD_PORT_TC2:
3404 case HPD_PORT_TC3:
3405 case HPD_PORT_TC4:
3406 case HPD_PORT_TC5:
3407 case HPD_PORT_TC6:
3408 return GEN11_HOTPLUG_CTL_ENABLE(pin);
3409 default:
3410 return 0;
3411 }
3412 }
3413
3414 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3415 {
3416 u32 val;
3417
3418 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3419 val |= (INVERT_DDIA_HPD |
3420 INVERT_DDIB_HPD |
3421 INVERT_DDIC_HPD |
3422 INVERT_DDID_HPD);
3423 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3424
3425 icp_hpd_irq_setup(dev_priv);
3426 }
3427
3428 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3429 {
3430 u32 hotplug;
3431
3432 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
3433 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3434 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3435 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3436 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3437 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3438 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3439 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3440 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
3441 }
3442
3443 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3444 {
3445 u32 hotplug;
3446
3447 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
3448 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3449 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3450 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3451 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3452 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3453 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3454 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3455 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
3456 }
3457
3458 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3459 {
3460 u32 hotplug_irqs, enabled_irqs;
3461 u32 val;
3462
3463 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3464 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3465
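	/* unmask the enabled hotplug irqs in the DE HPD IMR, mask the disabled ones */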
3466 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3467 val &= ~hotplug_irqs;
3468 val |= ~enabled_irqs & hotplug_irqs;
3469 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
3470 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3471
3472 gen11_tc_hpd_detection_setup(dev_priv);
3473 gen11_tbt_hpd_detection_setup(dev_priv);
3474
3475 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3476 icp_hpd_irq_setup(dev_priv);
3477 }
3478
3479 static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3480 enum hpd_pin pin)
3481 {
3482 switch (pin) {
3483 case HPD_PORT_A:
3484 return PORTA_HOTPLUG_ENABLE;
3485 case HPD_PORT_B:
3486 return PORTB_HOTPLUG_ENABLE;
3487 case HPD_PORT_C:
3488 return PORTC_HOTPLUG_ENABLE;
3489 case HPD_PORT_D:
3490 return PORTD_HOTPLUG_ENABLE;
3491 default:
3492 return 0;
3493 }
3494 }
3495
3496 static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3497 enum hpd_pin pin)
3498 {
3499 switch (pin) {
3500 case HPD_PORT_E:
3501 return PORTE_HOTPLUG_ENABLE;
3502 default:
3503 return 0;
3504 }
3505 }
3506
3507 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3508 {
3509 u32 val, hotplug;
3510
3511 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3512 if (HAS_PCH_CNP(dev_priv)) {
3513 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3514 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3515 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3516 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3517 }
3518
3519 /* Enable digital hotplug on the PCH */
3520 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3521 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3522 PORTB_HOTPLUG_ENABLE |
3523 PORTC_HOTPLUG_ENABLE |
3524 PORTD_HOTPLUG_ENABLE);
3525 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
3526 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3527
3528 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
3529 hotplug &= ~PORTE_HOTPLUG_ENABLE;
3530 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
3531 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
3532 }
3533
3534 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3535 {
3536 u32 hotplug_irqs, enabled_irqs;
3537
3538 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3539 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3540
3541 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3542 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3543
3544 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3545
3546 spt_hpd_detection_setup(dev_priv);
3547 }
3548
3549 static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3550 enum hpd_pin pin)
3551 {
3552 switch (pin) {
3553 case HPD_PORT_A:
3554 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3555 DIGITAL_PORTA_PULSE_DURATION_2ms;
3556 default:
3557 return 0;
3558 }
3559 }
3560
3561 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3562 {
3563 u32 hotplug;
3564
3565 /*
3566 * Enable digital hotplug on the CPU, and configure the DP short pulse
3567 * duration to 2ms (which is the minimum in the DisplayPort spec).
3568 * The pulse duration bits are reserved on HSW+.
3569 */
3570 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
3571 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
3572 DIGITAL_PORTA_PULSE_DURATION_MASK);
3573 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
3574 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3575 }
3576
3577 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3578 {
3579 u32 hotplug_irqs, enabled_irqs;
3580
3581 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3582 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3583
3584 if (DISPLAY_VER(dev_priv) >= 8)
3585 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3586 else
3587 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3588
3589 ilk_hpd_detection_setup(dev_priv);
3590
3591 ibx_hpd_irq_setup(dev_priv);
3592 }
3593
3594 static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3595 enum hpd_pin pin)
3596 {
3597 u32 hotplug;
3598
3599 switch (pin) {
3600 case HPD_PORT_A:
3601 hotplug = PORTA_HOTPLUG_ENABLE;
3602 if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3603 hotplug |= BXT_DDIA_HPD_INVERT;
3604 return hotplug;
3605 case HPD_PORT_B:
3606 hotplug = PORTB_HOTPLUG_ENABLE;
3607 if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3608 hotplug |= BXT_DDIB_HPD_INVERT;
3609 return hotplug;
3610 case HPD_PORT_C:
3611 hotplug = PORTC_HOTPLUG_ENABLE;
3612 if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3613 hotplug |= BXT_DDIC_HPD_INVERT;
3614 return hotplug;
3615 default:
3616 return 0;
3617 }
3618 }
3619
3620 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3621 {
3622 u32 hotplug;
3623
3624 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3625 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3626 PORTB_HOTPLUG_ENABLE |
3627 PORTC_HOTPLUG_ENABLE |
3628 BXT_DDIA_HPD_INVERT |
3629 BXT_DDIB_HPD_INVERT |
3630 BXT_DDIC_HPD_INVERT);
3631 hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
3632 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3633 }
3634
3635 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3636 {
3637 u32 hotplug_irqs, enabled_irqs;
3638
3639 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3640 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3641
3642 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3643
3644 bxt_hpd_detection_setup(dev_priv);
3645 }
3646
3647 /*
3648 * SDEIER is also touched by the interrupt handler to work around missed PCH
3649 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3650 * instead we unconditionally enable all PCH interrupt sources here, but then
3651 * only unmask them as needed with SDEIMR.
3652 *
3653 * Note that we currently do this after installing the interrupt handler,
3654 * but before we enable the master interrupt. That should be sufficient
3655 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3656 * interrupts could still race.
3657 */
3658 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3659 {
3660 struct intel_uncore *uncore = &dev_priv->uncore;
3661 u32 mask;
3662
3663 if (HAS_PCH_NOP(dev_priv))
3664 return;
3665
3666 if (HAS_PCH_IBX(dev_priv))
3667 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3668 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3669 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3670 else
3671 mask = SDE_GMBUS_CPT;
3672
3673 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3674 }
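
/*
 * Illustrative follow-up to the comment above ibx_irq_postinstall(): with
 * every PCH source enabled in SDEIER, turning an individual source on or
 * off later only touches SDEIMR, e.g. the hotplug path does
 *
 *	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 *
 * (see ibx_hpd_irq_setup() above), and therefore cannot race with the
 * SDEIER handling in the interrupt handler.
 */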
3675
3676 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3677 {
3678 struct intel_uncore *uncore = &dev_priv->uncore;
3679 u32 display_mask, extra_mask;
3680
3681 if (GRAPHICS_VER(dev_priv) >= 7) {
3682 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3683 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3684 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3685 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3686 DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3687 DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3688 DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3689 DE_DP_A_HOTPLUG_IVB);
3690 } else {
3691 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3692 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3693 DE_PIPEA_CRC_DONE | DE_POISON);
3694 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3695 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3696 DE_PLANE_FLIP_DONE(PLANE_A) |
3697 DE_PLANE_FLIP_DONE(PLANE_B) |
3698 DE_DP_A_HOTPLUG);
3699 }
3700
3701 if (IS_HASWELL(dev_priv)) {
3702 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3703 display_mask |= DE_EDP_PSR_INT_HSW;
3704 }
3705
3706 if (IS_IRONLAKE_M(dev_priv))
3707 extra_mask |= DE_PCU_EVENT;
3708
3709 dev_priv->irq_mask = ~display_mask;
3710
3711 ibx_irq_postinstall(dev_priv);
3712
3713 gen5_gt_irq_postinstall(to_gt(dev_priv));
3714
3715 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3716 display_mask | extra_mask);
3717 }
3718
3719 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3720 {
3721 lockdep_assert_held(&dev_priv->irq_lock);
3722
3723 if (dev_priv->display_irqs_enabled)
3724 return;
3725
3726 dev_priv->display_irqs_enabled = true;
3727
3728 if (intel_irqs_enabled(dev_priv)) {
3729 vlv_display_irq_reset(dev_priv);
3730 vlv_display_irq_postinstall(dev_priv);
3731 }
3732 }
3733
3734 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3735 {
3736 lockdep_assert_held(&dev_priv->irq_lock);
3737
3738 if (!dev_priv->display_irqs_enabled)
3739 return;
3740
3741 dev_priv->display_irqs_enabled = false;
3742
3743 if (intel_irqs_enabled(dev_priv))
3744 vlv_display_irq_reset(dev_priv);
3745 }
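
/*
 * Both helpers above must be called with irq_lock held (note the lockdep
 * asserts). A minimal sketch of the expected caller pattern, as used by
 * the display power well code:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	valleyview_enable_display_irqs(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */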
3746
3747
3748 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3749 {
3750 gen5_gt_irq_postinstall(to_gt(dev_priv));
3751
3752 spin_lock_irq(&dev_priv->irq_lock);
3753 if (dev_priv->display_irqs_enabled)
3754 vlv_display_irq_postinstall(dev_priv);
3755 spin_unlock_irq(&dev_priv->irq_lock);
3756
3757 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3758 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3759 }
3760
3761 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3762 {
3763 struct intel_uncore *uncore = &dev_priv->uncore;
3764
3765 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3766 GEN8_PIPE_CDCLK_CRC_DONE;
3767 u32 de_pipe_enables;
3768 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3769 u32 de_port_enables;
3770 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3771 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3772 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3773 enum pipe pipe;
3774
3775 if (!HAS_DISPLAY(dev_priv))
3776 return;
3777
3778 if (DISPLAY_VER(dev_priv) <= 10)
3779 de_misc_masked |= GEN8_DE_MISC_GSE;
3780
3781 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3782 de_port_masked |= BXT_DE_PORT_GMBUS;
3783
3784 if (DISPLAY_VER(dev_priv) >= 11) {
3785 enum port port;
3786
3787 if (intel_bios_is_dsi_present(dev_priv, &port))
3788 de_port_masked |= DSI0_TE | DSI1_TE;
3789 }
3790
3791 de_pipe_enables = de_pipe_masked |
3792 GEN8_PIPE_VBLANK |
3793 gen8_de_pipe_underrun_mask(dev_priv) |
3794 gen8_de_pipe_flip_done_mask(dev_priv);
3795
3796 de_port_enables = de_port_masked;
3797 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3798 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3799 else if (IS_BROADWELL(dev_priv))
3800 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3801
3802 if (DISPLAY_VER(dev_priv) >= 12) {
3803 enum transcoder trans;
3804
3805 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3806 enum intel_display_power_domain domain;
3807
3808 domain = POWER_DOMAIN_TRANSCODER(trans);
3809 if (!intel_display_power_is_enabled(dev_priv, domain))
3810 continue;
3811
3812 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3813 }
3814 } else {
3815 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3816 }
3817
3818 for_each_pipe(dev_priv, pipe) {
3819 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3820
3821 if (intel_display_power_is_enabled(dev_priv,
3822 POWER_DOMAIN_PIPE(pipe)))
3823 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3824 dev_priv->de_irq_mask[pipe],
3825 de_pipe_enables);
3826 }
3827
3828 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3829 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3830
3831 if (DISPLAY_VER(dev_priv) >= 11) {
3832 u32 de_hpd_masked = 0;
3833 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3834 GEN11_DE_TBT_HOTPLUG_MASK;
3835
3836 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3837 de_hpd_enables);
3838 }
3839 }
3840
3841 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3842 {
3843 struct intel_uncore *uncore = &dev_priv->uncore;
3844 u32 mask = SDE_GMBUS_ICP;
3845
3846 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3847 }
3848
3849 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3850 {
3851 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3852 icp_irq_postinstall(dev_priv);
3853 else if (HAS_PCH_SPLIT(dev_priv))
3854 ibx_irq_postinstall(dev_priv);
3855
3856 gen8_gt_irq_postinstall(to_gt(dev_priv));
3857 gen8_de_irq_postinstall(dev_priv);
3858
3859 gen8_master_intr_enable(dev_priv->uncore.regs);
3860 }
3861
3862 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3863 {
3864 if (!HAS_DISPLAY(dev_priv))
3865 return;
3866
3867 gen8_de_irq_postinstall(dev_priv);
3868
3869 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3870 GEN11_DISPLAY_IRQ_ENABLE);
3871 }
3872
3873 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3874 {
3875 struct intel_gt *gt = to_gt(dev_priv);
3876 struct intel_uncore *uncore = gt->uncore;
3877 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3878
3879 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3880 icp_irq_postinstall(dev_priv);
3881
3882 gen11_gt_irq_postinstall(gt);
3883 gen11_de_irq_postinstall(dev_priv);
3884
3885 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3886
3887 gen11_master_intr_enable(uncore->regs);
3888 intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3889 }
3890
3891 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3892 {
3893 struct intel_gt *gt = to_gt(dev_priv);
3894 struct intel_uncore *uncore = gt->uncore;
3895 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3896
3897 gen11_gt_irq_postinstall(gt);
3898
3899 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3900
3901 if (HAS_DISPLAY(dev_priv)) {
3902 icp_irq_postinstall(dev_priv);
3903 gen8_de_irq_postinstall(dev_priv);
3904 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3905 GEN11_DISPLAY_IRQ_ENABLE);
3906 }
3907
3908 dg1_master_intr_enable(uncore->regs);
3909 intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3910 }
3911
3912 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3913 {
3914 gen8_gt_irq_postinstall(to_gt(dev_priv));
3915
3916 spin_lock_irq(&dev_priv->irq_lock);
3917 if (dev_priv->display_irqs_enabled)
3918 vlv_display_irq_postinstall(dev_priv);
3919 spin_unlock_irq(&dev_priv->irq_lock);
3920
3921 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3922 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3923 }
3924
3925 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3926 {
3927 struct intel_uncore *uncore = &dev_priv->uncore;
3928
3929 i9xx_pipestat_irq_reset(dev_priv);
3930
3931 GEN2_IRQ_RESET(uncore);
3932 dev_priv->irq_mask = ~0u;
3933 }
3934
3935 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3936 {
3937 struct intel_uncore *uncore = &dev_priv->uncore;
3938 u16 enable_mask;
3939
3940 intel_uncore_write16(uncore,
3941 EMR,
3942 ~(I915_ERROR_PAGE_TABLE |
3943 I915_ERROR_MEMORY_REFRESH));
3944
3945 /* Unmask the interrupts that we always want on. */
3946 dev_priv->irq_mask =
3947 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3948 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3949 I915_MASTER_ERROR_INTERRUPT);
3950
3951 enable_mask =
3952 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3953 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3954 I915_MASTER_ERROR_INTERRUPT |
3955 I915_USER_INTERRUPT;
3956
3957 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3958
3959 /* Interrupt setup is already guaranteed to be single-threaded; this is
3960 * just to make the assert_spin_locked check happy. */
3961 spin_lock_irq(&dev_priv->irq_lock);
3962 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3963 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3964 spin_unlock_irq(&dev_priv->irq_lock);
3965 }
3966
3967 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3968 u16 *eir, u16 *eir_stuck)
3969 {
3970 struct intel_uncore *uncore = &i915->uncore;
3971 u16 emr;
3972
3973 *eir = intel_uncore_read16(uncore, EIR);
3974
3975 if (*eir)
3976 intel_uncore_write16(uncore, EIR, *eir);
3977
3978 *eir_stuck = intel_uncore_read16(uncore, EIR);
3979 if (*eir_stuck == 0)
3980 return;
3981
3982 /*
3983 * Toggle all EMR bits to make sure we get an edge
3984 * in the ISR master error bit if we don't clear
3985 * all the EIR bits. Otherwise the edge triggered
3986 * IIR on i965/g4x wouldn't notice that an interrupt
3987 * is still pending. Also some EIR bits can't be
3988 * cleared except by handling the underlying error
3989 * (or by a GPU reset) so we mask any bit that
3990 * remains set.
3991 */
3992 emr = intel_uncore_read16(uncore, EMR);
3993 intel_uncore_write16(uncore, EMR, 0xffff);
3994 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3995 }
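
/*
 * Worked example for the EMR toggle above (illustrative values): suppose
 * EIR reads 0x0010 and the underlying error persists, so the write-to-clear
 * has no effect and *eir_stuck ends up as 0x0010. Writing EMR = 0xffff
 * briefly masks all errors, pulling the ISR master error bit low so that a
 * later error generates a fresh edge; restoring EMR = emr | 0x0010 then
 * leaves only the stuck bit masked from that point on.
 */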
3996
3997 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3998 u16 eir, u16 eir_stuck)
3999 {
4000 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4001
4002 if (eir_stuck)
4003 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
4004 eir_stuck);
4005 }
4006
4007 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4008 u32 *eir, u32 *eir_stuck)
4009 {
4010 u32 emr;
4011
4012 *eir = intel_uncore_read(&dev_priv->uncore, EIR);
4013
4014 intel_uncore_write(&dev_priv->uncore, EIR, *eir);
4015
4016 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
4017 if (*eir_stuck == 0)
4018 return;
4019
4020 /*
4021 * Toggle all EMR bits to make sure we get an edge
4022 * in the ISR master error bit if we don't clear
4023 * all the EIR bits. Otherwise the edge triggered
4024 * IIR on i965/g4x wouldn't notice that an interrupt
4025 * is still pending. Also some EIR bits can't be
4026 * cleared except by handling the underlying error
4027 * (or by a GPU reset) so we mask any bit that
4028 * remains set.
4029 */
4030 emr = intel_uncore_read(&dev_priv->uncore, EMR);
4031 intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
4032 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
4033 }
4034
4035 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4036 u32 eir, u32 eir_stuck)
4037 {
4038 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
4039
4040 if (eir_stuck)
4041 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
4042 eir_stuck);
4043 }
4044
4045 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4046 {
4047 struct drm_i915_private *dev_priv = arg;
4048 irqreturn_t ret = IRQ_NONE;
4049
4050 if (!intel_irqs_enabled(dev_priv))
4051 return IRQ_NONE;
4052
4053 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4054 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4055
4056 do {
4057 u32 pipe_stats[I915_MAX_PIPES] = {};
4058 u16 eir = 0, eir_stuck = 0;
4059 u16 iir;
4060
4061 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4062 if (iir == 0)
4063 break;
4064
4065 ret = IRQ_HANDLED;
4066
4067 /* Call regardless, as some status bits might not be
4068 * signalled in iir */
4069 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4070
4071 if (iir & I915_MASTER_ERROR_INTERRUPT)
4072 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4073
4074 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4075
4076 if (iir & I915_USER_INTERRUPT)
4077 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4078
4079 if (iir & I915_MASTER_ERROR_INTERRUPT)
4080 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4081
4082 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4083 } while (0);
4084
4085 pmu_irq_stats(dev_priv, ret);
4086
4087 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4088
4089 return ret;
4090 }
4091
4092 static void i915_irq_reset(struct drm_i915_private *dev_priv)
4093 {
4094 struct intel_uncore *uncore = &dev_priv->uncore;
4095
4096 if (I915_HAS_HOTPLUG(dev_priv)) {
4097 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4098 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4099 }
4100
4101 i9xx_pipestat_irq_reset(dev_priv);
4102
4103 GEN3_IRQ_RESET(uncore, GEN2_);
4104 dev_priv->irq_mask = ~0u;
4105 }
4106
4107 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4108 {
4109 struct intel_uncore *uncore = &dev_priv->uncore;
4110 u32 enable_mask;
4111
4112 intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
4113 I915_ERROR_MEMORY_REFRESH));
4114
4115 /* Unmask the interrupts that we always want on. */
4116 dev_priv->irq_mask =
4117 ~(I915_ASLE_INTERRUPT |
4118 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4119 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4120 I915_MASTER_ERROR_INTERRUPT);
4121
4122 enable_mask =
4123 I915_ASLE_INTERRUPT |
4124 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4125 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4126 I915_MASTER_ERROR_INTERRUPT |
4127 I915_USER_INTERRUPT;
4128
4129 if (I915_HAS_HOTPLUG(dev_priv)) {
4130 /* Enable in IER... */
4131 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4132 /* and unmask in IMR */
4133 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4134 }
4135
4136 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4137
4138 /* Interrupt setup is already guaranteed to be single-threaded; this is
4139 * just to make the assert_spin_locked check happy. */
4140 spin_lock_irq(&dev_priv->irq_lock);
4141 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4142 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4143 spin_unlock_irq(&dev_priv->irq_lock);
4144
4145 i915_enable_asle_pipestat(dev_priv);
4146 }
4147
4148 static irqreturn_t i915_irq_handler(int irq, void *arg)
4149 {
4150 struct drm_i915_private *dev_priv = arg;
4151 irqreturn_t ret = IRQ_NONE;
4152
4153 if (!intel_irqs_enabled(dev_priv))
4154 return IRQ_NONE;
4155
4156 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4157 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4158
4159 do {
4160 u32 pipe_stats[I915_MAX_PIPES] = {};
4161 u32 eir = 0, eir_stuck = 0;
4162 u32 hotplug_status = 0;
4163 u32 iir;
4164
4165 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4166 if (iir == 0)
4167 break;
4168
4169 ret = IRQ_HANDLED;
4170
4171 if (I915_HAS_HOTPLUG(dev_priv) &&
4172 iir & I915_DISPLAY_PORT_INTERRUPT)
4173 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4174
4175 /* Call regardless, as some status bits might not be
4176 * signalled in iir */
4177 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4178
4179 if (iir & I915_MASTER_ERROR_INTERRUPT)
4180 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4181
4182 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4183
4184 if (iir & I915_USER_INTERRUPT)
4185 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4186
4187 if (iir & I915_MASTER_ERROR_INTERRUPT)
4188 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4189
4190 if (hotplug_status)
4191 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4192
4193 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4194 } while (0);
4195
4196 pmu_irq_stats(dev_priv, ret);
4197
4198 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4199
4200 return ret;
4201 }
4202
4203 static void i965_irq_reset(struct drm_i915_private *dev_priv)
4204 {
4205 struct intel_uncore *uncore = &dev_priv->uncore;
4206
4207 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4208 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4209
4210 i9xx_pipestat_irq_reset(dev_priv);
4211
4212 GEN3_IRQ_RESET(uncore, GEN2_);
4213 dev_priv->irq_mask = ~0u;
4214 }
4215
4216 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4217 {
4218 struct intel_uncore *uncore = &dev_priv->uncore;
4219 u32 enable_mask;
4220 u32 error_mask;
4221
4222 /*
4223 * Enable some error detection, note the instruction error mask
4224 * bit is reserved, so we leave it masked.
4225 */
4226 if (IS_G4X(dev_priv)) {
4227 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4228 GM45_ERROR_MEM_PRIV |
4229 GM45_ERROR_CP_PRIV |
4230 I915_ERROR_MEMORY_REFRESH);
4231 } else {
4232 error_mask = ~(I915_ERROR_PAGE_TABLE |
4233 I915_ERROR_MEMORY_REFRESH);
4234 }
4235 intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
4236
4237 /* Unmask the interrupts that we always want on. */
4238 dev_priv->irq_mask =
4239 ~(I915_ASLE_INTERRUPT |
4240 I915_DISPLAY_PORT_INTERRUPT |
4241 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4242 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4243 I915_MASTER_ERROR_INTERRUPT);
4244
4245 enable_mask =
4246 I915_ASLE_INTERRUPT |
4247 I915_DISPLAY_PORT_INTERRUPT |
4248 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4249 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4250 I915_MASTER_ERROR_INTERRUPT |
4251 I915_USER_INTERRUPT;
4252
4253 if (IS_G4X(dev_priv))
4254 enable_mask |= I915_BSD_USER_INTERRUPT;
4255
4256 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4257
4258 /* Interrupt setup is already guaranteed to be single-threaded; this is
4259 * just to make the assert_spin_locked check happy. */
4260 spin_lock_irq(&dev_priv->irq_lock);
4261 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4262 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4263 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4264 spin_unlock_irq(&dev_priv->irq_lock);
4265
4266 i915_enable_asle_pipestat(dev_priv);
4267 }
4268
4269 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4270 {
4271 u32 hotplug_en;
4272
4273 lockdep_assert_held(&dev_priv->irq_lock);
4274
4275 /* Note HDMI and DP share hotplug bits */
4276 /* enable bits are the same for all generations */
4277 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4278 /* Programming the CRT detection parameters tends
4279 * to generate a spurious hotplug event about three
4280 * seconds later. So just do it once.
4281 */
4282 if (IS_G4X(dev_priv))
4283 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4284 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4285
4286 /* Ignore TV since it's buggy */
4287 i915_hotplug_interrupt_update_locked(dev_priv,
4288 HOTPLUG_INT_EN_MASK |
4289 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4290 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4291 hotplug_en);
4292 }
4293
4294 static irqreturn_t i965_irq_handler(int irq, void *arg)
4295 {
4296 struct drm_i915_private *dev_priv = arg;
4297 irqreturn_t ret = IRQ_NONE;
4298
4299 if (!intel_irqs_enabled(dev_priv))
4300 return IRQ_NONE;
4301
4302 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4303 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4304
4305 do {
4306 u32 pipe_stats[I915_MAX_PIPES] = {};
4307 u32 eir = 0, eir_stuck = 0;
4308 u32 hotplug_status = 0;
4309 u32 iir;
4310
4311 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4312 if (iir == 0)
4313 break;
4314
4315 ret = IRQ_HANDLED;
4316
4317 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4318 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4319
4320 /* Call regardless, as some status bits might not be
4321 * signalled in iir */
4322 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4323
4324 if (iir & I915_MASTER_ERROR_INTERRUPT)
4325 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4326
4327 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4328
4329 if (iir & I915_USER_INTERRUPT)
4330 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4331 iir);
4332
4333 if (iir & I915_BSD_USER_INTERRUPT)
4334 intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4335 iir >> 25);
4336
4337 if (iir & I915_MASTER_ERROR_INTERRUPT)
4338 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4339
4340 if (hotplug_status)
4341 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4342
4343 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4344 } while (0);
4345
4346 pmu_irq_stats(dev_priv, ret);
4347
4348 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4349
4350 return ret;
4351 }
4352
4353 struct intel_hotplug_funcs {
4354 void (*hpd_irq_setup)(struct drm_i915_private *i915);
4355 };
4356
4357 #define HPD_FUNCS(platform) \
4358 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4359 .hpd_irq_setup = platform##_hpd_irq_setup, \
4360 }
4361
4362 HPD_FUNCS(i915);
4363 HPD_FUNCS(dg1);
4364 HPD_FUNCS(gen11);
4365 HPD_FUNCS(bxt);
4366 HPD_FUNCS(icp);
4367 HPD_FUNCS(spt);
4368 HPD_FUNCS(ilk);
4369 #undef HPD_FUNCS
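
/*
 * For reference, each HPD_FUNCS(platform) invocation above expands to a
 * vtable definition; e.g. HPD_FUNCS(ilk) becomes:
 *
 *	static const struct intel_hotplug_funcs ilk_hpd_funcs = {
 *		.hpd_irq_setup = ilk_hpd_irq_setup,
 *	};
 */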
4370
4371 void intel_hpd_irq_setup(struct drm_i915_private *i915)
4372 {
4373 if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4374 i915->display.funcs.hotplug->hpd_irq_setup(i915);
4375 }
4376
4377 /**
4378 * intel_irq_init - initializes irq support
4379 * @dev_priv: i915 device instance
4380 *
4381 * This function initializes all the irq support including work items, timers
4382 * and all the vtables. It does not set up the interrupt itself, though.
4383 */
4384 void intel_irq_init(struct drm_i915_private *dev_priv)
4385 {
4386 struct drm_device *dev = &dev_priv->drm;
4387 int i;
4388
4389 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4390 for (i = 0; i < MAX_L3_SLICES; ++i)
4391 dev_priv->l3_parity.remap_info[i] = NULL;
4392
4393 /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
4394 if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4395 to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4396
4397 if (!HAS_DISPLAY(dev_priv))
4398 return;
4399
4400 intel_hpd_init_pins(dev_priv);
4401
4402 intel_hpd_init_work(dev_priv);
4403
4404 dev->vblank_disable_immediate = true;
4405
4406 /* Most platforms treat the display irq block as an always-on
4407 * power domain. vlv/chv can disable it at runtime and need
4408 * special care to avoid writing any of the display block registers
4409 * outside of the power domain. We defer setting up the display irqs
4410 * in this case to the runtime pm.
4411 */
4412 dev_priv->display_irqs_enabled = true;
4413 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4414 dev_priv->display_irqs_enabled = false;
4415
4416 dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4417 /* If we have MST support, we want to avoid doing short HPD IRQ storm
4418 * detection, as short HPD storms will occur as a natural part of
4419 * sideband messaging with MST.
4420 * On older platforms, however, IRQ storms can occur with both long and
4421 * short pulses, as seen on some G4x systems.
4422 */
4423 dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4424
4425 if (HAS_GMCH(dev_priv)) {
4426 if (I915_HAS_HOTPLUG(dev_priv))
4427 dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4428 } else {
4429 if (HAS_PCH_DG2(dev_priv))
4430 dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4431 else if (HAS_PCH_DG1(dev_priv))
4432 dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4433 else if (DISPLAY_VER(dev_priv) >= 11)
4434 dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4435 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4436 dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4437 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4438 dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4439 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4440 dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4441 else
4442 dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4443 }
4444 }
4445
4446 /**
4447 * intel_irq_fini - deinitializes IRQ support
4448 * @i915: i915 device instance
4449 *
4450 * This function deinitializes all the IRQ support.
4451 */
4452 void intel_irq_fini(struct drm_i915_private *i915)
4453 {
4454 int i;
4455
4456 for (i = 0; i < MAX_L3_SLICES; ++i)
4457 kfree(i915->l3_parity.remap_info[i]);
4458 }
4459
4460 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4461 {
4462 if (HAS_GMCH(dev_priv)) {
4463 if (IS_CHERRYVIEW(dev_priv))
4464 return cherryview_irq_handler;
4465 else if (IS_VALLEYVIEW(dev_priv))
4466 return valleyview_irq_handler;
4467 else if (GRAPHICS_VER(dev_priv) == 4)
4468 return i965_irq_handler;
4469 else if (GRAPHICS_VER(dev_priv) == 3)
4470 return i915_irq_handler;
4471 else
4472 return i8xx_irq_handler;
4473 } else {
4474 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4475 return dg1_irq_handler;
4476 else if (GRAPHICS_VER(dev_priv) >= 11)
4477 return gen11_irq_handler;
4478 else if (GRAPHICS_VER(dev_priv) >= 8)
4479 return gen8_irq_handler;
4480 else
4481 return ilk_irq_handler;
4482 }
4483 }
4484
4485 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4486 {
4487 if (HAS_GMCH(dev_priv)) {
4488 if (IS_CHERRYVIEW(dev_priv))
4489 cherryview_irq_reset(dev_priv);
4490 else if (IS_VALLEYVIEW(dev_priv))
4491 valleyview_irq_reset(dev_priv);
4492 else if (GRAPHICS_VER(dev_priv) == 4)
4493 i965_irq_reset(dev_priv);
4494 else if (GRAPHICS_VER(dev_priv) == 3)
4495 i915_irq_reset(dev_priv);
4496 else
4497 i8xx_irq_reset(dev_priv);
4498 } else {
4499 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4500 dg1_irq_reset(dev_priv);
4501 else if (GRAPHICS_VER(dev_priv) >= 11)
4502 gen11_irq_reset(dev_priv);
4503 else if (GRAPHICS_VER(dev_priv) >= 8)
4504 gen8_irq_reset(dev_priv);
4505 else
4506 ilk_irq_reset(dev_priv);
4507 }
4508 }
4509
4510 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4511 {
4512 if (HAS_GMCH(dev_priv)) {
4513 if (IS_CHERRYVIEW(dev_priv))
4514 cherryview_irq_postinstall(dev_priv);
4515 else if (IS_VALLEYVIEW(dev_priv))
4516 valleyview_irq_postinstall(dev_priv);
4517 else if (GRAPHICS_VER(dev_priv) == 4)
4518 i965_irq_postinstall(dev_priv);
4519 else if (GRAPHICS_VER(dev_priv) == 3)
4520 i915_irq_postinstall(dev_priv);
4521 else
4522 i8xx_irq_postinstall(dev_priv);
4523 } else {
4524 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4525 dg1_irq_postinstall(dev_priv);
4526 else if (GRAPHICS_VER(dev_priv) >= 11)
4527 gen11_irq_postinstall(dev_priv);
4528 else if (GRAPHICS_VER(dev_priv) >= 8)
4529 gen8_irq_postinstall(dev_priv);
4530 else
4531 ilk_irq_postinstall(dev_priv);
4532 }
4533 }
4534
4535 /**
4536 * intel_irq_install - enables the hardware interrupt
4537 * @dev_priv: i915 device instance
4538 *
4539 * This function enables the hardware interrupt handling, but leaves the hotplug
4540 * handling still disabled. It is called after intel_irq_init().
4541 *
4542 * In the driver load and resume code we need working interrupts in a few places
4543 * but don't want to deal with the hassle of concurrent probe and hotplug
4544 * workers. Hence the split into this two-stage approach.
4545 */
4546 int intel_irq_install(struct drm_i915_private *dev_priv)
4547 {
4548 int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4549 int ret;
4550
4551 /*
4552 * We enable some interrupt sources in our postinstall hooks, so mark
4553 * interrupts as enabled _before_ actually enabling them to avoid
4554 * special cases in our ordering checks.
4555 */
4556 dev_priv->runtime_pm.irqs_enabled = true;
4557
4558 dev_priv->irq_enabled = true;
4559
4560 intel_irq_reset(dev_priv);
4561
4562 ret = request_irq(irq, intel_irq_handler(dev_priv),
4563 IRQF_SHARED, DRIVER_NAME, dev_priv);
4564 if (ret < 0) {
4565 dev_priv->irq_enabled = false;
4566 return ret;
4567 }
4568
4569 intel_irq_postinstall(dev_priv);
4570
4571 return ret;
4572 }
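
/*
 * Minimal sketch of the intended two-stage usage from the probe path
 * (hypothetical, condensed flow; the real driver interleaves other setup):
 *
 *	intel_irq_init(i915);		 (work items, timers, vtables)
 *	ret = intel_irq_install(i915);	 (request_irq() + postinstall)
 *	if (ret)
 *		goto err;
 *	...
 *	intel_irq_uninstall(i915);	 (on remove or probe unwind)
 */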
4573
4574 /**
4575 * intel_irq_uninstall - finalizes all irq handling
4576 * @dev_priv: i915 device instance
4577 *
4578 * This stops interrupt and hotplug handling and unregisters and frees all
4579 * resources acquired in the init functions.
4580 */
4581 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4582 {
4583 int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4584
4585 /*
4586 * FIXME we can get called twice during driver probe
4587 * error handling as well as during driver remove due to
4588 * intel_modeset_driver_remove() calling us out of sequence.
4589 * Would be nice if it didn't do that...
4590 */
4591 if (!dev_priv->irq_enabled)
4592 return;
4593
4594 dev_priv->irq_enabled = false;
4595
4596 intel_irq_reset(dev_priv);
4597
4598 free_irq(irq, dev_priv);
4599
4600 intel_hpd_cancel_work(dev_priv);
4601 dev_priv->runtime_pm.irqs_enabled = false;
4602 }
4603
4604 /**
4605 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4606 * @dev_priv: i915 device instance
4607 *
4608 * This function is used to disable interrupts at runtime, both in the runtime
4609 * pm and the system suspend/resume code.
4610 */
4611 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4612 {
4613 intel_irq_reset(dev_priv);
4614 dev_priv->runtime_pm.irqs_enabled = false;
4615 intel_synchronize_irq(dev_priv);
4616 }
4617
4618 /**
4619 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4620 * @dev_priv: i915 device instance
4621 *
4622 * This function is used to enable interrupts at runtime, both in the runtime
4623 * pm and the system suspend/resume code.
4624 */
4625 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4626 {
4627 dev_priv->runtime_pm.irqs_enabled = true;
4628 intel_irq_reset(dev_priv);
4629 intel_irq_postinstall(dev_priv);
4630 }
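
/*
 * Illustrative pairing in the suspend/resume path (condensed, hypothetical
 * ordering):
 *
 *	intel_runtime_pm_disable_interrupts(i915);	 (going to sleep)
 *	...						 (hw powered down/up)
 *	intel_runtime_pm_enable_interrupts(i915);	 (waking back up)
 */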
4631
4632 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4633 {
4634 return dev_priv->runtime_pm.irqs_enabled;
4635 }
4636
4637 void intel_synchronize_irq(struct drm_i915_private *i915)
4638 {
4639 synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4640 }
4641
4642 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4643 {
4644 synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4645 }
4646