/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/vgaarb.h>

#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(struct drm_i915_private *i915,
			       enum intel_display_power_domain domain)
{
	bool ddi_tc_ports = IS_GEN(i915, 12);

	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
			     POWER_DOMAIN_PORT_DDI_TC1_LANES);
		return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
			     POWER_DOMAIN_PORT_DDI_TC2_LANES);
		return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
			     POWER_DOMAIN_PORT_DDI_TC3_LANES);
		return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_TC4_LANES:
		return "PORT_DDI_TC4_LANES";
	case POWER_DOMAIN_PORT_DDI_TC5_LANES:
		return "PORT_DDI_TC5_LANES";
	case POWER_DOMAIN_PORT_DDI_TC6_LANES:
		return "PORT_DDI_TC6_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
			     POWER_DOMAIN_PORT_DDI_TC1_IO);
		return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
			     POWER_DOMAIN_PORT_DDI_TC2_IO);
		return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
			     POWER_DOMAIN_PORT_DDI_TC3_IO);
		return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_TC4_IO:
		return "PORT_DDI_TC4_IO";
	case POWER_DOMAIN_PORT_DDI_TC5_IO:
		return "PORT_DDI_TC5_IO";
	case POWER_DOMAIN_PORT_DDI_TC6_IO:
		return "PORT_DDI_TC6_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
		return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
	case POWER_DOMAIN_AUX_E:
		BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
		return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
	case POWER_DOMAIN_AUX_F:
		BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
		return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
	case POWER_DOMAIN_AUX_TC4:
		return "AUX_TC4";
	case POWER_DOMAIN_AUX_TC5:
		return "AUX_TC5";
	case POWER_DOMAIN_AUX_TC6:
		return "AUX_TC6";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register here,
	 * ensuring vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		DRM_DEBUG_KMS("%s power well enable timeout\n",
			      power_well->desc->name);

		/* An AUX timeout is expected if the TBT DP tunnel is down. */
		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
	}
}

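/*
 * Return a bitmask of which requesters have the given power well's request
 * bit set: bit 0 = BIOS, bit 1 = driver, bit 2 = KVMR, bit 3 = DEBUG.
 */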
static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;

	return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
				      SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
		     SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

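/*
 * The AUX power well indices map linearly onto the PHY indices, with
 * ICL_PW_CTL_IDX_AUX_A corresponding to PHY A.
 */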
#define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;
	int wa_idx_max;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl, tgl */
	if (IS_TIGERLAKE(dev_priv))
		wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
	else
		wa_idx_max = ICL_PW_CTL_IDX_AUX_B;

	if (!IS_ELKHARTLAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

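/*
 * Map an AUX power well index to its AUX channel: combo PHY AUX wells start
 * at AUX_CH_A, Type-C TBT AUX wells start at AUX_CH_C.
 */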
#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	WARN_ON(refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		if (!intel_phy_is_tc(dev_priv, phy))
			continue;

		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(&encoder->base);
		if (WARN_ON(!dig_port))
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	if (WARN_ON(!dig_port))
		return;

	WARN_ON(!intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
}

#endif

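/*
 * Select TBT vs. non-TBT IO for the AUX channel, based on the power well
 * type, before powering up the well itself.
 */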
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well);

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 *    disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *    set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force rewrites until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need at most one retry, so avoid spamming */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

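/*
 * Mask of the DC state bits valid on the running platform: gen11+ supports
 * DC5/DC6/DC9, BXT/GLK support DC5/DC9, other gen9+ platforms DC5/DC6.
 */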
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (INTEL_GEN(dev_priv) >= 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the HW actually entering it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = I915_READ(regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(regs->driver);

		if (!(drv_req & mask))
			I915_WRITE(regs->driver, drv_req | mask);
		I915_WRITE(regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

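	/* COND is true once the punit reports the well in the requested state. */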
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *	 be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

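/* Bitmask covering every defined display power domain. */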
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

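/* True iff all bits in @bits are set in @val. */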
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

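/*
 * Cross-check the PHY status register against the state we track in
 * chv_phy_control and the enable state of the common lane power wells.
 */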
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

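		/* Each spline LDO serves two lanes: 0x3 = lanes 0-1, 0xc = lanes 2-3. */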
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d did not power up\n", phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
	       DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1577
1578 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1579 bool override, unsigned int mask)
1580 {
1581 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1582 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1583 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1584 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1585
1586 mutex_lock(&power_domains->lock);
1587
1588 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1589 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1590
1591 if (override)
1592 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1593 else
1594 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1595
1596 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1597
1598 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1599 phy, ch, mask, dev_priv->chv_phy_control);
1600
1601 assert_chv_phy_status(dev_priv);
1602
1603 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1604
1605 mutex_unlock(&power_domains->lock);
1606 }
1607
1608 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1609 struct i915_power_well *power_well)
1610 {
1611 enum pipe pipe = PIPE_A;
1612 bool enabled;
1613 u32 state, ctrl;
1614
1615 vlv_punit_get(dev_priv);
1616
1617 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1618 /*
1619 * We only ever set the power-on and power-gate states, anything
1620 * else is unexpected.
1621 */
1622 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1623 enabled = state == DP_SSS_PWR_ON(pipe);
1624
1625 /*
1626 * A transient state at this point would mean some unexpected party
1627 * is poking at the power controls too.
1628 */
1629 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1630 WARN_ON(ctrl << 16 != state);
1631
1632 vlv_punit_put(dev_priv);
1633
1634 return enabled;
1635 }
1636
1637 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1638 struct i915_power_well *power_well,
1639 bool enable)
1640 {
1641 enum pipe pipe = PIPE_A;
1642 u32 state;
1643 u32 ctrl;
1644
1645 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1646
1647 vlv_punit_get(dev_priv);
1648
1649 #define COND \
1650 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1651
1652 if (COND)
1653 goto out;
1654
1655 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1656 ctrl &= ~DP_SSC_MASK(pipe);
1657 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1658 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1659
1660 if (wait_for(COND, 100))
1661 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1662 state,
1663 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1664
1665 #undef COND
1666
1667 out:
1668 vlv_punit_put(dev_priv);
1669 }
1670
1671 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1672 struct i915_power_well *power_well)
1673 {
1674 chv_set_pipe_power_well(dev_priv, power_well, true);
1675
1676 vlv_display_power_well_init(dev_priv);
1677 }
1678
1679 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1680 struct i915_power_well *power_well)
1681 {
1682 vlv_display_power_well_deinit(dev_priv);
1683
1684 chv_set_pipe_power_well(dev_priv, power_well, false);
1685 }
1686
1687 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1688 {
1689 return power_domains->async_put_domains[0] |
1690 power_domains->async_put_domains[1];
1691 }
1692
1693 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1694
1695 static bool
1696 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1697 {
1698 return !WARN_ON(power_domains->async_put_domains[0] &
1699 power_domains->async_put_domains[1]);
1700 }
1701
1702 static bool
1703 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1704 {
1705 enum intel_display_power_domain domain;
1706 bool err = false;
1707
1708 err |= !assert_async_put_domain_masks_disjoint(power_domains);
1709 err |= WARN_ON(!!power_domains->async_put_wakeref !=
1710 !!__async_put_domains_mask(power_domains));
1711
1712 for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1713 err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1714
1715 return !err;
1716 }
1717
1718 static void print_power_domains(struct i915_power_domains *power_domains,
1719 const char *prefix, u64 mask)
1720 {
1721 struct drm_i915_private *i915 =
1722 container_of(power_domains, struct drm_i915_private,
1723 power_domains);
1724 enum intel_display_power_domain domain;
1725
1726 DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1727 for_each_power_domain(domain, mask)
1728 DRM_DEBUG_DRIVER("%s use_count %d\n",
1729 intel_display_power_domain_str(i915, domain),
1730 power_domains->domain_use_count[domain]);
1731 }
1732
1733 static void
1734 print_async_put_domains_state(struct i915_power_domains *power_domains)
1735 {
1736 DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1737 power_domains->async_put_wakeref);
1738
1739 print_power_domains(power_domains, "async_put_domains[0]",
1740 power_domains->async_put_domains[0]);
1741 print_power_domains(power_domains, "async_put_domains[1]",
1742 power_domains->async_put_domains[1]);
1743 }
1744
1745 static void
1746 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1747 {
1748 if (!__async_put_domains_state_ok(power_domains))
1749 print_async_put_domains_state(power_domains);
1750 }
1751
1752 #else
1753
1754 static void
1755 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1756 {
1757 }
1758
1759 static void
1760 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1761 {
1762 }
1763
1764 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1765
1766 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1767 {
1768 assert_async_put_domain_masks_disjoint(power_domains);
1769
1770 return __async_put_domains_mask(power_domains);
1771 }
1772
1773 static void
1774 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1775 enum intel_display_power_domain domain)
1776 {
1777 assert_async_put_domain_masks_disjoint(power_domains);
1778
1779 power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1780 power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1781 }
1782
1783 static bool
1784 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1785 enum intel_display_power_domain domain)
1786 {
1787 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1788 bool ret = false;
1789
1790 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1791 goto out_verify;
1792
1793 async_put_domains_clear_domain(power_domains, domain);
1794
1795 ret = true;
1796
1797 if (async_put_domains_mask(power_domains))
1798 goto out_verify;
1799
1800 cancel_delayed_work(&power_domains->async_put_work);
1801 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1802 fetch_and_zero(&power_domains->async_put_wakeref));
1803 out_verify:
1804 verify_async_put_domains_state(power_domains);
1805
1806 return ret;
1807 }
1808
1809 static void
1810 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1811 enum intel_display_power_domain domain)
1812 {
1813 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1814 struct i915_power_well *power_well;
1815
1816 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1817 return;
1818
1819 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1820 intel_power_well_get(dev_priv, power_well);
1821
1822 power_domains->domain_use_count[domain]++;
1823 }
1824
1825 /**
1826 * intel_display_power_get - grab a power domain reference
1827 * @dev_priv: i915 device instance
1828 * @domain: power domain to reference
1829 *
1830 * This function grabs a power domain reference for @domain and ensures that the
1831 * power domain and all its parents are powered up. Therefore users should only
1832 * grab a reference to the innermost power domain they need.
1833 *
1834 * Any power domain reference obtained by this function must have a symmetric
1835 * call to intel_display_power_put() to release the reference again.
1836 */
1837 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1838 enum intel_display_power_domain domain)
1839 {
1840 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1841 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1842
1843 mutex_lock(&power_domains->lock);
1844 __intel_display_power_get_domain(dev_priv, domain);
1845 mutex_unlock(&power_domains->lock);
1846
1847 return wakeref;
1848 }
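
/*
 * Illustrative sketch, not part of the driver: the usage pattern the
 * kernel-doc above describes, with an arbitrary example domain. The
 * wakeref returned by the get must be handed back to the matching put.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access hardware that needs pipe A powered ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */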
1849
1850 /**
1851 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1852 * @dev_priv: i915 device instance
1853 * @domain: power domain to reference
1854 *
1855 * This function grabs a power domain reference for @domain if the domain is
1856 * already enabled, returning 0 without powering anything up otherwise. Users
1857 * should only grab a reference to the innermost power domain they need.
1858 *
1859 * Any power domain reference obtained by this function must have a symmetric
1860 * call to intel_display_power_put() to release the reference again.
1861 */
1862 intel_wakeref_t
1863 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1864 enum intel_display_power_domain domain)
1865 {
1866 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1867 intel_wakeref_t wakeref;
1868 bool is_enabled;
1869
1870 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
1871 if (!wakeref)
1872 return 0;
1873
1874 mutex_lock(&power_domains->lock);
1875
1876 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1877 __intel_display_power_get_domain(dev_priv, domain);
1878 is_enabled = true;
1879 } else {
1880 is_enabled = false;
1881 }
1882
1883 mutex_unlock(&power_domains->lock);
1884
1885 if (!is_enabled) {
1886 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1887 wakeref = 0;
1888 }
1889
1890 return wakeref;
1891 }
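
/*
 * Illustrative sketch, not part of the driver: the conditional variant is
 * meant for paths that must not power anything up themselves. The example
 * domain and surrounding flow here are assumptions.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv,
 *						     POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;
 *
 *	... read state that requires pipe A power ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */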
1892
1893 static void
1894 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
1895 enum intel_display_power_domain domain)
1896 {
1897 struct i915_power_domains *power_domains;
1898 struct i915_power_well *power_well;
1899 const char *name = intel_display_power_domain_str(dev_priv, domain);
1900
1901 power_domains = &dev_priv->power_domains;
1902
1903 WARN(!power_domains->domain_use_count[domain],
1904 "Use count on domain %s is already zero\n",
1905 name);
1906 WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
1907 "Async disabling of domain %s is pending\n",
1908 name);
1909
1910 power_domains->domain_use_count[domain]--;
1911
1912 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1913 intel_power_well_put(dev_priv, power_well);
1914 }
1915
1916 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1917 enum intel_display_power_domain domain)
1918 {
1919 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1920
1921 mutex_lock(&power_domains->lock);
1922 __intel_display_power_put_domain(dev_priv, domain);
1923 mutex_unlock(&power_domains->lock);
1924 }
1925
1926 /**
1927 * intel_display_power_put_unchecked - release an unchecked power domain reference
1928 * @dev_priv: i915 device instance
1929 * @domain: power domain to reference
1930 *
1931 * This function drops the power domain reference obtained by
1932 * intel_display_power_get() and might power down the corresponding hardware
1933 * block right away if this is the last reference.
1934 *
1935 * This function exists only for historical reasons and should be avoided in
1936 * new code, as the correctness of its use cannot be checked. Always use
1937 * intel_display_power_put() instead.
1938 */
1939 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1940 enum intel_display_power_domain domain)
1941 {
1942 __intel_display_power_put(dev_priv, domain);
1943 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
1944 }
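
/*
 * Illustrative sketch, not part of the driver: the unchecked put pairs
 * with a get whose wakeref was thrown away, which is why its correctness
 * cannot be verified; the domain below is an arbitrary example.
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *	...
 *	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_AUDIO);
 */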
1945
1946 static void
1947 queue_async_put_domains_work(struct i915_power_domains *power_domains,
1948 intel_wakeref_t wakeref)
1949 {
1950 WARN_ON(power_domains->async_put_wakeref);
1951 power_domains->async_put_wakeref = wakeref;
1952 WARN_ON(!queue_delayed_work(system_unbound_wq,
1953 &power_domains->async_put_work,
1954 msecs_to_jiffies(100)));
1955 }
1956
1957 static void
1958 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
1959 {
1960 struct drm_i915_private *dev_priv =
1961 container_of(power_domains, struct drm_i915_private,
1962 power_domains);
1963 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1964 enum intel_display_power_domain domain;
1965 intel_wakeref_t wakeref;
1966
1967 /*
1968 * The caller must already hold a raw wakeref; upgrade that to a proper
1969 * wakeref to make the state checker happy about the HW access during
1970 * power well disabling.
1971 */
1972 assert_rpm_raw_wakeref_held(rpm);
1973 wakeref = intel_runtime_pm_get(rpm);
1974
1975 for_each_power_domain(domain, mask) {
1976 /* Clear before put, so put's sanity check is happy. */
1977 async_put_domains_clear_domain(power_domains, domain);
1978 __intel_display_power_put_domain(dev_priv, domain);
1979 }
1980
1981 intel_runtime_pm_put(rpm, wakeref);
1982 }
1983
1984 static void
1985 intel_display_power_put_async_work(struct work_struct *work)
1986 {
1987 struct drm_i915_private *dev_priv =
1988 container_of(work, struct drm_i915_private,
1989 power_domains.async_put_work.work);
1990 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1991 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1992 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
1993 intel_wakeref_t old_work_wakeref = 0;
1994
1995 mutex_lock(&power_domains->lock);
1996
1997 /*
1998 * Bail out if all the domain refs pending to be released were grabbed
1999 * by subsequent gets or a flush_work.
2000 */
2001 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2002 if (!old_work_wakeref)
2003 goto out_verify;
2004
2005 release_async_put_domains(power_domains,
2006 power_domains->async_put_domains[0]);
2007
2008 /* Requeue the work if more domains were async put meanwhile. */
2009 if (power_domains->async_put_domains[1]) {
2010 power_domains->async_put_domains[0] =
2011 fetch_and_zero(&power_domains->async_put_domains[1]);
2012 queue_async_put_domains_work(power_domains,
2013 fetch_and_zero(&new_work_wakeref));
2014 }
2015
2016 out_verify:
2017 verify_async_put_domains_state(power_domains);
2018
2019 mutex_unlock(&power_domains->lock);
2020
2021 if (old_work_wakeref)
2022 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2023 if (new_work_wakeref)
2024 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2025 }
2026
2027 /**
2028 * intel_display_power_put_async - release a power domain reference asynchronously
2029 * @i915: i915 device instance
2030 * @domain: power domain to reference
2031 * @wakeref: wakeref acquired for the reference that is being released
2032 *
2033 * This function drops the power domain reference obtained by
2034 * intel_display_power_get*() and schedules a delayed work item to power down
2035 * corresponding hardware block if this is the last reference.
2036 */
2037 void __intel_display_power_put_async(struct drm_i915_private *i915,
2038 enum intel_display_power_domain domain,
2039 intel_wakeref_t wakeref)
2040 {
2041 struct i915_power_domains *power_domains = &i915->power_domains;
2042 struct intel_runtime_pm *rpm = &i915->runtime_pm;
2043 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2044
2045 mutex_lock(&power_domains->lock);
2046
2047 if (power_domains->domain_use_count[domain] > 1) {
2048 __intel_display_power_put_domain(i915, domain);
2049
2050 goto out_verify;
2051 }
2052
2053 WARN_ON(power_domains->domain_use_count[domain] != 1);
2054
2055 /* Let a pending work requeue itself or queue a new one. */
2056 if (power_domains->async_put_wakeref) {
2057 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2058 } else {
2059 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2060 queue_async_put_domains_work(power_domains,
2061 fetch_and_zero(&work_wakeref));
2062 }
2063
2064 out_verify:
2065 verify_async_put_domains_state(power_domains);
2066
2067 mutex_unlock(&power_domains->lock);
2068
2069 if (work_wakeref)
2070 intel_runtime_pm_put_raw(rpm, work_wakeref);
2071
2072 intel_runtime_pm_put(rpm, wakeref);
2073 }
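
/*
 * Illustrative sketch, not part of the driver: the async put is useful
 * when a domain is likely to be needed again shortly, e.g. between AUX
 * transfers, trading an immediate power well toggle for the ~100ms grace
 * period used by queue_async_put_domains_work() above. The domain and
 * call site here are assumptions.
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	... perform the AUX transfer ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 */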
2074
2075 /**
2076 * intel_display_power_flush_work - flushes the async display power disabling work
2077 * @i915: i915 device instance
2078 *
2079 * Flushes any pending work that was scheduled by a preceding
2080 * intel_display_power_put_async() call, completing the disabling of the
2081 * corresponding power domains.
2082 *
2083 * Note that the work handler function may still be running after this
2084 * function returns; to ensure that the work handler isn't running, use
2085 * intel_display_power_flush_work_sync() instead.
2086 */
2087 void intel_display_power_flush_work(struct drm_i915_private *i915)
2088 {
2089 struct i915_power_domains *power_domains = &i915->power_domains;
2090 intel_wakeref_t work_wakeref;
2091
2092 mutex_lock(&power_domains->lock);
2093
2094 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2095 if (!work_wakeref)
2096 goto out_verify;
2097
2098 release_async_put_domains(power_domains,
2099 async_put_domains_mask(power_domains));
2100 cancel_delayed_work(&power_domains->async_put_work);
2101
2102 out_verify:
2103 verify_async_put_domains_state(power_domains);
2104
2105 mutex_unlock(&power_domains->lock);
2106
2107 if (work_wakeref)
2108 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2109 }
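
/*
 * Illustrative sketch, not part of the driver: flushing makes any pending
 * async put take effect before code that relies on the final power state.
 * The surrounding sequence is an assumption.
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 *	... AUX_A is now disabled unless someone else holds a reference ...
 */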
2110
2111 /**
2112 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2113 * @i915: i915 device instance
2114 *
2115 * Like intel_display_power_flush_work(), but also ensure that the work
2116 * handler function is not running any more when this function returns.
2117 */
2118 static void
2119 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2120 {
2121 struct i915_power_domains *power_domains = &i915->power_domains;
2122
2123 intel_display_power_flush_work(i915);
2124 cancel_delayed_work_sync(&power_domains->async_put_work);
2125
2126 verify_async_put_domains_state(power_domains);
2127
2128 WARN_ON(power_domains->async_put_wakeref);
2129 }
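
/*
 * Illustrative note: being static, the _sync variant is only usable from
 * this file; the assumed call site is a teardown path where the delayed
 * work item must not outlive the power domains state, e.g.:
 *
 *	intel_display_power_flush_work_sync(i915);
 *	... power_domains can now be torn down safely ...
 */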
2130
2131 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2132 /**
2133 * intel_display_power_put - release a power domain reference
2134 * @dev_priv: i915 device instance
2135 * @domain: power domain to reference
2136 * @wakeref: wakeref acquired for the reference that is being released
2137 *
2138 * This function drops the power domain reference obtained by
2139 * intel_display_power_get() and might power down the corresponding hardware
2140 * block right away if this is the last reference.
2141 */
2142 void intel_display_power_put(struct drm_i915_private *dev_priv,
2143 enum intel_display_power_domain domain,
2144 intel_wakeref_t wakeref)
2145 {
2146 __intel_display_power_put(dev_priv, domain);
2147 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2148 }
2149 #endif
2150
2151 #define I830_PIPES_POWER_DOMAINS ( \
2152 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2153 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2154 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2155 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2156 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2157 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2158 BIT_ULL(POWER_DOMAIN_INIT))
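
/*
 * Illustrative sketch, not part of the driver: these *_POWER_DOMAINS
 * masks are plain u64 bitmasks with one BIT_ULL() per domain, so testing
 * whether a well covers a domain is a single AND:
 *
 *	if (I830_PIPES_POWER_DOMAINS & BIT_ULL(POWER_DOMAIN_PIPE_A))
 *		... the "pipes" well must be on for pipe A to work ...
 */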
2159
2160 #define VLV_DISPLAY_POWER_DOMAINS ( \
2161 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2162 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2163 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2164 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2165 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2166 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2167 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2168 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2169 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2170 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2171 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2172 BIT_ULL(POWER_DOMAIN_VGA) | \
2173 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2174 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2175 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2176 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2177 BIT_ULL(POWER_DOMAIN_INIT))
2178
2179 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
2180 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2181 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2182 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2183 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2184 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2185 BIT_ULL(POWER_DOMAIN_INIT))
2186
2187 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
2188 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2189 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2190 BIT_ULL(POWER_DOMAIN_INIT))
2191
2192 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
2193 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2194 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2195 BIT_ULL(POWER_DOMAIN_INIT))
2196
2197 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
2198 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2199 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2200 BIT_ULL(POWER_DOMAIN_INIT))
2201
2202 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
2203 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2204 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2205 BIT_ULL(POWER_DOMAIN_INIT))
2206
2207 #define CHV_DISPLAY_POWER_DOMAINS ( \
2208 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2209 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2210 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2211 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2212 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2213 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2214 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2215 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2216 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2217 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2218 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2219 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2220 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2221 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2222 BIT_ULL(POWER_DOMAIN_VGA) | \
2223 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2224 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2225 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2226 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2227 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2228 BIT_ULL(POWER_DOMAIN_INIT))
2229
2230 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
2231 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2232 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2233 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2234 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2235 BIT_ULL(POWER_DOMAIN_INIT))
2236
2237 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
2238 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2239 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2240 BIT_ULL(POWER_DOMAIN_INIT))
2241
2242 #define HSW_DISPLAY_POWER_DOMAINS ( \
2243 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2244 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2245 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2246 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2247 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2248 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2249 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2250 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2251 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2252 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2253 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2254 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2255 BIT_ULL(POWER_DOMAIN_VGA) | \
2256 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2257 BIT_ULL(POWER_DOMAIN_INIT))
2258
2259 #define BDW_DISPLAY_POWER_DOMAINS ( \
2260 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2261 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2262 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2263 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2264 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2265 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2266 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2267 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2268 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2269 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2270 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2271 BIT_ULL(POWER_DOMAIN_VGA) | \
2272 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2273 BIT_ULL(POWER_DOMAIN_INIT))
2274
2275 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2276 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2277 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2278 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2279 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2280 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2281 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2282 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2283 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2284 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2285 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2286 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2287 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2288 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2289 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2290 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2291 BIT_ULL(POWER_DOMAIN_VGA) | \
2292 BIT_ULL(POWER_DOMAIN_INIT))
2293 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
2294 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2295 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2296 BIT_ULL(POWER_DOMAIN_INIT))
2297 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2298 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2299 BIT_ULL(POWER_DOMAIN_INIT))
2300 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2301 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2302 BIT_ULL(POWER_DOMAIN_INIT))
2303 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
2304 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2305 BIT_ULL(POWER_DOMAIN_INIT))
2306 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2307 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2308 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2309 BIT_ULL(POWER_DOMAIN_MODESET) | \
2310 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2311 BIT_ULL(POWER_DOMAIN_INIT))
2312
2313 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2314 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2315 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2316 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2317 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2318 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2319 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2320 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2321 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2322 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2323 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2324 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2325 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2326 BIT_ULL(POWER_DOMAIN_VGA) | \
2327 BIT_ULL(POWER_DOMAIN_INIT))
2328 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2329 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2330 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2331 BIT_ULL(POWER_DOMAIN_MODESET) | \
2332 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2333 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2334 BIT_ULL(POWER_DOMAIN_INIT))
2335 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
2336 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2337 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2338 BIT_ULL(POWER_DOMAIN_INIT))
2339 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
2340 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2341 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2342 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2343 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2344 BIT_ULL(POWER_DOMAIN_INIT))
2345
2346 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2347 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2348 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2349 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2350 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2351 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2352 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2353 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2354 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2355 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2356 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2357 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2358 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2359 BIT_ULL(POWER_DOMAIN_VGA) | \
2360 BIT_ULL(POWER_DOMAIN_INIT))
2361 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
2362 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2363 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2364 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2365 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2366 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2367 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
2368 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2369 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2370 BIT_ULL(POWER_DOMAIN_INIT))
2371 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
2372 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2373 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2374 BIT_ULL(POWER_DOMAIN_INIT))
2375 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
2376 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2377 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2378 BIT_ULL(POWER_DOMAIN_INIT))
2379 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
2380 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2381 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2382 BIT_ULL(POWER_DOMAIN_INIT))
2383 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
2384 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2385 BIT_ULL(POWER_DOMAIN_INIT))
2386 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
2387 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2388 BIT_ULL(POWER_DOMAIN_INIT))
2389 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2390 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2391 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2392 BIT_ULL(POWER_DOMAIN_MODESET) | \
2393 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2394 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2395 BIT_ULL(POWER_DOMAIN_INIT))
2396
2397 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2398 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2399 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2400 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2401 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2402 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2403 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2404 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2405 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2406 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2407 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2408 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2409 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2410 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2411 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2412 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2413 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2414 BIT_ULL(POWER_DOMAIN_VGA) | \
2415 BIT_ULL(POWER_DOMAIN_INIT))
2416 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
2417 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2418 BIT_ULL(POWER_DOMAIN_INIT))
2419 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
2420 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2421 BIT_ULL(POWER_DOMAIN_INIT))
2422 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
2423 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2424 BIT_ULL(POWER_DOMAIN_INIT))
2425 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
2426 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2427 BIT_ULL(POWER_DOMAIN_INIT))
2428 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
2429 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2430 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2431 BIT_ULL(POWER_DOMAIN_INIT))
2432 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
2433 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2434 BIT_ULL(POWER_DOMAIN_INIT))
2435 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
2436 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2437 BIT_ULL(POWER_DOMAIN_INIT))
2438 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
2439 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2440 BIT_ULL(POWER_DOMAIN_INIT))
2441 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
2442 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2443 BIT_ULL(POWER_DOMAIN_INIT))
2444 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
2445 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2446 BIT_ULL(POWER_DOMAIN_INIT))
2447 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2448 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2449 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2450 BIT_ULL(POWER_DOMAIN_MODESET) | \
2451 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2452 BIT_ULL(POWER_DOMAIN_INIT))
2453
2454 /*
2455 * ICL PW_0/PG_0 domains (HW/DMC control):
2456 * - PCI
2457 * - clocks except port PLL
2458 * - central power except FBC
2459 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2460 * ICL PW_1/PG_1 domains (HW/DMC control):
2461 * - DBUF function
2462 * - PIPE_A and its planes, except VGA
2463 * - transcoder EDP + PSR
2464 * - transcoder DSI
2465 * - DDI_A
2466 * - FBC
2467 */
2468 #define ICL_PW_4_POWER_DOMAINS ( \
2469 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2470 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2471 BIT_ULL(POWER_DOMAIN_INIT))
2472 /* VDSC/joining */
2473 #define ICL_PW_3_POWER_DOMAINS ( \
2474 ICL_PW_4_POWER_DOMAINS | \
2475 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2476 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2477 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2478 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2479 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2480 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2481 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2482 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2483 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2484 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2485 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2486 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2487 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2488 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2489 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2490 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
2491 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
2492 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
2493 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
2494 BIT_ULL(POWER_DOMAIN_VGA) | \
2495 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2496 BIT_ULL(POWER_DOMAIN_INIT))
2497 /*
2498 * - transcoder WD
2499 * - KVMR (HW control)
2500 */
2501 #define ICL_PW_2_POWER_DOMAINS ( \
2502 ICL_PW_3_POWER_DOMAINS | \
2503 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2504 BIT_ULL(POWER_DOMAIN_INIT))
2505 /*
2506 * - KVMR (HW control)
2507 */
2508 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2509 ICL_PW_2_POWER_DOMAINS | \
2510 BIT_ULL(POWER_DOMAIN_MODESET) | \
2511 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2512 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
2513 BIT_ULL(POWER_DOMAIN_INIT))
2514
2515 #define ICL_DDI_IO_A_POWER_DOMAINS ( \
2516 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2517 #define ICL_DDI_IO_B_POWER_DOMAINS ( \
2518 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2519 #define ICL_DDI_IO_C_POWER_DOMAINS ( \
2520 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2521 #define ICL_DDI_IO_D_POWER_DOMAINS ( \
2522 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2523 #define ICL_DDI_IO_E_POWER_DOMAINS ( \
2524 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2525 #define ICL_DDI_IO_F_POWER_DOMAINS ( \
2526 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2527
2528 #define ICL_AUX_A_IO_POWER_DOMAINS ( \
2529 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2530 BIT_ULL(POWER_DOMAIN_AUX_A))
2531 #define ICL_AUX_B_IO_POWER_DOMAINS ( \
2532 BIT_ULL(POWER_DOMAIN_AUX_B))
2533 #define ICL_AUX_C_IO_POWER_DOMAINS ( \
2534 BIT_ULL(POWER_DOMAIN_AUX_C))
2535 #define ICL_AUX_D_IO_POWER_DOMAINS ( \
2536 BIT_ULL(POWER_DOMAIN_AUX_D))
2537 #define ICL_AUX_E_IO_POWER_DOMAINS ( \
2538 BIT_ULL(POWER_DOMAIN_AUX_E))
2539 #define ICL_AUX_F_IO_POWER_DOMAINS ( \
2540 BIT_ULL(POWER_DOMAIN_AUX_F))
2541 #define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
2542 BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2543 #define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
2544 BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2545 #define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
2546 BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2547 #define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
2548 BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2549
2550 #define TGL_PW_5_POWER_DOMAINS ( \
2551 BIT_ULL(POWER_DOMAIN_PIPE_D) | \
2552 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
2553 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
2554 BIT_ULL(POWER_DOMAIN_INIT))
2555
2556 #define TGL_PW_4_POWER_DOMAINS ( \
2557 TGL_PW_5_POWER_DOMAINS | \
2558 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2559 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2560 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2561 BIT_ULL(POWER_DOMAIN_INIT))
2562
2563 #define TGL_PW_3_POWER_DOMAINS ( \
2564 TGL_PW_4_POWER_DOMAINS | \
2565 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2566 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2567 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2568 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
2569 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
2570 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
2571 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
2572 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
2573 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
2574 BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
2575 BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
2576 BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
2577 BIT_ULL(POWER_DOMAIN_AUX_TC4) | \
2578 BIT_ULL(POWER_DOMAIN_AUX_TC5) | \
2579 BIT_ULL(POWER_DOMAIN_AUX_TC6) | \
2580 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
2581 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
2582 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
2583 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
2584 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
2585 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
2586 BIT_ULL(POWER_DOMAIN_VGA) | \
2587 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2588 BIT_ULL(POWER_DOMAIN_INIT))
2589
2590 #define TGL_PW_2_POWER_DOMAINS ( \
2591 TGL_PW_3_POWER_DOMAINS | \
2592 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2593 BIT_ULL(POWER_DOMAIN_INIT))
2594
2595 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2596 TGL_PW_2_POWER_DOMAINS | \
2597 BIT_ULL(POWER_DOMAIN_MODESET) | \
2598 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2599 BIT_ULL(POWER_DOMAIN_INIT))
2600
2601 #define TGL_DDI_IO_TC1_POWER_DOMAINS ( \
2602 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
2603 #define TGL_DDI_IO_TC2_POWER_DOMAINS ( \
2604 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
2605 #define TGL_DDI_IO_TC3_POWER_DOMAINS ( \
2606 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
2607 #define TGL_DDI_IO_TC4_POWER_DOMAINS ( \
2608 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
2609 #define TGL_DDI_IO_TC5_POWER_DOMAINS ( \
2610 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
2611 #define TGL_DDI_IO_TC6_POWER_DOMAINS ( \
2612 BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))
2613
2614 #define TGL_AUX_TC1_IO_POWER_DOMAINS ( \
2615 BIT_ULL(POWER_DOMAIN_AUX_TC1))
2616 #define TGL_AUX_TC2_IO_POWER_DOMAINS ( \
2617 BIT_ULL(POWER_DOMAIN_AUX_TC2))
2618 #define TGL_AUX_TC3_IO_POWER_DOMAINS ( \
2619 BIT_ULL(POWER_DOMAIN_AUX_TC3))
2620 #define TGL_AUX_TC4_IO_POWER_DOMAINS ( \
2621 BIT_ULL(POWER_DOMAIN_AUX_TC4))
2622 #define TGL_AUX_TC5_IO_POWER_DOMAINS ( \
2623 BIT_ULL(POWER_DOMAIN_AUX_TC5))
2624 #define TGL_AUX_TC6_IO_POWER_DOMAINS ( \
2625 BIT_ULL(POWER_DOMAIN_AUX_TC6))
2626 #define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \
2627 BIT_ULL(POWER_DOMAIN_AUX_TBT5))
2628 #define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \
2629 BIT_ULL(POWER_DOMAIN_AUX_TBT6))
2630
2631 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2632 .sync_hw = i9xx_power_well_sync_hw_noop,
2633 .enable = i9xx_always_on_power_well_noop,
2634 .disable = i9xx_always_on_power_well_noop,
2635 .is_enabled = i9xx_always_on_power_well_enabled,
2636 };
2637
2638 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2639 .sync_hw = i9xx_power_well_sync_hw_noop,
2640 .enable = chv_pipe_power_well_enable,
2641 .disable = chv_pipe_power_well_disable,
2642 .is_enabled = chv_pipe_power_well_enabled,
2643 };
2644
2645 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2646 .sync_hw = i9xx_power_well_sync_hw_noop,
2647 .enable = chv_dpio_cmn_power_well_enable,
2648 .disable = chv_dpio_cmn_power_well_disable,
2649 .is_enabled = vlv_power_well_enabled,
2650 };
2651
2652 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2653 {
2654 .name = "always-on",
2655 .always_on = true,
2656 .domains = POWER_DOMAIN_MASK,
2657 .ops = &i9xx_always_on_power_well_ops,
2658 .id = DISP_PW_ID_NONE,
2659 },
2660 };
2661
2662 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2663 .sync_hw = i830_pipes_power_well_sync_hw,
2664 .enable = i830_pipes_power_well_enable,
2665 .disable = i830_pipes_power_well_disable,
2666 .is_enabled = i830_pipes_power_well_enabled,
2667 };
2668
2669 static const struct i915_power_well_desc i830_power_wells[] = {
2670 {
2671 .name = "always-on",
2672 .always_on = true,
2673 .domains = POWER_DOMAIN_MASK,
2674 .ops = &i9xx_always_on_power_well_ops,
2675 .id = DISP_PW_ID_NONE,
2676 },
2677 {
2678 .name = "pipes",
2679 .domains = I830_PIPES_POWER_DOMAINS,
2680 .ops = &i830_pipes_power_well_ops,
2681 .id = DISP_PW_ID_NONE,
2682 },
2683 };
2684
2685 static const struct i915_power_well_ops hsw_power_well_ops = {
2686 .sync_hw = hsw_power_well_sync_hw,
2687 .enable = hsw_power_well_enable,
2688 .disable = hsw_power_well_disable,
2689 .is_enabled = hsw_power_well_enabled,
2690 };
2691
2692 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2693 .sync_hw = i9xx_power_well_sync_hw_noop,
2694 .enable = gen9_dc_off_power_well_enable,
2695 .disable = gen9_dc_off_power_well_disable,
2696 .is_enabled = gen9_dc_off_power_well_enabled,
2697 };
2698
2699 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2700 .sync_hw = i9xx_power_well_sync_hw_noop,
2701 .enable = bxt_dpio_cmn_power_well_enable,
2702 .disable = bxt_dpio_cmn_power_well_disable,
2703 .is_enabled = bxt_dpio_cmn_power_well_enabled,
2704 };
2705
2706 static const struct i915_power_well_regs hsw_power_well_regs = {
2707 .bios = HSW_PWR_WELL_CTL1,
2708 .driver = HSW_PWR_WELL_CTL2,
2709 .kvmr = HSW_PWR_WELL_CTL3,
2710 .debug = HSW_PWR_WELL_CTL4,
2711 };
2712
2713 static const struct i915_power_well_desc hsw_power_wells[] = {
2714 {
2715 .name = "always-on",
2716 .always_on = true,
2717 .domains = POWER_DOMAIN_MASK,
2718 .ops = &i9xx_always_on_power_well_ops,
2719 .id = DISP_PW_ID_NONE,
2720 },
2721 {
2722 .name = "display",
2723 .domains = HSW_DISPLAY_POWER_DOMAINS,
2724 .ops = &hsw_power_well_ops,
2725 .id = HSW_DISP_PW_GLOBAL,
2726 {
2727 .hsw.regs = &hsw_power_well_regs,
2728 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2729 .hsw.has_vga = true,
2730 },
2731 },
2732 };
2733
2734 static const struct i915_power_well_desc bdw_power_wells[] = {
2735 {
2736 .name = "always-on",
2737 .always_on = true,
2738 .domains = POWER_DOMAIN_MASK,
2739 .ops = &i9xx_always_on_power_well_ops,
2740 .id = DISP_PW_ID_NONE,
2741 },
2742 {
2743 .name = "display",
2744 .domains = BDW_DISPLAY_POWER_DOMAINS,
2745 .ops = &hsw_power_well_ops,
2746 .id = HSW_DISP_PW_GLOBAL,
2747 {
2748 .hsw.regs = &hsw_power_well_regs,
2749 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2750 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2751 .hsw.has_vga = true,
2752 },
2753 },
2754 };
2755
2756 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2757 .sync_hw = i9xx_power_well_sync_hw_noop,
2758 .enable = vlv_display_power_well_enable,
2759 .disable = vlv_display_power_well_disable,
2760 .is_enabled = vlv_power_well_enabled,
2761 };
2762
2763 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2764 .sync_hw = i9xx_power_well_sync_hw_noop,
2765 .enable = vlv_dpio_cmn_power_well_enable,
2766 .disable = vlv_dpio_cmn_power_well_disable,
2767 .is_enabled = vlv_power_well_enabled,
2768 };
2769
2770 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2771 .sync_hw = i9xx_power_well_sync_hw_noop,
2772 .enable = vlv_power_well_enable,
2773 .disable = vlv_power_well_disable,
2774 .is_enabled = vlv_power_well_enabled,
2775 };
2776
2777 static const struct i915_power_well_desc vlv_power_wells[] = {
2778 {
2779 .name = "always-on",
2780 .always_on = true,
2781 .domains = POWER_DOMAIN_MASK,
2782 .ops = &i9xx_always_on_power_well_ops,
2783 .id = DISP_PW_ID_NONE,
2784 },
2785 {
2786 .name = "display",
2787 .domains = VLV_DISPLAY_POWER_DOMAINS,
2788 .ops = &vlv_display_power_well_ops,
2789 .id = VLV_DISP_PW_DISP2D,
2790 {
2791 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2792 },
2793 },
2794 {
2795 .name = "dpio-tx-b-01",
2796 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2797 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2798 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2799 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2800 .ops = &vlv_dpio_power_well_ops,
2801 .id = DISP_PW_ID_NONE,
2802 {
2803 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2804 },
2805 },
2806 {
2807 .name = "dpio-tx-b-23",
2808 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2809 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2810 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2811 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2812 .ops = &vlv_dpio_power_well_ops,
2813 .id = DISP_PW_ID_NONE,
2814 {
2815 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2816 },
2817 },
2818 {
2819 .name = "dpio-tx-c-01",
2820 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2821 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2822 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2823 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2824 .ops = &vlv_dpio_power_well_ops,
2825 .id = DISP_PW_ID_NONE,
2826 {
2827 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2828 },
2829 },
2830 {
2831 .name = "dpio-tx-c-23",
2832 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2833 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2834 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2835 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2836 .ops = &vlv_dpio_power_well_ops,
2837 .id = DISP_PW_ID_NONE,
2838 {
2839 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2840 },
2841 },
2842 {
2843 .name = "dpio-common",
2844 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2845 .ops = &vlv_dpio_cmn_power_well_ops,
2846 .id = VLV_DISP_PW_DPIO_CMN_BC,
2847 {
2848 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2849 },
2850 },
2851 };
2852
2853 static const struct i915_power_well_desc chv_power_wells[] = {
2854 {
2855 .name = "always-on",
2856 .always_on = true,
2857 .domains = POWER_DOMAIN_MASK,
2858 .ops = &i9xx_always_on_power_well_ops,
2859 .id = DISP_PW_ID_NONE,
2860 },
2861 {
2862 .name = "display",
2863 /*
2864 * Pipe A power well is the new disp2d well. Pipe B and C
2865 * power wells don't actually exist. Pipe A power well is
2866 * required for any pipe to work.
2867 */
2868 .domains = CHV_DISPLAY_POWER_DOMAINS,
2869 .ops = &chv_pipe_power_well_ops,
2870 .id = DISP_PW_ID_NONE,
2871 },
2872 {
2873 .name = "dpio-common-bc",
2874 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2875 .ops = &chv_dpio_cmn_power_well_ops,
2876 .id = VLV_DISP_PW_DPIO_CMN_BC,
2877 {
2878 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2879 },
2880 },
2881 {
2882 .name = "dpio-common-d",
2883 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2884 .ops = &chv_dpio_cmn_power_well_ops,
2885 .id = CHV_DISP_PW_DPIO_CMN_D,
2886 {
2887 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2888 },
2889 },
2890 };
2891
2892 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2893 enum i915_power_well_id power_well_id)
2894 {
2895 struct i915_power_well *power_well;
2896 bool ret;
2897
2898 power_well = lookup_power_well(dev_priv, power_well_id);
2899 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2900
2901 return ret;
2902 }
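
/*
 * Illustrative sketch, not part of the driver: wells with a fixed ID can
 * be queried directly; the well ID below is an arbitrary example.
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
 *		... power well 2 is currently enabled ...
 */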
2903
2904 static const struct i915_power_well_desc skl_power_wells[] = {
2905 {
2906 .name = "always-on",
2907 .always_on = true,
2908 .domains = POWER_DOMAIN_MASK,
2909 .ops = &i9xx_always_on_power_well_ops,
2910 .id = DISP_PW_ID_NONE,
2911 },
2912 {
2913 .name = "power well 1",
2914 /* Handled by the DMC firmware */
2915 .always_on = true,
2916 .domains = 0,
2917 .ops = &hsw_power_well_ops,
2918 .id = SKL_DISP_PW_1,
2919 {
2920 .hsw.regs = &hsw_power_well_regs,
2921 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2922 .hsw.has_fuses = true,
2923 },
2924 },
2925 {
2926 .name = "MISC IO power well",
2927 /* Handled by the DMC firmware */
2928 .always_on = true,
2929 .domains = 0,
2930 .ops = &hsw_power_well_ops,
2931 .id = SKL_DISP_PW_MISC_IO,
2932 {
2933 .hsw.regs = &hsw_power_well_regs,
2934 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2935 },
2936 },
2937 {
2938 .name = "DC off",
2939 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2940 .ops = &gen9_dc_off_power_well_ops,
2941 .id = DISP_PW_ID_NONE,
2942 },
2943 {
2944 .name = "power well 2",
2945 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2946 .ops = &hsw_power_well_ops,
2947 .id = SKL_DISP_PW_2,
2948 {
2949 .hsw.regs = &hsw_power_well_regs,
2950 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2951 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2952 .hsw.has_vga = true,
2953 .hsw.has_fuses = true,
2954 },
2955 },
2956 {
2957 .name = "DDI A/E IO power well",
2958 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2959 .ops = &hsw_power_well_ops,
2960 .id = DISP_PW_ID_NONE,
2961 {
2962 .hsw.regs = &hsw_power_well_regs,
2963 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2964 },
2965 },
2966 {
2967 .name = "DDI B IO power well",
2968 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2969 .ops = &hsw_power_well_ops,
2970 .id = DISP_PW_ID_NONE,
2971 {
2972 .hsw.regs = &hsw_power_well_regs,
2973 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2974 },
2975 },
2976 {
2977 .name = "DDI C IO power well",
2978 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2979 .ops = &hsw_power_well_ops,
2980 .id = DISP_PW_ID_NONE,
2981 {
2982 .hsw.regs = &hsw_power_well_regs,
2983 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2984 },
2985 },
2986 {
2987 .name = "DDI D IO power well",
2988 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2989 .ops = &hsw_power_well_ops,
2990 .id = DISP_PW_ID_NONE,
2991 {
2992 .hsw.regs = &hsw_power_well_regs,
2993 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2994 },
2995 },
2996 };
2997
2998 static const struct i915_power_well_desc bxt_power_wells[] = {
2999 {
3000 .name = "always-on",
3001 .always_on = true,
3002 .domains = POWER_DOMAIN_MASK,
3003 .ops = &i9xx_always_on_power_well_ops,
3004 .id = DISP_PW_ID_NONE,
3005 },
3006 {
3007 .name = "power well 1",
3008 /* Handled by the DMC firmware */
3009 .always_on = true,
3010 .domains = 0,
3011 .ops = &hsw_power_well_ops,
3012 .id = SKL_DISP_PW_1,
3013 {
3014 .hsw.regs = &hsw_power_well_regs,
3015 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3016 .hsw.has_fuses = true,
3017 },
3018 },
3019 {
3020 .name = "DC off",
3021 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3022 .ops = &gen9_dc_off_power_well_ops,
3023 .id = DISP_PW_ID_NONE,
3024 },
3025 {
3026 .name = "power well 2",
3027 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3028 .ops = &hsw_power_well_ops,
3029 .id = SKL_DISP_PW_2,
3030 {
3031 .hsw.regs = &hsw_power_well_regs,
3032 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3033 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3034 .hsw.has_vga = true,
3035 .hsw.has_fuses = true,
3036 },
3037 },
3038 {
3039 .name = "dpio-common-a",
3040 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3041 .ops = &bxt_dpio_cmn_power_well_ops,
3042 .id = BXT_DISP_PW_DPIO_CMN_A,
3043 {
3044 .bxt.phy = DPIO_PHY1,
3045 },
3046 },
3047 {
3048 .name = "dpio-common-bc",
3049 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3050 .ops = &bxt_dpio_cmn_power_well_ops,
3051 .id = VLV_DISP_PW_DPIO_CMN_BC,
3052 {
3053 .bxt.phy = DPIO_PHY0,
3054 },
3055 },
3056 };
3057
3058 static const struct i915_power_well_desc glk_power_wells[] = {
3059 {
3060 .name = "always-on",
3061 .always_on = true,
3062 .domains = POWER_DOMAIN_MASK,
3063 .ops = &i9xx_always_on_power_well_ops,
3064 .id = DISP_PW_ID_NONE,
3065 },
3066 {
3067 .name = "power well 1",
3068 /* Handled by the DMC firmware */
3069 .always_on = true,
3070 .domains = 0,
3071 .ops = &hsw_power_well_ops,
3072 .id = SKL_DISP_PW_1,
3073 {
3074 .hsw.regs = &hsw_power_well_regs,
3075 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3076 .hsw.has_fuses = true,
3077 },
3078 },
3079 {
3080 .name = "DC off",
3081 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3082 .ops = &gen9_dc_off_power_well_ops,
3083 .id = DISP_PW_ID_NONE,
3084 },
3085 {
3086 .name = "power well 2",
3087 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3088 .ops = &hsw_power_well_ops,
3089 .id = SKL_DISP_PW_2,
3090 {
3091 .hsw.regs = &hsw_power_well_regs,
3092 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3093 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3094 .hsw.has_vga = true,
3095 .hsw.has_fuses = true,
3096 },
3097 },
3098 {
3099 .name = "dpio-common-a",
3100 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3101 .ops = &bxt_dpio_cmn_power_well_ops,
3102 .id = BXT_DISP_PW_DPIO_CMN_A,
3103 {
3104 .bxt.phy = DPIO_PHY1,
3105 },
3106 },
3107 {
3108 .name = "dpio-common-b",
3109 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3110 .ops = &bxt_dpio_cmn_power_well_ops,
3111 .id = VLV_DISP_PW_DPIO_CMN_BC,
3112 {
3113 .bxt.phy = DPIO_PHY0,
3114 },
3115 },
3116 {
3117 .name = "dpio-common-c",
3118 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3119 .ops = &bxt_dpio_cmn_power_well_ops,
3120 .id = GLK_DISP_PW_DPIO_CMN_C,
3121 {
3122 .bxt.phy = DPIO_PHY2,
3123 },
3124 },
3125 {
3126 .name = "AUX A",
3127 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3128 .ops = &hsw_power_well_ops,
3129 .id = DISP_PW_ID_NONE,
3130 {
3131 .hsw.regs = &hsw_power_well_regs,
3132 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3133 },
3134 },
3135 {
3136 .name = "AUX B",
3137 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3138 .ops = &hsw_power_well_ops,
3139 .id = DISP_PW_ID_NONE,
3140 {
3141 .hsw.regs = &hsw_power_well_regs,
3142 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3143 },
3144 },
3145 {
3146 .name = "AUX C",
3147 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3148 .ops = &hsw_power_well_ops,
3149 .id = DISP_PW_ID_NONE,
3150 {
3151 .hsw.regs = &hsw_power_well_regs,
3152 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3153 },
3154 },
3155 {
3156 .name = "DDI A IO power well",
3157 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3158 .ops = &hsw_power_well_ops,
3159 .id = DISP_PW_ID_NONE,
3160 {
3161 .hsw.regs = &hsw_power_well_regs,
3162 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3163 },
3164 },
3165 {
3166 .name = "DDI B IO power well",
3167 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3168 .ops = &hsw_power_well_ops,
3169 .id = DISP_PW_ID_NONE,
3170 {
3171 .hsw.regs = &hsw_power_well_regs,
3172 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3173 },
3174 },
3175 {
3176 .name = "DDI C IO power well",
3177 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3178 .ops = &hsw_power_well_ops,
3179 .id = DISP_PW_ID_NONE,
3180 {
3181 .hsw.regs = &hsw_power_well_regs,
3182 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3183 },
3184 },
3185 };
3186
3187 static const struct i915_power_well_desc cnl_power_wells[] = {
3188 {
3189 .name = "always-on",
3190 .always_on = true,
3191 .domains = POWER_DOMAIN_MASK,
3192 .ops = &i9xx_always_on_power_well_ops,
3193 .id = DISP_PW_ID_NONE,
3194 },
3195 {
3196 .name = "power well 1",
3197 /* Handled by the DMC firmware */
3198 .always_on = true,
3199 .domains = 0,
3200 .ops = &hsw_power_well_ops,
3201 .id = SKL_DISP_PW_1,
3202 {
3203 .hsw.regs = &hsw_power_well_regs,
3204 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3205 .hsw.has_fuses = true,
3206 },
3207 },
3208 {
3209 .name = "AUX A",
3210 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3211 .ops = &hsw_power_well_ops,
3212 .id = DISP_PW_ID_NONE,
3213 {
3214 .hsw.regs = &hsw_power_well_regs,
3215 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3216 },
3217 },
3218 {
3219 .name = "AUX B",
3220 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3221 .ops = &hsw_power_well_ops,
3222 .id = DISP_PW_ID_NONE,
3223 {
3224 .hsw.regs = &hsw_power_well_regs,
3225 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3226 },
3227 },
3228 {
3229 .name = "AUX C",
3230 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3231 .ops = &hsw_power_well_ops,
3232 .id = DISP_PW_ID_NONE,
3233 {
3234 .hsw.regs = &hsw_power_well_regs,
3235 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3236 },
3237 },
3238 {
3239 .name = "AUX D",
3240 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3241 .ops = &hsw_power_well_ops,
3242 .id = DISP_PW_ID_NONE,
3243 {
3244 .hsw.regs = &hsw_power_well_regs,
3245 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3246 },
3247 },
3248 {
3249 .name = "DC off",
3250 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3251 .ops = &gen9_dc_off_power_well_ops,
3252 .id = DISP_PW_ID_NONE,
3253 },
3254 {
3255 .name = "power well 2",
3256 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3257 .ops = &hsw_power_well_ops,
3258 .id = SKL_DISP_PW_2,
3259 {
3260 .hsw.regs = &hsw_power_well_regs,
3261 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3262 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3263 .hsw.has_vga = true,
3264 .hsw.has_fuses = true,
3265 },
3266 },
3267 {
3268 .name = "DDI A IO power well",
3269 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3270 .ops = &hsw_power_well_ops,
3271 .id = DISP_PW_ID_NONE,
3272 {
3273 .hsw.regs = &hsw_power_well_regs,
3274 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3275 },
3276 },
3277 {
3278 .name = "DDI B IO power well",
3279 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3280 .ops = &hsw_power_well_ops,
3281 .id = DISP_PW_ID_NONE,
3282 {
3283 .hsw.regs = &hsw_power_well_regs,
3284 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3285 },
3286 },
3287 {
3288 .name = "DDI C IO power well",
3289 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3290 .ops = &hsw_power_well_ops,
3291 .id = DISP_PW_ID_NONE,
3292 {
3293 .hsw.regs = &hsw_power_well_regs,
3294 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3295 },
3296 },
3297 {
3298 .name = "DDI D IO power well",
3299 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3300 .ops = &hsw_power_well_ops,
3301 .id = DISP_PW_ID_NONE,
3302 {
3303 .hsw.regs = &hsw_power_well_regs,
3304 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3305 },
3306 },
3307 {
3308 .name = "DDI F IO power well",
3309 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3310 .ops = &hsw_power_well_ops,
3311 .id = DISP_PW_ID_NONE,
3312 {
3313 .hsw.regs = &hsw_power_well_regs,
3314 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3315 },
3316 },
3317 {
3318 .name = "AUX F",
3319 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3320 .ops = &hsw_power_well_ops,
3321 .id = DISP_PW_ID_NONE,
3322 {
3323 .hsw.regs = &hsw_power_well_regs,
3324 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3325 },
3326 },
3327 };
3328
3329 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3330 .sync_hw = hsw_power_well_sync_hw,
3331 .enable = icl_combo_phy_aux_power_well_enable,
3332 .disable = icl_combo_phy_aux_power_well_disable,
3333 .is_enabled = hsw_power_well_enabled,
3334 };
3335
3336 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3337 .sync_hw = hsw_power_well_sync_hw,
3338 .enable = icl_tc_phy_aux_power_well_enable,
3339 .disable = icl_tc_phy_aux_power_well_disable,
3340 .is_enabled = hsw_power_well_enabled,
3341 };
3342
3343 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3344 .bios = ICL_PWR_WELL_CTL_AUX1,
3345 .driver = ICL_PWR_WELL_CTL_AUX2,
3346 .debug = ICL_PWR_WELL_CTL_AUX4,
3347 };
3348
3349 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3350 .bios = ICL_PWR_WELL_CTL_DDI1,
3351 .driver = ICL_PWR_WELL_CTL_DDI2,
3352 .debug = ICL_PWR_WELL_CTL_DDI4,
3353 };
3354
3355 static const struct i915_power_well_desc icl_power_wells[] = {
3356 {
3357 .name = "always-on",
3358 .always_on = true,
3359 .domains = POWER_DOMAIN_MASK,
3360 .ops = &i9xx_always_on_power_well_ops,
3361 .id = DISP_PW_ID_NONE,
3362 },
3363 {
3364 .name = "power well 1",
3365 /* Handled by the DMC firmware */
3366 .always_on = true,
3367 .domains = 0,
3368 .ops = &hsw_power_well_ops,
3369 .id = SKL_DISP_PW_1,
3370 {
3371 .hsw.regs = &hsw_power_well_regs,
3372 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3373 .hsw.has_fuses = true,
3374 },
3375 },
3376 {
3377 .name = "DC off",
3378 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3379 .ops = &gen9_dc_off_power_well_ops,
3380 .id = DISP_PW_ID_NONE,
3381 },
3382 {
3383 .name = "power well 2",
3384 .domains = ICL_PW_2_POWER_DOMAINS,
3385 .ops = &hsw_power_well_ops,
3386 .id = SKL_DISP_PW_2,
3387 {
3388 .hsw.regs = &hsw_power_well_regs,
3389 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3390 .hsw.has_fuses = true,
3391 },
3392 },
3393 {
3394 .name = "power well 3",
3395 .domains = ICL_PW_3_POWER_DOMAINS,
3396 .ops = &hsw_power_well_ops,
3397 .id = DISP_PW_ID_NONE,
3398 {
3399 .hsw.regs = &hsw_power_well_regs,
3400 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3401 .hsw.irq_pipe_mask = BIT(PIPE_B),
3402 .hsw.has_vga = true,
3403 .hsw.has_fuses = true,
3404 },
3405 },
3406 {
3407 .name = "DDI A IO",
3408 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3409 .ops = &hsw_power_well_ops,
3410 .id = DISP_PW_ID_NONE,
3411 {
3412 .hsw.regs = &icl_ddi_power_well_regs,
3413 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3414 },
3415 },
3416 {
3417 .name = "DDI B IO",
3418 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3419 .ops = &hsw_power_well_ops,
3420 .id = DISP_PW_ID_NONE,
3421 {
3422 .hsw.regs = &icl_ddi_power_well_regs,
3423 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3424 },
3425 },
3426 {
3427 .name = "DDI C IO",
3428 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3429 .ops = &hsw_power_well_ops,
3430 .id = DISP_PW_ID_NONE,
3431 {
3432 .hsw.regs = &icl_ddi_power_well_regs,
3433 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3434 },
3435 },
3436 {
3437 .name = "DDI D IO",
3438 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3439 .ops = &hsw_power_well_ops,
3440 .id = DISP_PW_ID_NONE,
3441 {
3442 .hsw.regs = &icl_ddi_power_well_regs,
3443 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3444 },
3445 },
3446 {
3447 .name = "DDI E IO",
3448 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3449 .ops = &hsw_power_well_ops,
3450 .id = DISP_PW_ID_NONE,
3451 {
3452 .hsw.regs = &icl_ddi_power_well_regs,
3453 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3454 },
3455 },
3456 {
3457 .name = "DDI F IO",
3458 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3459 .ops = &hsw_power_well_ops,
3460 .id = DISP_PW_ID_NONE,
3461 {
3462 .hsw.regs = &icl_ddi_power_well_regs,
3463 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3464 },
3465 },
3466 {
3467 .name = "AUX A",
3468 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3469 .ops = &icl_combo_phy_aux_power_well_ops,
3470 .id = DISP_PW_ID_NONE,
3471 {
3472 .hsw.regs = &icl_aux_power_well_regs,
3473 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3474 },
3475 },
3476 {
3477 .name = "AUX B",
3478 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3479 .ops = &icl_combo_phy_aux_power_well_ops,
3480 .id = DISP_PW_ID_NONE,
3481 {
3482 .hsw.regs = &icl_aux_power_well_regs,
3483 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3484 },
3485 },
3486 {
3487 .name = "AUX C",
3488 .domains = ICL_AUX_C_IO_POWER_DOMAINS,
3489 .ops = &icl_tc_phy_aux_power_well_ops,
3490 .id = DISP_PW_ID_NONE,
3491 {
3492 .hsw.regs = &icl_aux_power_well_regs,
3493 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3494 .hsw.is_tc_tbt = false,
3495 },
3496 },
3497 {
3498 .name = "AUX D",
3499 .domains = ICL_AUX_D_IO_POWER_DOMAINS,
3500 .ops = &icl_tc_phy_aux_power_well_ops,
3501 .id = DISP_PW_ID_NONE,
3502 {
3503 .hsw.regs = &icl_aux_power_well_regs,
3504 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3505 .hsw.is_tc_tbt = false,
3506 },
3507 },
3508 {
3509 .name = "AUX E",
3510 .domains = ICL_AUX_E_IO_POWER_DOMAINS,
3511 .ops = &icl_tc_phy_aux_power_well_ops,
3512 .id = DISP_PW_ID_NONE,
3513 {
3514 .hsw.regs = &icl_aux_power_well_regs,
3515 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3516 .hsw.is_tc_tbt = false,
3517 },
3518 },
3519 {
3520 .name = "AUX F",
3521 .domains = ICL_AUX_F_IO_POWER_DOMAINS,
3522 .ops = &icl_tc_phy_aux_power_well_ops,
3523 .id = DISP_PW_ID_NONE,
3524 {
3525 .hsw.regs = &icl_aux_power_well_regs,
3526 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3527 .hsw.is_tc_tbt = false,
3528 },
3529 },
3530 {
3531 .name = "AUX TBT1",
3532 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
3533 .ops = &icl_tc_phy_aux_power_well_ops,
3534 .id = DISP_PW_ID_NONE,
3535 {
3536 .hsw.regs = &icl_aux_power_well_regs,
3537 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3538 .hsw.is_tc_tbt = true,
3539 },
3540 },
3541 {
3542 .name = "AUX TBT2",
3543 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
3544 .ops = &icl_tc_phy_aux_power_well_ops,
3545 .id = DISP_PW_ID_NONE,
3546 {
3547 .hsw.regs = &icl_aux_power_well_regs,
3548 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3549 .hsw.is_tc_tbt = true,
3550 },
3551 },
3552 {
3553 .name = "AUX TBT3",
3554 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
3555 .ops = &icl_tc_phy_aux_power_well_ops,
3556 .id = DISP_PW_ID_NONE,
3557 {
3558 .hsw.regs = &icl_aux_power_well_regs,
3559 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3560 .hsw.is_tc_tbt = true,
3561 },
3562 },
3563 {
3564 .name = "AUX TBT4",
3565 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
3566 .ops = &icl_tc_phy_aux_power_well_ops,
3567 .id = DISP_PW_ID_NONE,
3568 {
3569 .hsw.regs = &icl_aux_power_well_regs,
3570 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3571 .hsw.is_tc_tbt = true,
3572 },
3573 },
3574 {
3575 .name = "power well 4",
3576 .domains = ICL_PW_4_POWER_DOMAINS,
3577 .ops = &hsw_power_well_ops,
3578 .id = DISP_PW_ID_NONE,
3579 {
3580 .hsw.regs = &hsw_power_well_regs,
3581 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3582 .hsw.has_fuses = true,
3583 .hsw.irq_pipe_mask = BIT(PIPE_C),
3584 },
3585 },
3586 };
3587
3588 static const struct i915_power_well_desc tgl_power_wells[] = {
3589 {
3590 .name = "always-on",
3591 .always_on = true,
3592 .domains = POWER_DOMAIN_MASK,
3593 .ops = &i9xx_always_on_power_well_ops,
3594 .id = DISP_PW_ID_NONE,
3595 },
3596 {
3597 .name = "power well 1",
3598 /* Handled by the DMC firmware */
3599 .always_on = true,
3600 .domains = 0,
3601 .ops = &hsw_power_well_ops,
3602 .id = SKL_DISP_PW_1,
3603 {
3604 .hsw.regs = &hsw_power_well_regs,
3605 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3606 .hsw.has_fuses = true,
3607 },
3608 },
3609 {
3610 .name = "DC off",
3611 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3612 .ops = &gen9_dc_off_power_well_ops,
3613 .id = DISP_PW_ID_NONE,
3614 },
3615 {
3616 .name = "power well 2",
3617 .domains = TGL_PW_2_POWER_DOMAINS,
3618 .ops = &hsw_power_well_ops,
3619 .id = SKL_DISP_PW_2,
3620 {
3621 .hsw.regs = &hsw_power_well_regs,
3622 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3623 .hsw.has_fuses = true,
3624 },
3625 },
3626 {
3627 .name = "power well 3",
3628 .domains = TGL_PW_3_POWER_DOMAINS,
3629 .ops = &hsw_power_well_ops,
3630 .id = DISP_PW_ID_NONE,
3631 {
3632 .hsw.regs = &hsw_power_well_regs,
3633 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3634 .hsw.irq_pipe_mask = BIT(PIPE_B),
3635 .hsw.has_vga = true,
3636 .hsw.has_fuses = true,
3637 },
3638 },
3639 {
3640 .name = "DDI A IO",
3641 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3642 .ops = &hsw_power_well_ops,
3643 .id = DISP_PW_ID_NONE,
3644 {
3645 .hsw.regs = &icl_ddi_power_well_regs,
3646 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3647 }
3648 },
3649 {
3650 .name = "DDI B IO",
3651 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3652 .ops = &hsw_power_well_ops,
3653 .id = DISP_PW_ID_NONE,
3654 {
3655 .hsw.regs = &icl_ddi_power_well_regs,
3656 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3657 }
3658 },
3659 {
3660 .name = "DDI C IO",
3661 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3662 .ops = &hsw_power_well_ops,
3663 .id = DISP_PW_ID_NONE,
3664 {
3665 .hsw.regs = &icl_ddi_power_well_regs,
3666 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3667 }
3668 },
3669 {
3670 .name = "DDI TC1 IO",
3671 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
3672 .ops = &hsw_power_well_ops,
3673 .id = DISP_PW_ID_NONE,
3674 {
3675 .hsw.regs = &icl_ddi_power_well_regs,
3676 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3677 },
3678 },
3679 {
3680 .name = "DDI TC2 IO",
3681 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
3682 .ops = &hsw_power_well_ops,
3683 .id = DISP_PW_ID_NONE,
3684 {
3685 .hsw.regs = &icl_ddi_power_well_regs,
3686 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
3687 },
3688 },
3689 {
3690 .name = "DDI TC3 IO",
3691 .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
3692 .ops = &hsw_power_well_ops,
3693 .id = DISP_PW_ID_NONE,
3694 {
3695 .hsw.regs = &icl_ddi_power_well_regs,
3696 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
3697 },
3698 },
3699 {
3700 .name = "DDI TC4 IO",
3701 .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
3702 .ops = &hsw_power_well_ops,
3703 .id = DISP_PW_ID_NONE,
3704 {
3705 .hsw.regs = &icl_ddi_power_well_regs,
3706 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
3707 },
3708 },
3709 {
3710 .name = "DDI TC5 IO",
3711 .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
3712 .ops = &hsw_power_well_ops,
3713 .id = DISP_PW_ID_NONE,
3714 {
3715 .hsw.regs = &icl_ddi_power_well_regs,
3716 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
3717 },
3718 },
3719 {
3720 .name = "DDI TC6 IO",
3721 .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
3722 .ops = &hsw_power_well_ops,
3723 .id = DISP_PW_ID_NONE,
3724 {
3725 .hsw.regs = &icl_ddi_power_well_regs,
3726 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
3727 },
3728 },
3729 {
3730 .name = "AUX A",
3731 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3732 .ops = &icl_combo_phy_aux_power_well_ops,
3733 .id = DISP_PW_ID_NONE,
3734 {
3735 .hsw.regs = &icl_aux_power_well_regs,
3736 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3737 },
3738 },
3739 {
3740 .name = "AUX B",
3741 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3742 .ops = &icl_combo_phy_aux_power_well_ops,
3743 .id = DISP_PW_ID_NONE,
3744 {
3745 .hsw.regs = &icl_aux_power_well_regs,
3746 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3747 },
3748 },
3749 {
3750 .name = "AUX C",
3751 .domains = ICL_AUX_C_IO_POWER_DOMAINS,
3752 .ops = &icl_combo_phy_aux_power_well_ops,
3753 .id = DISP_PW_ID_NONE,
3754 {
3755 .hsw.regs = &icl_aux_power_well_regs,
3756 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3757 },
3758 },
3759 {
3760 .name = "AUX TC1",
3761 .domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
3762 .ops = &icl_tc_phy_aux_power_well_ops,
3763 .id = DISP_PW_ID_NONE,
3764 {
3765 .hsw.regs = &icl_aux_power_well_regs,
3766 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
3767 .hsw.is_tc_tbt = false,
3768 },
3769 },
3770 {
3771 .name = "AUX TC2",
3772 .domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
3773 .ops = &icl_tc_phy_aux_power_well_ops,
3774 .id = DISP_PW_ID_NONE,
3775 {
3776 .hsw.regs = &icl_aux_power_well_regs,
3777 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
3778 .hsw.is_tc_tbt = false,
3779 },
3780 },
3781 {
3782 .name = "AUX TC3",
3783 .domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
3784 .ops = &icl_tc_phy_aux_power_well_ops,
3785 .id = DISP_PW_ID_NONE,
3786 {
3787 .hsw.regs = &icl_aux_power_well_regs,
3788 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
3789 .hsw.is_tc_tbt = false,
3790 },
3791 },
3792 {
3793 .name = "AUX TC4",
3794 .domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
3795 .ops = &icl_tc_phy_aux_power_well_ops,
3796 .id = DISP_PW_ID_NONE,
3797 {
3798 .hsw.regs = &icl_aux_power_well_regs,
3799 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
3800 .hsw.is_tc_tbt = false,
3801 },
3802 },
3803 {
3804 .name = "AUX TC5",
3805 .domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
3806 .ops = &icl_tc_phy_aux_power_well_ops,
3807 .id = DISP_PW_ID_NONE,
3808 {
3809 .hsw.regs = &icl_aux_power_well_regs,
3810 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
3811 .hsw.is_tc_tbt = false,
3812 },
3813 },
3814 {
3815 .name = "AUX TC6",
3816 .domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
3817 .ops = &icl_tc_phy_aux_power_well_ops,
3818 .id = DISP_PW_ID_NONE,
3819 {
3820 .hsw.regs = &icl_aux_power_well_regs,
3821 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
3822 .hsw.is_tc_tbt = false,
3823 },
3824 },
3825 {
3826 .name = "AUX TBT1",
3827 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
3828 .ops = &hsw_power_well_ops,
3829 .id = DISP_PW_ID_NONE,
3830 {
3831 .hsw.regs = &icl_aux_power_well_regs,
3832 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
3833 .hsw.is_tc_tbt = true,
3834 },
3835 },
3836 {
3837 .name = "AUX TBT2",
3838 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
3839 .ops = &hsw_power_well_ops,
3840 .id = DISP_PW_ID_NONE,
3841 {
3842 .hsw.regs = &icl_aux_power_well_regs,
3843 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
3844 .hsw.is_tc_tbt = true,
3845 },
3846 },
3847 {
3848 .name = "AUX TBT3",
3849 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
3850 .ops = &hsw_power_well_ops,
3851 .id = DISP_PW_ID_NONE,
3852 {
3853 .hsw.regs = &icl_aux_power_well_regs,
3854 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
3855 .hsw.is_tc_tbt = true,
3856 },
3857 },
3858 {
3859 .name = "AUX TBT4",
3860 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
3861 .ops = &hsw_power_well_ops,
3862 .id = DISP_PW_ID_NONE,
3863 {
3864 .hsw.regs = &icl_aux_power_well_regs,
3865 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
3866 .hsw.is_tc_tbt = true,
3867 },
3868 },
3869 {
3870 .name = "AUX TBT5",
3871 .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
3872 .ops = &hsw_power_well_ops,
3873 .id = DISP_PW_ID_NONE,
3874 {
3875 .hsw.regs = &icl_aux_power_well_regs,
3876 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
3877 .hsw.is_tc_tbt = true,
3878 },
3879 },
3880 {
3881 .name = "AUX TBT6",
3882 .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
3883 .ops = &hsw_power_well_ops,
3884 .id = DISP_PW_ID_NONE,
3885 {
3886 .hsw.regs = &icl_aux_power_well_regs,
3887 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
3888 .hsw.is_tc_tbt = true,
3889 },
3890 },
3891 {
3892 .name = "power well 4",
3893 .domains = TGL_PW_4_POWER_DOMAINS,
3894 .ops = &hsw_power_well_ops,
3895 .id = DISP_PW_ID_NONE,
3896 {
3897 .hsw.regs = &hsw_power_well_regs,
3898 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3899 .hsw.has_fuses = true,
3900 .hsw.irq_pipe_mask = BIT(PIPE_C),
3901 }
3902 },
3903 {
3904 .name = "power well 5",
3905 .domains = TGL_PW_5_POWER_DOMAINS,
3906 .ops = &hsw_power_well_ops,
3907 .id = DISP_PW_ID_NONE,
3908 {
3909 .hsw.regs = &hsw_power_well_regs,
3910 .hsw.idx = TGL_PW_CTL_IDX_PW_5,
3911 .hsw.has_fuses = true,
3912 .hsw.irq_pipe_mask = BIT(PIPE_D),
3913 },
3914 },
3915 };
3916
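/*
 * Normalize the i915.disable_power_well modparam: any non-negative
 * value is clamped to 0/1, and the "auto" default of -1 resolves to 1,
 * i.e. unused power wells may be disabled.
 */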
3917 static int
3918 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3919 int disable_power_well)
3920 {
3921 if (disable_power_well >= 0)
3922 return !!disable_power_well;
3923
3924 return 1;
3925 }
3926
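/*
 * Worked example of the mask computation below: on gen11+ with the
 * default enable_dc=-1 and power well support enabled, max_dc = 2 and
 * requested_dc = 2, so the function returns
 * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
 */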
3927 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3928 int enable_dc)
3929 {
3930 u32 mask;
3931 int requested_dc;
3932 int max_dc;
3933
3934 if (INTEL_GEN(dev_priv) >= 11) {
3935 max_dc = 2;
3936 /*
3937 * DC9 has a separate HW flow from the rest of the DC states,
3938 * not depending on the DMC firmware. It's needed by system
3939 * suspend/resume, so allow it unconditionally.
3940 */
3941 mask = DC_STATE_EN_DC9;
3942 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3943 max_dc = 2;
3944 mask = 0;
3945 } else if (IS_GEN9_LP(dev_priv)) {
3946 max_dc = 1;
3947 mask = DC_STATE_EN_DC9;
3948 } else {
3949 max_dc = 0;
3950 mask = 0;
3951 }
3952
3953 if (!i915_modparams.disable_power_well)
3954 max_dc = 0;
3955
3956 if (enable_dc >= 0 && enable_dc <= max_dc) {
3957 requested_dc = enable_dc;
3958 } else if (enable_dc == -1) {
3959 requested_dc = max_dc;
3960 } else if (enable_dc > max_dc && enable_dc <= 2) {
3961 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3962 enable_dc, max_dc);
3963 requested_dc = max_dc;
3964 } else {
3965 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3966 requested_dc = max_dc;
3967 }
3968
3969 if (requested_dc > 1)
3970 mask |= DC_STATE_EN_UPTO_DC6;
3971 if (requested_dc > 0)
3972 mask |= DC_STATE_EN_UPTO_DC5;
3973
3974 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3975
3976 return mask;
3977 }
3978
3979 static int
3980 __set_power_wells(struct i915_power_domains *power_domains,
3981 const struct i915_power_well_desc *power_well_descs,
3982 int power_well_count)
3983 {
3984 u64 power_well_ids = 0;
3985 int i;
3986
3987 power_domains->power_well_count = power_well_count;
3988 power_domains->power_wells =
3989 kcalloc(power_well_count,
3990 sizeof(*power_domains->power_wells),
3991 GFP_KERNEL);
3992 if (!power_domains->power_wells)
3993 return -ENOMEM;
3994
3995 for (i = 0; i < power_well_count; i++) {
3996 enum i915_power_well_id id = power_well_descs[i].id;
3997
3998 power_domains->power_wells[i].desc = &power_well_descs[i];
3999
4000 if (id == DISP_PW_ID_NONE)
4001 continue;
4002
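		/*
		 * Non-NONE well IDs must be unique; track the IDs seen so
		 * far in a u64 bitmap and warn on out-of-range or duplicate
		 * entries.
		 */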
4003 WARN_ON(id >= sizeof(power_well_ids) * 8);
4004 WARN_ON(power_well_ids & BIT_ULL(id));
4005 power_well_ids |= BIT_ULL(id);
4006 }
4007
4008 return 0;
4009 }
4010
4011 #define set_power_wells(power_domains, __power_well_descs) \
4012 __set_power_wells(power_domains, __power_well_descs, \
4013 ARRAY_SIZE(__power_well_descs))
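/*
 * Usage sketch (mirroring the callers in intel_power_domains_init()
 * below):
 *
 *	err = set_power_wells(power_domains, tgl_power_wells);
 *
 * The descriptor table must be passed by name, not through a pointer,
 * since ARRAY_SIZE() only works on actual arrays.
 */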
4014
4015 /**
4016 * intel_power_domains_init - initializes the power domain structures
4017 * @dev_priv: i915 device instance
4018 *
4019 * Initializes the power domain structures for @dev_priv depending upon the
4020 * supported platform.
4021 */
4022 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4023 {
4024 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4025 int err;
4026
4027 i915_modparams.disable_power_well =
4028 sanitize_disable_power_well_option(dev_priv,
4029 i915_modparams.disable_power_well);
4030 dev_priv->csr.allowed_dc_mask =
4031 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4032
4033 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4034
4035 mutex_init(&power_domains->lock);
4036
4037 INIT_DELAYED_WORK(&power_domains->async_put_work,
4038 intel_display_power_put_async_work);
4039
4040 /*
4041 * The enabling order will be from lower to higher indexed wells,
4042 * the disabling order is reversed.
4043 */
4044 if (IS_GEN(dev_priv, 12)) {
4045 err = set_power_wells(power_domains, tgl_power_wells);
4046 } else if (IS_GEN(dev_priv, 11)) {
4047 err = set_power_wells(power_domains, icl_power_wells);
4048 } else if (IS_CANNONLAKE(dev_priv)) {
4049 err = set_power_wells(power_domains, cnl_power_wells);
4050
4051 /*
4052 * DDI and AUX IO power wells get enabled for all ports
4053 * regardless of their presence or use. So, to avoid
4054 * timeouts, remove the last two entries ("DDI F IO" and
4055 * "AUX F") from the list on SKUs without port F.
4056 */
4057 if (!IS_CNL_WITH_PORT_F(dev_priv))
4058 power_domains->power_well_count -= 2;
4059 } else if (IS_GEMINILAKE(dev_priv)) {
4060 err = set_power_wells(power_domains, glk_power_wells);
4061 } else if (IS_BROXTON(dev_priv)) {
4062 err = set_power_wells(power_domains, bxt_power_wells);
4063 } else if (IS_GEN9_BC(dev_priv)) {
4064 err = set_power_wells(power_domains, skl_power_wells);
4065 } else if (IS_CHERRYVIEW(dev_priv)) {
4066 err = set_power_wells(power_domains, chv_power_wells);
4067 } else if (IS_BROADWELL(dev_priv)) {
4068 err = set_power_wells(power_domains, bdw_power_wells);
4069 } else if (IS_HASWELL(dev_priv)) {
4070 err = set_power_wells(power_domains, hsw_power_wells);
4071 } else if (IS_VALLEYVIEW(dev_priv)) {
4072 err = set_power_wells(power_domains, vlv_power_wells);
4073 } else if (IS_I830(dev_priv)) {
4074 err = set_power_wells(power_domains, i830_power_wells);
4075 } else {
4076 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4077 }
4078
4079 return err;
4080 }
4081
4082 /**
4083 * intel_power_domains_cleanup - clean up power domains resources
4084 * @dev_priv: i915 device instance
4085 *
4086 * Release any resources acquired by intel_power_domains_init()
4087 */
4088 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4089 {
4090 kfree(dev_priv->power_domains.power_wells);
4091 }
4092
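/*
 * Read back the state each power well was left in (by BIOS or firmware)
 * and prime the driver's cached hw_enabled flag accordingly.
 */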
4093 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4094 {
4095 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4096 struct i915_power_well *power_well;
4097
4098 mutex_lock(&power_domains->lock);
4099 for_each_power_well(dev_priv, power_well) {
4100 power_well->desc->ops->sync_hw(dev_priv, power_well);
4101 power_well->hw_enabled =
4102 power_well->desc->ops->is_enabled(dev_priv, power_well);
4103 }
4104 mutex_unlock(&power_domains->lock);
4105 }
4106
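/*
 * DBUF slice request/state handshake: set or clear DBUF_POWER_REQUEST,
 * give the hardware the 10 us it needs, then check that
 * DBUF_POWER_STATE reflects the requested state. Returns false on
 * timeout.
 */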
4107 static inline
4108 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4109 i915_reg_t reg, bool enable)
4110 {
4111 u32 val, status;
4112
4113 val = I915_READ(reg);
4114 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4115 I915_WRITE(reg, val);
4116 POSTING_READ(reg);
4117 udelay(10);
4118
4119 status = I915_READ(reg) & DBUF_POWER_STATE;
4120 if ((enable && !status) || (!enable && status)) {
4121 DRM_ERROR("DBus power %s timeout!\n",
4122 enable ? "enable" : "disable");
4123 return false;
4124 }
4125 return true;
4126 }
4127
4128 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4129 {
4130 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
4131 }
4132
4133 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4134 {
4135 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
4136 }
4137
4138 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
4139 {
4140 if (INTEL_GEN(dev_priv) < 11)
4141 return 1;
4142 return 2;
4143 }
4144
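/*
 * Adjust the number of enabled DBUF slices at runtime; gen11+ exposes
 * two slices, older platforms only one (see intel_dbuf_max_slices()).
 */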
4145 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4146 u8 req_slices)
4147 {
4148 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
4149 bool ret;
4150
4151 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
4152 DRM_ERROR("Invalid number of dbuf slices requested\n");
4153 return;
4154 }
4155
4156 if (req_slices == hw_enabled_slices || req_slices == 0)
4157 return;
4158
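	/*
	 * The first slice (S1) is kept enabled at all times (see
	 * icl_dbuf_enable()), so moving between one and two enabled
	 * slices only ever toggles S2.
	 */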
4159 if (req_slices > hw_enabled_slices)
4160 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
4161 else
4162 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
4163
4164 if (ret)
4165 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
4166 }
4167
4168 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4169 {
4170 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
4171 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
4172 POSTING_READ(DBUF_CTL_S2);
4173
4174 udelay(10);
4175
4176 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4177 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4178 DRM_ERROR("DBuf power enable timeout\n");
4179 else
4180 /*
4181 * FIXME: for now pretend that we only have 1 slice, see
4182 * intel_enabled_dbuf_slices_num().
4183 */
4184 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
4185 }
4186
4187 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4188 {
4189 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
4190 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
4191 POSTING_READ(DBUF_CTL_S2);
4192
4193 udelay(10);
4194
4195 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4196 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4197 DRM_ERROR("DBuf power disable timeout!\n");
4198 else
4199 /*
4200 * FIXME: for now pretend that the first slice is always
4201 * enabled, see intel_enabled_dbuf_slices_num().
4202 */
4203 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
4204 }
4205
4206 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4207 {
4208 u32 val;
4209
4210 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4211 MBUS_ABOX_BT_CREDIT_POOL2(16) |
4212 MBUS_ABOX_B_CREDIT(1) |
4213 MBUS_ABOX_BW_CREDIT(1);
4214
4215 I915_WRITE(MBUS_ABOX_CTL, val);
4216 }
4217
4218 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4219 {
4220 u32 val = I915_READ(LCPLL_CTL);
4221
4222 /*
4223 * The LCPLL register should be turned on by the BIOS. For now
4224 * let's just check its state and print errors in case
4225 * something is wrong. Don't even try to turn it on.
4226 */
4227
4228 if (val & LCPLL_CD_SOURCE_FCLK)
4229 DRM_ERROR("CDCLK source is not LCPLL\n");
4230
4231 if (val & LCPLL_PLL_DISABLE)
4232 DRM_ERROR("LCPLL is disabled\n");
4233
4234 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4235 DRM_ERROR("LCPLL not using non-SSC reference\n");
4236 }
4237
4238 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4239 {
4240 struct drm_device *dev = &dev_priv->drm;
4241 struct intel_crtc *crtc;
4242
4243 for_each_intel_crtc(dev, crtc)
4244 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4245 pipe_name(crtc->pipe));
4246
4247 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
4248 "Display power well on\n");
4249 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
4250 "SPLL enabled\n");
4251 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4252 "WRPLL1 enabled\n");
4253 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4254 "WRPLL2 enabled\n");
4255 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
4256 "Panel power on\n");
4257 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4258 "CPU PWM1 enabled\n");
4259 if (IS_HASWELL(dev_priv))
4260 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4261 "CPU PWM2 enabled\n");
4262 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4263 "PCH PWM1 enabled\n");
4264 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4265 "Utility pin enabled\n");
4266 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
4267 "PCH GTC enabled\n");
4268
4269 /*
4270 * In theory we can still leave IRQs enabled, as long as only the HPD
4271 * interrupts remain enabled. We used to check for that, but since it's
4272 * gen-specific and since we only disable LCPLL after we fully disable
4273 * the interrupts, the check below should be enough.
4274 */
4275 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4276 }
4277
4278 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4279 {
4280 if (IS_HASWELL(dev_priv))
4281 return I915_READ(D_COMP_HSW);
4282 else
4283 return I915_READ(D_COMP_BDW);
4284 }
4285
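/*
 * On HSW, D_COMP is written through the pcode mailbox; on BDW it is
 * plain MMIO, so write the register directly and post the write.
 */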
4286 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4287 {
4288 if (IS_HASWELL(dev_priv)) {
4289 if (sandybridge_pcode_write(dev_priv,
4290 GEN6_PCODE_WRITE_D_COMP, val))
4291 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
4292 } else {
4293 I915_WRITE(D_COMP_BDW, val);
4294 POSTING_READ(D_COMP_BDW);
4295 }
4296 }
4297
4298 /*
4299 * This function implements pieces of two sequences from BSpec:
4300 * - Sequence for display software to disable LCPLL
4301 * - Sequence for display software to allow package C8+
4302 * The steps implemented here are just the steps that actually touch the LCPLL
4303 * register. Callers should take care of disabling all the display engine
4304 * functions, doing the mode unset, fixing interrupts, etc.
4305 */
4306 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4307 bool switch_to_fclk, bool allow_power_down)
4308 {
4309 u32 val;
4310
4311 assert_can_disable_lcpll(dev_priv);
4312
4313 val = I915_READ(LCPLL_CTL);
4314
4315 if (switch_to_fclk) {
4316 val |= LCPLL_CD_SOURCE_FCLK;
4317 I915_WRITE(LCPLL_CTL, val);
4318
4319 if (wait_for_us(I915_READ(LCPLL_CTL) &
4320 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4321 DRM_ERROR("Switching to FCLK failed\n");
4322
4323 val = I915_READ(LCPLL_CTL);
4324 }
4325
4326 val |= LCPLL_PLL_DISABLE;
4327 I915_WRITE(LCPLL_CTL, val);
4328 POSTING_READ(LCPLL_CTL);
4329
4330 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4331 DRM_ERROR("LCPLL still locked\n");
4332
4333 val = hsw_read_dcomp(dev_priv);
4334 val |= D_COMP_COMP_DISABLE;
4335 hsw_write_dcomp(dev_priv, val);
4336 ndelay(100);
4337
4338 if (wait_for((hsw_read_dcomp(dev_priv) &
4339 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4340 DRM_ERROR("D_COMP RCOMP still in progress\n");
4341
4342 if (allow_power_down) {
4343 val = I915_READ(LCPLL_CTL);
4344 val |= LCPLL_POWER_DOWN_ALLOW;
4345 I915_WRITE(LCPLL_CTL, val);
4346 POSTING_READ(LCPLL_CTL);
4347 }
4348 }
4349
4350 /*
4351 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4352 * source.
4353 */
4354 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4355 {
4356 u32 val;
4357
4358 val = I915_READ(LCPLL_CTL);
4359
4360 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4361 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4362 return;
4363
4364 /*
4365 * Make sure we're not in PC8 state before disabling PC8, otherwise
4366 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4367 */
4368 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4369
4370 if (val & LCPLL_POWER_DOWN_ALLOW) {
4371 val &= ~LCPLL_POWER_DOWN_ALLOW;
4372 I915_WRITE(LCPLL_CTL, val);
4373 POSTING_READ(LCPLL_CTL);
4374 }
4375
4376 val = hsw_read_dcomp(dev_priv);
4377 val |= D_COMP_COMP_FORCE;
4378 val &= ~D_COMP_COMP_DISABLE;
4379 hsw_write_dcomp(dev_priv, val);
4380
4381 val = I915_READ(LCPLL_CTL);
4382 val &= ~LCPLL_PLL_DISABLE;
4383 I915_WRITE(LCPLL_CTL, val);
4384
4385 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4386 DRM_ERROR("LCPLL not locked yet\n");
4387
4388 if (val & LCPLL_CD_SOURCE_FCLK) {
4389 val = I915_READ(LCPLL_CTL);
4390 val &= ~LCPLL_CD_SOURCE_FCLK;
4391 I915_WRITE(LCPLL_CTL, val);
4392
4393 if (wait_for_us((I915_READ(LCPLL_CTL) &
4394 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4395 DRM_ERROR("Switching back to LCPLL failed\n");
4396 }
4397
4398 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4399
4400 intel_update_cdclk(dev_priv);
4401 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
4402 }
4403
4404 /*
4405 * Package states C8 and deeper are really deep PC states that can only be
4406 * reached when all the devices on the system allow it, so even if the graphics
4407 * device allows PC8+, it doesn't mean the system will actually get to these
4408 * states. Our driver only allows PC8+ when going into runtime PM.
4409 *
4410 * The requirements for PC8+ are that all the outputs are disabled, the power
4411 * well is disabled and most interrupts are disabled, and these are also
4412 * requirements for runtime PM. When these conditions are met, we manually do
4413 * the remaining steps: disable the interrupts and clocks, and switch the LCPLL
4414 * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
4415 * hang the machine.
4416 *
4417 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4418 * the state of some registers, so when we come back from PC8+ we need to
4419 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4420 * need to take care of the registers kept by RC6. Notice that this happens even
4421 * if we don't put the device in PCI D3 state (which is what currently happens
4422 * because of the runtime PM support).
4423 *
4424 * For more, read "Display Sequences for Package C8" in the hardware
4425 * documentation.
4426 */
4427 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4428 {
4429 u32 val;
4430
4431 DRM_DEBUG_KMS("Enabling package C8+\n");
4432
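	/*
	 * On LPT-LP, clear PCH_LP_PARTITION_LEVEL_DISABLE so the PCH
	 * power partitions can shut down while in PC8+; the bit is set
	 * again in hsw_disable_pc8().
	 */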
4433 if (HAS_PCH_LPT_LP(dev_priv)) {
4434 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4435 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4436 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4437 }
4438
4439 lpt_disable_clkout_dp(dev_priv);
4440 hsw_disable_lcpll(dev_priv, true, true);
4441 }
4442
4443 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4444 {
4445 u32 val;
4446
4447 DRM_DEBUG_KMS("Disabling package C8+\n");
4448
4449 hsw_restore_lcpll(dev_priv);
4450 intel_init_pch_refclk(dev_priv);
4451
4452 if (HAS_PCH_LPT_LP(dev_priv)) {
4453 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4454 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4455 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4456 }
4457 }
4458
4459 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4460 bool enable)
4461 {
4462 i915_reg_t reg;
4463 u32 reset_bits, val;
4464
4465 if (IS_IVYBRIDGE(dev_priv)) {
4466 reg = GEN7_MSG_CTL;
4467 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4468 } else {
4469 reg = HSW_NDE_RSTWRN_OPT;
4470 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4471 }
4472
4473 val = I915_READ(reg);
4474
4475 if (enable)
4476 val |= reset_bits;
4477 else
4478 val &= ~reset_bits;
4479
4480 I915_WRITE(reg, val);
4481 }
4482
4483 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4484 bool resume)
4485 {
4486 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4487 struct i915_power_well *well;
4488
4489 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4490
4491 /* enable PCH reset handshake */
4492 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4493
4494 /* enable PG1 and Misc I/O */
4495 mutex_lock(&power_domains->lock);
4496
4497 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4498 intel_power_well_enable(dev_priv, well);
4499
4500 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4501 intel_power_well_enable(dev_priv, well);
4502
4503 mutex_unlock(&power_domains->lock);
4504
4505 intel_cdclk_init(dev_priv);
4506
4507 gen9_dbuf_enable(dev_priv);
4508
4509 if (resume && dev_priv->csr.dmc_payload)
4510 intel_csr_load_program(dev_priv);
4511 }
4512
4513 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4514 {
4515 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4516 struct i915_power_well *well;
4517
4518 gen9_disable_dc_states(dev_priv);
4519
4520 gen9_dbuf_disable(dev_priv);
4521
4522 intel_cdclk_uninit(dev_priv);
4523
4524 /* The spec doesn't call for removing the reset handshake flag */
4525 /* disable PG1 and Misc I/O */
4526
4527 mutex_lock(&power_domains->lock);
4528
4529 /*
4530 * BSpec says to keep the MISC IO power well enabled here, only
4531 * remove our request for power well 1.
4532 * Note that even though the driver's request is removed power well 1
4533 * may stay enabled after this due to DMC's own request on it.
4534 */
4535 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4536 intel_power_well_disable(dev_priv, well);
4537
4538 mutex_unlock(&power_domains->lock);
4539
4540 usleep_range(10, 30); /* 10 us delay per Bspec */
4541 }
4542
4543 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4544 {
4545 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4546 struct i915_power_well *well;
4547
4548 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4549
4550 /*
4551 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4552 * or else the reset will hang because there is no PCH to respond.
4553 * Move the handshake programming to the initialization sequence.
4554 * Previously this was left up to the BIOS.
4555 */
4556 intel_pch_reset_handshake(dev_priv, false);
4557
4558 /* Enable PG1 */
4559 mutex_lock(&power_domains->lock);
4560
4561 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4562 intel_power_well_enable(dev_priv, well);
4563
4564 mutex_unlock(&power_domains->lock);
4565
4566 intel_cdclk_init(dev_priv);
4567
4568 gen9_dbuf_enable(dev_priv);
4569
4570 if (resume && dev_priv->csr.dmc_payload)
4571 intel_csr_load_program(dev_priv);
4572 }
4573
4574 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4575 {
4576 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4577 struct i915_power_well *well;
4578
4579 gen9_disable_dc_states(dev_priv);
4580
4581 gen9_dbuf_disable(dev_priv);
4582
4583 intel_cdclk_uninit(dev_priv);
4584
4585 /* The spec doesn't call for removing the reset handshake flag */
4586
4587 /*
4588 * Disable PW1 (PG1).
4589 * Note that even though the driver's request is removed power well 1
4590 * may stay enabled after this due to DMC's own request on it.
4591 */
4592 mutex_lock(&power_domains->lock);
4593
4594 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4595 intel_power_well_disable(dev_priv, well);
4596
4597 mutex_unlock(&power_domains->lock);
4598
4599 usleep_range(10, 30); /* 10 us delay per Bspec */
4600 }
4601
4602 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4603 {
4604 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4605 struct i915_power_well *well;
4606
4607 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4608
4609 /* 1. Enable PCH Reset Handshake */
4610 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4611
4612 /* 2-3. */
4613 intel_combo_phy_init(dev_priv);
4614
4615 /*
4616 * 4. Enable Power Well 1 (PG1).
4617 * The AUX IO power wells will be enabled on demand.
4618 */
4619 mutex_lock(&power_domains->lock);
4620 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4621 intel_power_well_enable(dev_priv, well);
4622 mutex_unlock(&power_domains->lock);
4623
4624 /* 5. Enable CD clock */
4625 intel_cdclk_init(dev_priv);
4626
4627 /* 6. Enable DBUF */
4628 gen9_dbuf_enable(dev_priv);
4629
4630 if (resume && dev_priv->csr.dmc_payload)
4631 intel_csr_load_program(dev_priv);
4632 }
4633
4634 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4635 {
4636 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4637 struct i915_power_well *well;
4638
4639 gen9_disable_dc_states(dev_priv);
4640
4641 /* 1. Disable all display engine functions -> already done */
4642
4643 /* 2. Disable DBUF */
4644 gen9_dbuf_disable(dev_priv);
4645
4646 /* 3. Disable CD clock */
4647 intel_cdclk_uninit(dev_priv);
4648
4649 /*
4650 * 4. Disable Power Well 1 (PG1).
4651 * The AUX IO power wells are toggled on demand, so they are already
4652 * disabled at this point.
4653 */
4654 mutex_lock(&power_domains->lock);
4655 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4656 intel_power_well_disable(dev_priv, well);
4657 mutex_unlock(&power_domains->lock);
4658
4659 usleep_range(10, 30); /* 10 us delay per Bspec */
4660
4661 /* 5. */
4662 intel_combo_phy_uninit(dev_priv);
4663 }
4664
4665 static void icl_display_core_init(struct drm_i915_private *dev_priv,
4666 bool resume)
4667 {
4668 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4669 struct i915_power_well *well;
4670
4671 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4672
4673 /* 1. Enable PCH reset handshake. */
4674 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4675
4676 /* 2. Initialize all combo phys */
4677 intel_combo_phy_init(dev_priv);
4678
4679 /*
4680 * 3. Enable Power Well 1 (PG1).
4681 * The AUX IO power wells will be enabled on demand.
4682 */
4683 mutex_lock(&power_domains->lock);
4684 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4685 intel_power_well_enable(dev_priv, well);
4686 mutex_unlock(&power_domains->lock);
4687
4688 /* 4. Enable CDCLK. */
4689 intel_cdclk_init(dev_priv);
4690
4691 /* 5. Enable DBUF. */
4692 icl_dbuf_enable(dev_priv);
4693
4694 /* 6. Setup MBUS. */
4695 icl_mbus_init(dev_priv);
4696
4697 if (resume && dev_priv->csr.dmc_payload)
4698 intel_csr_load_program(dev_priv);
4699 }
4700
4701 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
4702 {
4703 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4704 struct i915_power_well *well;
4705
4706 gen9_disable_dc_states(dev_priv);
4707
4708 /* 1. Disable all display engine functions -> already done */
4709
4710 /* 2. Disable DBUF */
4711 icl_dbuf_disable(dev_priv);
4712
4713 /* 3. Disable CD clock */
4714 intel_cdclk_uninit(dev_priv);
4715
4716 /*
4717 * 4. Disable Power Well 1 (PG1).
4718 * The AUX IO power wells are toggled on demand, so they are already
4719 * disabled at this point.
4720 */
4721 mutex_lock(&power_domains->lock);
4722 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4723 intel_power_well_disable(dev_priv, well);
4724 mutex_unlock(&power_domains->lock);
4725
4726 /* 5. */
4727 intel_combo_phy_uninit(dev_priv);
4728 }
4729
4730 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
4731 {
4732 struct i915_power_well *cmn_bc =
4733 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4734 struct i915_power_well *cmn_d =
4735 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
4736
4737 /*
4738 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
4739 * workaround never ever read DISPLAY_PHY_CONTROL, and
4740 * instead maintain a shadow copy ourselves. Use the actual
4741 * power well state and lane status to reconstruct the
4742 * expected initial value.
4743 */
4744 dev_priv->chv_phy_control =
4745 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
4746 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
4747 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
4748 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
4749 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
4750
4751 /*
4752 * If all lanes are disabled we leave the override disabled
4753 * with all power down bits cleared to match the state we
4754 * would use after disabling the port. Otherwise enable the
4755 * override and set the lane powerdown bits according to the
4756 * current lane status.
4757 */
4758 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
4759 u32 status = I915_READ(DPLL(PIPE_A));
4760 unsigned int mask;
4761
4762 mask = status & DPLL_PORTB_READY_MASK;
4763 if (mask == 0xf)
4764 mask = 0x0;
4765 else
4766 dev_priv->chv_phy_control |=
4767 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
4768
4769 dev_priv->chv_phy_control |=
4770 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
4771
4772 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
4773 if (mask == 0xf)
4774 mask = 0x0;
4775 else
4776 dev_priv->chv_phy_control |=
4777 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
4778
4779 dev_priv->chv_phy_control |=
4780 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
4781
4782 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
4783
4784 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
4785 } else {
4786 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
4787 }
4788
4789 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
4790 u32 status = I915_READ(DPIO_PHY_STATUS);
4791 unsigned int mask;
4792
4793 mask = status & DPLL_PORTD_READY_MASK;
4794
4795 if (mask == 0xf)
4796 mask = 0x0;
4797 else
4798 dev_priv->chv_phy_control |=
4799 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
4800
4801 dev_priv->chv_phy_control |=
4802 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
4803
4804 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
4805
4806 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
4807 } else {
4808 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
4809 }
4810
4811 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
4812
4813 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
4814 dev_priv->chv_phy_control);
4815 }
4816
4817 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
4818 {
4819 struct i915_power_well *cmn =
4820 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4821 struct i915_power_well *disp2d =
4822 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
4823
4824 /* If the display might already be active, skip this */
4825 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
4826 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
4827 I915_READ(DPIO_CTL) & DPIO_CMNRST)
4828 return;
4829
4830 DRM_DEBUG_KMS("toggling display PHY side reset\n");
4831
4832 /* cmnlane needs DPLL registers */
4833 disp2d->desc->ops->enable(dev_priv, disp2d);
4834
4835 /*
4836 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
4837 * Need to assert and de-assert PHY SB reset by gating the
4838 * common lane power, then un-gating it.
4839 * Simply ungating isn't enough to reset the PHY enough to get
4840 * ports and lanes running.
4841 */
4842 cmn->desc->ops->disable(dev_priv, cmn);
4843 }
4844
4845 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4846 {
4847 bool ret;
4848
4849 vlv_punit_get(dev_priv);
4850 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
4851 vlv_punit_put(dev_priv);
4852
4853 return ret;
4854 }
4855
4856 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4857 {
4858 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4859 "VED not power gated\n");
4860 }
4861
4862 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4863 {
4864 static const struct pci_device_id isp_ids[] = {
4865 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4866 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4867 {}
4868 };
4869
4870 WARN(!pci_dev_present(isp_ids) &&
4871 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4872 "ISP not power gated\n");
4873 }
4874
4875 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4876
4877 /**
4878 * intel_power_domains_init_hw - initialize hardware power domain state
4879 * @i915: i915 device instance
4880 * @resume: Called from resume code paths or not
4881 *
4882 * This function initializes the hardware power domain state and enables all
4883 * power wells belonging to the INIT power domain. Power wells in other
4884 * domains (and not in the INIT domain) are referenced or disabled by
4885 * intel_modeset_readout_hw_state(). After that the reference count of each
4886 * power well must match its HW enabled state, see
4887 * intel_power_domains_verify_state().
4888 *
4889 * It will return with power domains disabled (to be enabled later by
4890 * intel_power_domains_enable()) and must be paired with
4891 * intel_power_domains_driver_remove().
4892 */
4893 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
4894 {
4895 struct i915_power_domains *power_domains = &i915->power_domains;
4896
4897 power_domains->initializing = true;
4898
4899 /* Must happen before power domain init on VLV/CHV */
4900 intel_update_rawclk(i915);
4901
4902 if (INTEL_GEN(i915) >= 11) {
4903 icl_display_core_init(i915, resume);
4904 } else if (IS_CANNONLAKE(i915)) {
4905 cnl_display_core_init(i915, resume);
4906 } else if (IS_GEN9_BC(i915)) {
4907 skl_display_core_init(i915, resume);
4908 } else if (IS_GEN9_LP(i915)) {
4909 bxt_display_core_init(i915, resume);
4910 } else if (IS_CHERRYVIEW(i915)) {
4911 mutex_lock(&power_domains->lock);
4912 chv_phy_control_init(i915);
4913 mutex_unlock(&power_domains->lock);
4914 assert_isp_power_gated(i915);
4915 } else if (IS_VALLEYVIEW(i915)) {
4916 mutex_lock(&power_domains->lock);
4917 vlv_cmnlane_wa(i915);
4918 mutex_unlock(&power_domains->lock);
4919 assert_ved_power_gated(i915);
4920 assert_isp_power_gated(i915);
4921 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
4922 hsw_assert_cdclk(i915);
4923 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4924 } else if (IS_IVYBRIDGE(i915)) {
4925 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4926 }
4927
4928 /*
4929 * Keep all power wells enabled for any dependent HW access during
4930 * initialization and to make sure we keep BIOS-enabled display HW
4931 * resources powered until display HW readout is complete. We drop
4932 * this reference in intel_power_domains_enable().
4933 */
4934 power_domains->wakeref =
4935 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4936
4937 /* Disable power well support if the user asked for it. */
4938 if (!i915_modparams.disable_power_well)
4939 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4940 intel_power_domains_sync_hw(i915);
4941
4942 power_domains->initializing = false;
4943 }
4944
4945 /**
4946 * intel_power_domains_driver_remove - deinitialize hw power domain state
4947 * @i915: i915 device instance
4948 *
4949 * De-initializes the display power domain HW state. It also ensures that the
4950 * device stays powered up so that the driver can be reloaded.
4951 *
4952 * It must be called with power domains already disabled (after a call to
4953 * intel_power_domains_disable()) and must be paired with
4954 * intel_power_domains_init_hw().
4955 */
4956 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
4957 {
4958 intel_wakeref_t wakeref __maybe_unused =
4959 fetch_and_zero(&i915->power_domains.wakeref);
4960
4961 /* Remove the refcount we took to keep power well support disabled. */
4962 if (!i915_modparams.disable_power_well)
4963 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4964
4965 intel_display_power_flush_work_sync(i915);
4966
4967 intel_power_domains_verify_state(i915);
4968
4969 /* Keep the power well enabled, but cancel its rpm wakeref. */
4970 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
4971 }
4972
4973 /**
4974 * intel_power_domains_enable - enable toggling of display power wells
4975 * @i915: i915 device instance
4976 *
4977 * Enable the on-demand enabling/disabling of the display power wells. Note that
4978 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4979 * only at specific points of the display modeset sequence, thus they are not
4980 * affected by the intel_power_domains_enable()/disable() calls. The purpose
4981 * of these functions is to keep the rest of the power wells enabled until the end
4982 * of display HW readout (which will acquire the power references reflecting
4983 * the current HW state).
4984 */
4985 void intel_power_domains_enable(struct drm_i915_private *i915)
4986 {
4987 intel_wakeref_t wakeref __maybe_unused =
4988 fetch_and_zero(&i915->power_domains.wakeref);
4989
4990 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4991 intel_power_domains_verify_state(i915);
4992 }
4993
4994 /**
4995 * intel_power_domains_disable - disable toggling of display power wells
4996 * @i915: i915 device instance
4997 *
4998 * Disable the on-demand enabling/disabling of the display power wells. See
4999 * intel_power_domains_enable() for which power wells this call controls.
5000 */
5001 void intel_power_domains_disable(struct drm_i915_private *i915)
5002 {
5003 struct i915_power_domains *power_domains = &i915->power_domains;
5004
	WARN_ON(power_domains->wakeref);
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (INTEL_GEN(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		WARN_ON(power_domains->wakeref);
		power_domains->wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}
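
/*
 * Suspend/resume call-order sketch, as required by the kerneldoc above
 * (hypothetical PM glue, not exact i915 entry points):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	... system suspended ...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */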

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		DRM_DEBUG_DRIVER("%-25s %d\n",
				 power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			DRM_DEBUG_DRIVER("  %-23s %d\n",
					 intel_display_power_domain_str(i915,
									domain),
					 power_domains->domain_use_count[domain]);
	}
}
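
/*
 * Example of the debug output produced above, with hypothetical well names
 * and refcounts (one line per power well, indented lines for its domains):
 *
 *	power well 2              1
 *	  TRANSCODER_A            1
 *	  PIPE_A                  0
 */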

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify that the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

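		/*
		 * A well must be enabled in HW iff it is refcounted or
		 * marked always-on; anything else is a HW/SW state mismatch.
		 */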
		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
				  power_well->desc->name,
				  power_well->count, enabled);

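		/*
		 * The well's own refcount must equal the sum of the use
		 * counts of all domains it serves.
		 */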
		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			DRM_ERROR("power well %s refcount/domain refcount mismatch "
				  "(refcount %d/domains refcount %d)\n",
				  power_well->desc->name, power_well->count,
				  domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif

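/*
 * Late-suspend helper: enter the deepest platform display power state,
 * DC9 on BXT/GLK and gen11+, PC8 on HSW/BDW.
 */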
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
		bxt_enable_dc9(i915);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		hsw_enable_pc8(i915);
}

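/*
 * Early-resume counterpart of intel_display_power_suspend_late(): sanitize
 * any DC state left enabled and exit DC9/PC8.
 */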
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

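/*
 * Combined helper: uninitialize the display core and enter DC9 (gen11+ and
 * BXT/GLK) or PC8 (HSW/BDW) in one step.
 */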
void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

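/*
 * Counterpart of intel_display_power_suspend(): exit DC9/PC8, reinitialize
 * the display core and re-enable the deepest allowed DC state.
 */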
void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
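		/*
		 * With DMC firmware loaded, re-allow the deepest DC state
		 * permitted by allowed_dc_mask now that the core is back up.
		 */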
		if (i915->csr.dmc_payload) {
			if (i915->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEN9_LP(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (i915->csr.dmc_payload &&
		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}
