1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "display/intel_crt.h"
7 
8 #include "i915_drv.h"
9 #include "i915_irq.h"
10 #include "intel_cdclk.h"
11 #include "intel_combo_phy.h"
12 #include "intel_display_power.h"
13 #include "intel_de.h"
14 #include "intel_display_types.h"
15 #include "intel_dmc.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
18 #include "intel_pm.h"
19 #include "intel_pps.h"
20 #include "intel_sideband.h"
21 #include "intel_snps_phy.h"
22 #include "intel_tc.h"
23 #include "intel_vga.h"
24 
25 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
26 					 enum i915_power_well_id power_well_id);
27 
28 const char *
29 intel_display_power_domain_str(enum intel_display_power_domain domain)
30 {
31 	switch (domain) {
32 	case POWER_DOMAIN_DISPLAY_CORE:
33 		return "DISPLAY_CORE";
34 	case POWER_DOMAIN_PIPE_A:
35 		return "PIPE_A";
36 	case POWER_DOMAIN_PIPE_B:
37 		return "PIPE_B";
38 	case POWER_DOMAIN_PIPE_C:
39 		return "PIPE_C";
40 	case POWER_DOMAIN_PIPE_D:
41 		return "PIPE_D";
42 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
43 		return "PIPE_A_PANEL_FITTER";
44 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
45 		return "PIPE_B_PANEL_FITTER";
46 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
47 		return "PIPE_C_PANEL_FITTER";
48 	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
49 		return "PIPE_D_PANEL_FITTER";
50 	case POWER_DOMAIN_TRANSCODER_A:
51 		return "TRANSCODER_A";
52 	case POWER_DOMAIN_TRANSCODER_B:
53 		return "TRANSCODER_B";
54 	case POWER_DOMAIN_TRANSCODER_C:
55 		return "TRANSCODER_C";
56 	case POWER_DOMAIN_TRANSCODER_D:
57 		return "TRANSCODER_D";
58 	case POWER_DOMAIN_TRANSCODER_EDP:
59 		return "TRANSCODER_EDP";
60 	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
61 		return "TRANSCODER_VDSC_PW2";
62 	case POWER_DOMAIN_TRANSCODER_DSI_A:
63 		return "TRANSCODER_DSI_A";
64 	case POWER_DOMAIN_TRANSCODER_DSI_C:
65 		return "TRANSCODER_DSI_C";
66 	case POWER_DOMAIN_PORT_DDI_A_LANES:
67 		return "PORT_DDI_A_LANES";
68 	case POWER_DOMAIN_PORT_DDI_B_LANES:
69 		return "PORT_DDI_B_LANES";
70 	case POWER_DOMAIN_PORT_DDI_C_LANES:
71 		return "PORT_DDI_C_LANES";
72 	case POWER_DOMAIN_PORT_DDI_D_LANES:
73 		return "PORT_DDI_D_LANES";
74 	case POWER_DOMAIN_PORT_DDI_E_LANES:
75 		return "PORT_DDI_E_LANES";
76 	case POWER_DOMAIN_PORT_DDI_F_LANES:
77 		return "PORT_DDI_F_LANES";
78 	case POWER_DOMAIN_PORT_DDI_G_LANES:
79 		return "PORT_DDI_G_LANES";
80 	case POWER_DOMAIN_PORT_DDI_H_LANES:
81 		return "PORT_DDI_H_LANES";
82 	case POWER_DOMAIN_PORT_DDI_I_LANES:
83 		return "PORT_DDI_I_LANES";
84 	case POWER_DOMAIN_PORT_DDI_A_IO:
85 		return "PORT_DDI_A_IO";
86 	case POWER_DOMAIN_PORT_DDI_B_IO:
87 		return "PORT_DDI_B_IO";
88 	case POWER_DOMAIN_PORT_DDI_C_IO:
89 		return "PORT_DDI_C_IO";
90 	case POWER_DOMAIN_PORT_DDI_D_IO:
91 		return "PORT_DDI_D_IO";
92 	case POWER_DOMAIN_PORT_DDI_E_IO:
93 		return "PORT_DDI_E_IO";
94 	case POWER_DOMAIN_PORT_DDI_F_IO:
95 		return "PORT_DDI_F_IO";
96 	case POWER_DOMAIN_PORT_DDI_G_IO:
97 		return "PORT_DDI_G_IO";
98 	case POWER_DOMAIN_PORT_DDI_H_IO:
99 		return "PORT_DDI_H_IO";
100 	case POWER_DOMAIN_PORT_DDI_I_IO:
101 		return "PORT_DDI_I_IO";
102 	case POWER_DOMAIN_PORT_DSI:
103 		return "PORT_DSI";
104 	case POWER_DOMAIN_PORT_CRT:
105 		return "PORT_CRT";
106 	case POWER_DOMAIN_PORT_OTHER:
107 		return "PORT_OTHER";
108 	case POWER_DOMAIN_VGA:
109 		return "VGA";
110 	case POWER_DOMAIN_AUDIO_MMIO:
111 		return "AUDIO_MMIO";
112 	case POWER_DOMAIN_AUDIO_PLAYBACK:
113 		return "AUDIO_PLAYBACK";
114 	case POWER_DOMAIN_AUX_A:
115 		return "AUX_A";
116 	case POWER_DOMAIN_AUX_B:
117 		return "AUX_B";
118 	case POWER_DOMAIN_AUX_C:
119 		return "AUX_C";
120 	case POWER_DOMAIN_AUX_D:
121 		return "AUX_D";
122 	case POWER_DOMAIN_AUX_E:
123 		return "AUX_E";
124 	case POWER_DOMAIN_AUX_F:
125 		return "AUX_F";
126 	case POWER_DOMAIN_AUX_G:
127 		return "AUX_G";
128 	case POWER_DOMAIN_AUX_H:
129 		return "AUX_H";
130 	case POWER_DOMAIN_AUX_I:
131 		return "AUX_I";
132 	case POWER_DOMAIN_AUX_IO_A:
133 		return "AUX_IO_A";
134 	case POWER_DOMAIN_AUX_C_TBT:
135 		return "AUX_C_TBT";
136 	case POWER_DOMAIN_AUX_D_TBT:
137 		return "AUX_D_TBT";
138 	case POWER_DOMAIN_AUX_E_TBT:
139 		return "AUX_E_TBT";
140 	case POWER_DOMAIN_AUX_F_TBT:
141 		return "AUX_F_TBT";
142 	case POWER_DOMAIN_AUX_G_TBT:
143 		return "AUX_G_TBT";
144 	case POWER_DOMAIN_AUX_H_TBT:
145 		return "AUX_H_TBT";
146 	case POWER_DOMAIN_AUX_I_TBT:
147 		return "AUX_I_TBT";
148 	case POWER_DOMAIN_GMBUS:
149 		return "GMBUS";
150 	case POWER_DOMAIN_INIT:
151 		return "INIT";
152 	case POWER_DOMAIN_MODESET:
153 		return "MODESET";
154 	case POWER_DOMAIN_GT_IRQ:
155 		return "GT_IRQ";
156 	case POWER_DOMAIN_DPLL_DC_OFF:
157 		return "DPLL_DC_OFF";
158 	case POWER_DOMAIN_TC_COLD_OFF:
159 		return "TC_COLD_OFF";
160 	default:
161 		MISSING_CASE(domain);
162 		return "?";
163 	}
164 }
165 
166 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
167 				    struct i915_power_well *power_well)
168 {
169 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
170 	power_well->desc->ops->enable(dev_priv, power_well);
171 	power_well->hw_enabled = true;
172 }
173 
174 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
175 				     struct i915_power_well *power_well)
176 {
177 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
178 	power_well->hw_enabled = false;
179 	power_well->desc->ops->disable(dev_priv, power_well);
180 }
181 
182 static void intel_power_well_get(struct drm_i915_private *dev_priv,
183 				 struct i915_power_well *power_well)
184 {
185 	if (!power_well->count++)
186 		intel_power_well_enable(dev_priv, power_well);
187 }
188 
189 static void intel_power_well_put(struct drm_i915_private *dev_priv,
190 				 struct i915_power_well *power_well)
191 {
192 	drm_WARN(&dev_priv->drm, !power_well->count,
193 		 "Use count on power well %s is already zero",
194 		 power_well->desc->name);
195 
196 	if (!--power_well->count)
197 		intel_power_well_disable(dev_priv, power_well);
198 }
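
/*
 * Illustrative sketch (not part of the driver): the get/put helpers above
 * are plain reference counting, so only the 0 -> 1 and 1 -> 0 transitions
 * touch the hardware:
 *
 *	intel_power_well_get(dev_priv, power_well);	// 0 -> 1, enables
 *	intel_power_well_get(dev_priv, power_well);	// 1 -> 2, no-op
 *	intel_power_well_put(dev_priv, power_well);	// 2 -> 1, no-op
 *	intel_power_well_put(dev_priv, power_well);	// 1 -> 0, disables
 */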
199 
200 /**
201  * __intel_display_power_is_enabled - unlocked check for a power domain
202  * @dev_priv: i915 device instance
203  * @domain: power domain to check
204  *
205  * This is the unlocked version of intel_display_power_is_enabled() and should
206  * only be used from error capture and recovery code where deadlocks are
207  * possible.
208  *
209  * Returns:
210  * True when the power domain is enabled, false otherwise.
211  */
212 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
213 				      enum intel_display_power_domain domain)
214 {
215 	struct i915_power_well *power_well;
216 	bool is_enabled;
217 
218 	if (dev_priv->runtime_pm.suspended)
219 		return false;
220 
221 	is_enabled = true;
222 
223 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
224 		if (power_well->desc->always_on)
225 			continue;
226 
227 		if (!power_well->hw_enabled) {
228 			is_enabled = false;
229 			break;
230 		}
231 	}
232 
233 	return is_enabled;
234 }
235 
236 /**
237  * intel_display_power_is_enabled - check for a power domain
238  * @dev_priv: i915 device instance
239  * @domain: power domain to check
240  *
241  * This function can be used to check the hw power domain state. It is mostly
242  * used in hardware state readout functions. Everywhere else code should rely
243  * upon explicit power domain reference counting to ensure that the hardware
244  * block is powered up before accessing it.
245  *
246  * Callers must hold the relevant modesetting locks to ensure that concurrent
247  * threads can't disable the power well while the caller tries to read a few
248  * registers.
249  *
250  * Returns:
251  * True when the power domain is enabled, false otherwise.
252  */
253 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
254 				    enum intel_display_power_domain domain)
255 {
256 	struct i915_power_domains *power_domains;
257 	bool ret;
258 
259 	power_domains = &dev_priv->power_domains;
260 
261 	mutex_lock(&power_domains->lock);
262 	ret = __intel_display_power_is_enabled(dev_priv, domain);
263 	mutex_unlock(&power_domains->lock);
264 
265 	return ret;
266 }
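
/*
 * Hypothetical usage sketch (not taken from this file): readout code would
 * typically bail out early when the domain is powered down, e.g.
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;
 *
 * with the relevant modeset locks held, as the kernel-doc above requires.
 */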
267 
268 /*
269  * Starting with Haswell, we have a "Power Down Well" that can be turned off
270  * when not needed anymore. We have 4 registers that can request the power well
271  * to be enabled, and it will only be disabled if none of the registers is
272  * requesting it to be enabled.
273  */
274 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
275 				       u8 irq_pipe_mask, bool has_vga)
276 {
277 	if (has_vga)
278 		intel_vga_reset_io_mem(dev_priv);
279 
280 	if (irq_pipe_mask)
281 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
282 }
283 
284 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
285 				       u8 irq_pipe_mask)
286 {
287 	if (irq_pipe_mask)
288 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
289 }
290 
291 #define ICL_AUX_PW_TO_CH(pw_idx)	\
292 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
293 
294 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
295 	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
296 
297 static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
298 {
299 	int pw_idx = power_well->desc->hsw.idx;
300 
301 	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
302 						 ICL_AUX_PW_TO_CH(pw_idx);
303 }
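
/*
 * Worked example (illustrative): the AUX power well indices are contiguous,
 * so with pw_idx == ICL_PW_CTL_IDX_AUX_B, ICL_AUX_PW_TO_CH() yields
 * AUX_CH_B; likewise the first TBT well, ICL_PW_CTL_IDX_AUX_TBT1, maps to
 * AUX_CH_C via ICL_TBT_AUX_PW_TO_CH().
 */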
304 
305 static struct intel_digital_port *
306 aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
307 		       enum aux_ch aux_ch)
308 {
309 	struct intel_digital_port *dig_port = NULL;
310 	struct intel_encoder *encoder;
311 
312 	for_each_intel_encoder(&dev_priv->drm, encoder) {
313 		/* We'll check the MST primary port */
314 		if (encoder->type == INTEL_OUTPUT_DP_MST)
315 			continue;
316 
317 		dig_port = enc_to_dig_port(encoder);
318 		if (!dig_port)
319 			continue;
320 
321 		if (dig_port->aux_ch != aux_ch) {
322 			dig_port = NULL;
323 			continue;
324 		}
325 
326 		break;
327 	}
328 
329 	return dig_port;
330 }
331 
332 static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
333 				  const struct i915_power_well *power_well)
334 {
335 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
336 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
337 
338 	return intel_port_to_phy(i915, dig_port->base.port);
339 }
340 
341 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
342 					   struct i915_power_well *power_well,
343 					   bool timeout_expected)
344 {
345 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
346 	int pw_idx = power_well->desc->hsw.idx;
347 	int enable_delay = power_well->desc->hsw.fixed_enable_delay;
348 
349 	/*
350 	 * For some power wells we're not supposed to watch the status bit for
351 	 * an ack, but rather just wait a fixed amount of time and then
352 	 * proceed.  This is only used on DG2.
353 	 */
354 	if (IS_DG2(dev_priv) && enable_delay) {
355 		usleep_range(enable_delay, 2 * enable_delay);
356 		return;
357 	}
358 
359 	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
360 	if (intel_de_wait_for_set(dev_priv, regs->driver,
361 				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
362 		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
363 			    power_well->desc->name);
364 
365 		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
366 
367 	}
368 }
369 
370 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
371 				     const struct i915_power_well_regs *regs,
372 				     int pw_idx)
373 {
374 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
375 	u32 ret;
376 
377 	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
378 	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
379 	if (regs->kvmr.reg)
380 		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
381 	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
382 
383 	return ret;
384 }
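
/*
 * The value returned above packs one bit per requester: bit 0 BIOS, bit 1
 * driver, bit 2 KVMR, bit 3 DEBUG. As an illustrative decoding, a return
 * of 0x9 would mean the BIOS and DEBUG request bits are set.
 */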
385 
386 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
387 					    struct i915_power_well *power_well)
388 {
389 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
390 	int pw_idx = power_well->desc->hsw.idx;
391 	bool disabled;
392 	u32 reqs;
393 
394 	/*
395 	 * Bspec doesn't require waiting for PWs to get disabled, but still do
396 	 * this out of paranoia. The known cases where a PW will be forced on:
397 	 * - a KVMR request on any power well via the KVMR request register
398 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
399 	 *   DEBUG request registers
400 	 * Skip the wait in case any of the request bits are set and print a
401 	 * diagnostic message.
402 	 */
403 	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
404 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
405 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
406 	if (disabled)
407 		return;
408 
409 	drm_dbg_kms(&dev_priv->drm,
410 		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
411 		    power_well->desc->name,
412 		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
413 }
414 
415 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
416 					   enum skl_power_gate pg)
417 {
418 	/* Timeout 5us for PG#0, for other PGs 1us */
419 	drm_WARN_ON(&dev_priv->drm,
420 		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
421 					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
422 }
423 
424 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
425 				  struct i915_power_well *power_well)
426 {
427 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
428 	int pw_idx = power_well->desc->hsw.idx;
429 	u32 val;
430 
431 	if (power_well->desc->hsw.has_fuses) {
432 		enum skl_power_gate pg;
433 
434 		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
435 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
436 		/*
437 		 * For PW1 we have to wait both for the PW0/PG0 fuse state
438 		 * before enabling the power well and PW1/PG1's own fuse
439 		 * state after the enabling. For all other power wells with
440 		 * fuses we only have to wait for that PW/PG's fuse state
441 		 * after the enabling.
442 		 */
443 		if (pg == SKL_PG1)
444 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
445 	}
446 
447 	val = intel_de_read(dev_priv, regs->driver);
448 	intel_de_write(dev_priv, regs->driver,
449 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
450 
451 	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
452 
453 	if (power_well->desc->hsw.has_fuses) {
454 		enum skl_power_gate pg;
455 
456 		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
457 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
458 		gen9_wait_for_power_well_fuses(dev_priv, pg);
459 	}
460 
461 	hsw_power_well_post_enable(dev_priv,
462 				   power_well->desc->hsw.irq_pipe_mask,
463 				   power_well->desc->hsw.has_vga);
464 }
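
/*
 * Illustrative summary of the PW1 enable sequence implemented above on
 * platforms with fuses: wait for the PG0 fuse distribution status, set the
 * driver's REQ bit, wait for the well's STATE ack, then wait for PG1's own
 * fuse status.
 */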
465 
466 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
467 				   struct i915_power_well *power_well)
468 {
469 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
470 	int pw_idx = power_well->desc->hsw.idx;
471 	u32 val;
472 
473 	hsw_power_well_pre_disable(dev_priv,
474 				   power_well->desc->hsw.irq_pipe_mask);
475 
476 	val = intel_de_read(dev_priv, regs->driver);
477 	intel_de_write(dev_priv, regs->driver,
478 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
479 	hsw_wait_for_power_well_disable(dev_priv, power_well);
480 }
481 
482 static void
483 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
484 				    struct i915_power_well *power_well)
485 {
486 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
487 	int pw_idx = power_well->desc->hsw.idx;
488 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
489 	u32 val;
490 
491 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
492 
493 	val = intel_de_read(dev_priv, regs->driver);
494 	intel_de_write(dev_priv, regs->driver,
495 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
496 
497 	if (DISPLAY_VER(dev_priv) < 12) {
498 		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
499 		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
500 			       val | ICL_LANE_ENABLE_AUX);
501 	}
502 
503 	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
504 
505 	/* Display WA #1178: icl */
506 	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
507 	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
508 		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
509 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
510 		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
511 	}
512 }
513 
514 static void
515 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
516 				     struct i915_power_well *power_well)
517 {
518 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
519 	int pw_idx = power_well->desc->hsw.idx;
520 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
521 	u32 val;
522 
523 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
524 
525 	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
526 	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
527 		       val & ~ICL_LANE_ENABLE_AUX);
528 
529 	val = intel_de_read(dev_priv, regs->driver);
530 	intel_de_write(dev_priv, regs->driver,
531 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
532 
533 	hsw_wait_for_power_well_disable(dev_priv, power_well);
534 }
535 
536 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
537 
538 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
539 
540 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
541 				      struct i915_power_well *power_well)
542 {
543 	int refs = hweight64(power_well->desc->domains &
544 			     async_put_domains_mask(&dev_priv->power_domains));
545 
546 	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
547 
548 	return refs;
549 }
550 
551 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
552 					struct i915_power_well *power_well,
553 					struct intel_digital_port *dig_port)
554 {
555 	/* Bypass the check if all references are released asynchronously */
556 	if (power_well_async_ref_count(dev_priv, power_well) ==
557 	    power_well->count)
558 		return;
559 
560 	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
561 		return;
562 
563 	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
564 		return;
565 
566 	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
567 }
568 
569 #else
570 
571 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
572 					struct i915_power_well *power_well,
573 					struct intel_digital_port *dig_port)
574 {
575 }
576 
577 #endif
578 
579 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
580 
581 static void icl_tc_cold_exit(struct drm_i915_private *i915)
582 {
583 	int ret, tries = 0;
584 
585 	while (1) {
586 		ret = sandybridge_pcode_write_timeout(i915,
587 						      ICL_PCODE_EXIT_TCCOLD,
588 						      0, 250, 1);
589 		if (ret != -EAGAIN || ++tries == 3)
590 			break;
591 		msleep(1);
592 	}
593 
594 	/* Spec states that TC cold exit can take up to 1ms to complete */
595 	if (!ret)
596 		msleep(1);
597 
598 	/* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
599 	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
600 		    "succeeded");
601 }
602 
603 static void
604 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
605 				 struct i915_power_well *power_well)
606 {
607 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
608 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
609 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
610 	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
611 	bool timeout_expected;
612 	u32 val;
613 
614 	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
615 
616 	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
617 	val &= ~DP_AUX_CH_CTL_TBT_IO;
618 	if (is_tbt)
619 		val |= DP_AUX_CH_CTL_TBT_IO;
620 	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
621 
622 	val = intel_de_read(dev_priv, regs->driver);
623 	intel_de_write(dev_priv, regs->driver,
624 		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
625 
626 	/*
627 	 * An AUX timeout is expected if the TBT DP tunnel is down,
628 	 * or if we need to enable AUX on a legacy TypeC port as part of
629 	 * the TC-cold exit sequence.
630 	 */
631 	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
632 	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
633 		icl_tc_cold_exit(dev_priv);
634 
635 	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
636 
637 	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
638 		enum tc_port tc_port;
639 
640 		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
641 		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
642 			       HIP_INDEX_VAL(tc_port, 0x2));
643 
644 		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
645 					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
646 			drm_warn(&dev_priv->drm,
647 				 "Timeout waiting for TC uC health\n");
648 	}
649 }
650 
651 static void
652 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
653 				  struct i915_power_well *power_well)
654 {
655 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
656 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
657 
658 	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
659 
660 	hsw_power_well_disable(dev_priv, power_well);
661 }
662 
663 static void
664 icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
665 			  struct i915_power_well *power_well)
666 {
667 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
668 
669 	if (intel_phy_is_tc(dev_priv, phy))
670 		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
671 	else if (IS_ICELAKE(dev_priv))
672 		return icl_combo_phy_aux_power_well_enable(dev_priv,
673 							   power_well);
674 	else
675 		return hsw_power_well_enable(dev_priv, power_well);
676 }
677 
678 static void
679 icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
680 			   struct i915_power_well *power_well)
681 {
682 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
683 
684 	if (intel_phy_is_tc(dev_priv, phy))
685 		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
686 	else if (IS_ICELAKE(dev_priv))
687 		return icl_combo_phy_aux_power_well_disable(dev_priv,
688 							    power_well);
689 	else
690 		return hsw_power_well_disable(dev_priv, power_well);
691 }
692 
693 /*
694  * We should only use the power well if we explicitly asked the hardware to
695  * enable it, so check if it's enabled and also check if we've requested it to
696  * be enabled.
697  */
698 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
699 				   struct i915_power_well *power_well)
700 {
701 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
702 	enum i915_power_well_id id = power_well->desc->id;
703 	int pw_idx = power_well->desc->hsw.idx;
704 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
705 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
706 	u32 val;
707 
708 	val = intel_de_read(dev_priv, regs->driver);
709 
710 	/*
711 	 * On GEN9 big core, due to a DMC bug, the driver's request bits for PW1
712 	 * and the MISC_IO PW will not be restored, so check instead for the
713 	 * BIOS's own request bits, which are forced-on for these power wells
714 	 * when exiting DC5/6.
715 	 */
716 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
717 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
718 		val |= intel_de_read(dev_priv, regs->bios);
719 
720 	return (val & mask) == mask;
721 }
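
/*
 * Illustrative: for pw_idx N this means both HSW_PWR_WELL_CTL_REQ(N) and
 * HSW_PWR_WELL_CTL_STATE(N) must be set, i.e. the well only counts as
 * enabled when we requested it *and* the hardware acked the request.
 */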
722 
723 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
724 {
725 	drm_WARN_ONCE(&dev_priv->drm,
726 		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
727 		      "DC9 already programmed to be enabled.\n");
728 	drm_WARN_ONCE(&dev_priv->drm,
729 		      intel_de_read(dev_priv, DC_STATE_EN) &
730 		      DC_STATE_EN_UPTO_DC5,
731 		      "DC5 still not disabled to enable DC9.\n");
732 	drm_WARN_ONCE(&dev_priv->drm,
733 		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
734 		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
735 		      "Power well 2 on.\n");
736 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
737 		      "Interrupts not disabled yet.\n");
738 
739 	 /*
740 	  * TODO: check for the following to verify the conditions to enter DC9
741 	  * state are satisfied:
742 	  * 1] Check relevant display engine registers to verify if mode set
743 	  * disable sequence was followed.
744 	  * 2] Check if the display uninitialize sequence has been initiated.
745 	  */
746 }
747 
748 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
749 {
750 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
751 		      "Interrupts not disabled yet.\n");
752 	drm_WARN_ONCE(&dev_priv->drm,
753 		      intel_de_read(dev_priv, DC_STATE_EN) &
754 		      DC_STATE_EN_UPTO_DC5,
755 		      "DC5 still not disabled.\n");
756 
757 	 /*
758 	  * TODO: check for the following to verify DC9 state was indeed
759 	  * entered before programming to disable it:
760 	  * 1] Check relevant display engine registers to verify if mode
761 	  *  set disable sequence was followed.
762 	  * 2] Check if the display uninitialize sequence has been initiated.
763 	  */
764 }
765 
766 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
767 				u32 state)
768 {
769 	int rewrites = 0;
770 	int rereads = 0;
771 	u32 v;
772 
773 	intel_de_write(dev_priv, DC_STATE_EN, state);
774 
775 	/* It has been observed that disabling the dc6 state sometimes
776 	 * doesn't stick and dmc keeps returning the old value. Make sure
777 	 * the write really sticks by re-reading it enough times, and force a
778 	 * rewrite until we are confident that the state is exactly what we want.
779 	 */
780 	do {
781 		v = intel_de_read(dev_priv, DC_STATE_EN);
782 
783 		if (v != state) {
784 			intel_de_write(dev_priv, DC_STATE_EN, state);
785 			rewrites++;
786 			rereads = 0;
787 		} else if (rereads++ > 5) {
788 			break;
789 		}
790 
791 	} while (rewrites < 100);
792 
793 	if (v != state)
794 		drm_err(&dev_priv->drm,
795 			"Writing dc state to 0x%x failed, now 0x%x\n",
796 			state, v);
797 
798 	/* Most of the time we need just one retry, so avoid log spam */
799 	if (rewrites > 1)
800 		drm_dbg_kms(&dev_priv->drm,
801 			    "Rewrote dc state to 0x%x %d times\n",
802 			    state, rewrites);
803 }
804 
805 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
806 {
807 	u32 mask;
808 
809 	mask = DC_STATE_EN_UPTO_DC5;
810 
811 	if (DISPLAY_VER(dev_priv) >= 12)
812 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
813 					  | DC_STATE_EN_DC9;
814 	else if (DISPLAY_VER(dev_priv) == 11)
815 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
816 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
817 		mask |= DC_STATE_EN_DC9;
818 	else
819 		mask |= DC_STATE_EN_UPTO_DC6;
820 
821 	return mask;
822 }
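
/*
 * Example (illustrative): on display version 12 the mask comes out as
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 |
 * DC_STATE_EN_DC9, i.e. every DC state bit the platform can express.
 */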
823 
824 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
825 {
826 	u32 val;
827 
828 	if (!HAS_DISPLAY(dev_priv))
829 		return;
830 
831 	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
832 
833 	drm_dbg_kms(&dev_priv->drm,
834 		    "Resetting DC state tracking from %02x to %02x\n",
835 		    dev_priv->dmc.dc_state, val);
836 	dev_priv->dmc.dc_state = val;
837 }
838 
839 /**
840  * gen9_set_dc_state - set target display C power state
841  * @dev_priv: i915 device instance
842  * @state: target DC power state
843  * - DC_STATE_DISABLE
844  * - DC_STATE_EN_UPTO_DC5
845  * - DC_STATE_EN_UPTO_DC6
846  * - DC_STATE_EN_DC9
847  *
848  * Signal to DMC firmware/HW the target DC power state passed in @state.
849  * DMC/HW can turn off individual display clocks and power rails when entering
850  * DMC/HW can turn off individual display clocks and power rails when entering
851  * a deeper DC power state (higher in number) and turns these back on when
852  * exiting that state to a shallower power state (lower in number). The HW
853  * will decide when to actually enter a given state on an on-demand basis, for
854  * instance depending on the active state of display pipes. The state of
855  * display registers backed by affected power rails is saved/restored as needed.
856  *
857  * Based on the above, entering a deeper DC power state is asynchronous wrt.
858  * requesting it. Disabling a deeper power state is synchronous: for instance
859  * back on and register state is restored. This is guaranteed by the MMIO write
860  * to DC_STATE_EN blocking until the state is restored.
861  */
862 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
863 {
864 	u32 val;
865 	u32 mask;
866 
867 	if (!HAS_DISPLAY(dev_priv))
868 		return;
869 
870 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
871 			     state & ~dev_priv->dmc.allowed_dc_mask))
872 		state &= dev_priv->dmc.allowed_dc_mask;
873 
874 	val = intel_de_read(dev_priv, DC_STATE_EN);
875 	mask = gen9_dc_mask(dev_priv);
876 	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
877 		    val & mask, state);
878 
879 	/* Check if DMC is ignoring our DC state requests */
880 	if ((val & mask) != dev_priv->dmc.dc_state)
881 		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
882 			dev_priv->dmc.dc_state, val & mask);
883 
884 	val &= ~mask;
885 	val |= state;
886 
887 	gen9_write_dc_state(dev_priv, val);
888 
889 	dev_priv->dmc.dc_state = val & mask;
890 }
891 
892 static u32
893 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
894 			 u32 target_dc_state)
895 {
896 	u32 states[] = {
897 		DC_STATE_EN_UPTO_DC6,
898 		DC_STATE_EN_UPTO_DC5,
899 		DC_STATE_EN_DC3CO,
900 		DC_STATE_DISABLE,
901 	};
902 	int i;
903 
904 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
905 		if (target_dc_state != states[i])
906 			continue;
907 
908 		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
909 			break;
910 
911 		target_dc_state = states[i + 1];
912 	}
913 
914 	return target_dc_state;
915 }
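
/*
 * Worked example (illustrative): if DC_STATE_EN_UPTO_DC6 is requested but
 * missing from allowed_dc_mask, the loop above falls back to
 * DC_STATE_EN_UPTO_DC5, then DC_STATE_EN_DC3CO, and finally
 * DC_STATE_DISABLE, returning the first state the platform allows.
 */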
916 
917 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
918 {
919 	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
920 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
921 }
922 
923 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
924 {
925 	u32 val;
926 
927 	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
928 	val = intel_de_read(dev_priv, DC_STATE_EN);
929 	val &= ~DC_STATE_DC3CO_STATUS;
930 	intel_de_write(dev_priv, DC_STATE_EN, val);
931 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
932 	/*
933 	 * Delay of 200us for the DC3CO exit time, per Bspec 49196
934 	 */
935 	usleep_range(200, 210);
936 }
937 
938 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
939 {
940 	assert_can_enable_dc9(dev_priv);
941 
942 	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
943 	/*
944 	 * Power sequencer reset is not needed on
945 	 * platforms with South Display Engine on PCH,
946 	 * because PPS registers are always on.
947 	 */
948 	if (!HAS_PCH_SPLIT(dev_priv))
949 		intel_pps_reset_all(dev_priv);
950 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
951 }
952 
953 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
954 {
955 	assert_can_disable_dc9(dev_priv);
956 
957 	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
958 
959 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
960 
961 	intel_pps_unlock_regs_wa(dev_priv);
962 }
963 
964 static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
965 {
966 	drm_WARN_ONCE(&dev_priv->drm,
967 		      !intel_de_read(dev_priv,
968 				     DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
969 				     "DMC program storage start is NULL\n");
970 	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
971 		      "DMC SSP Base Not fine\n");
972 	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
973 		      "DMC HTP Not fine\n");
974 }
975 
976 static struct i915_power_well *
977 lookup_power_well(struct drm_i915_private *dev_priv,
978 		  enum i915_power_well_id power_well_id)
979 {
980 	struct i915_power_well *power_well;
981 
982 	for_each_power_well(dev_priv, power_well)
983 		if (power_well->desc->id == power_well_id)
984 			return power_well;
985 
986 	/*
987 	 * It's not feasible to add error checking code to the callers since
988 	 * this condition really shouldn't happen and it doesn't even make sense
989 	 * to abort things like display initialization sequences. Just return
990 	 * the first power well and hope the WARN gets reported so we can fix
991 	 * our driver.
992 	 */
993 	drm_WARN(&dev_priv->drm, 1,
994 		 "Power well %d not defined for this platform\n",
995 		 power_well_id);
996 	return &dev_priv->power_domains.power_wells[0];
997 }
998 
999 /**
1000  * intel_display_power_set_target_dc_state - Set target dc state.
1001  * @dev_priv: i915 device
1002  * @state: state which needs to be set as target_dc_state.
1003  *
1004  * This function sets the "DC off" power well's target_dc_state;
1005  * based on this target_dc_state, the "DC off" power well will
1006  * enable the desired DC state.
1007  */
1008 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
1009 					     u32 state)
1010 {
1011 	struct i915_power_well *power_well;
1012 	bool dc_off_enabled;
1013 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1014 
1015 	mutex_lock(&power_domains->lock);
1016 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1017 
1018 	if (drm_WARN_ON(&dev_priv->drm, !power_well))
1019 		goto unlock;
1020 
1021 	state = sanitize_target_dc_state(dev_priv, state);
1022 
1023 	if (state == dev_priv->dmc.target_dc_state)
1024 		goto unlock;
1025 
1026 	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1027 							   power_well);
1028 	/*
1029 	 * If the DC off power well is disabled, we need to enable and then
1030 	 * disable it so that the new target DC state takes effect.
1031 	 */
1032 	if (!dc_off_enabled)
1033 		power_well->desc->ops->enable(dev_priv, power_well);
1034 
1035 	dev_priv->dmc.target_dc_state = state;
1036 
1037 	if (!dc_off_enabled)
1038 		power_well->desc->ops->disable(dev_priv, power_well);
1039 
1040 unlock:
1041 	mutex_unlock(&power_domains->lock);
1042 }
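
/*
 * Hypothetical caller sketch (not from this file): a feature that prefers a
 * shallower DC state, such as PSR2 with DC3CO, could request it with
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *
 * and later restore DC_STATE_EN_UPTO_DC6; either request is first run
 * through sanitize_target_dc_state() as shown above.
 */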
1043 
1044 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1045 {
1046 	enum i915_power_well_id high_pg;
1047 
1048 	/* Power wells at this level and above must be disabled for DC5 entry */
1049 	if (DISPLAY_VER(dev_priv) == 12)
1050 		high_pg = ICL_DISP_PW_3;
1051 	else
1052 		high_pg = SKL_DISP_PW_2;
1053 
1054 	drm_WARN_ONCE(&dev_priv->drm,
1055 		      intel_display_power_well_is_enabled(dev_priv, high_pg),
1056 		      "Power wells above platform's DC5 limit still enabled.\n");
1057 
1058 	drm_WARN_ONCE(&dev_priv->drm,
1059 		      (intel_de_read(dev_priv, DC_STATE_EN) &
1060 		       DC_STATE_EN_UPTO_DC5),
1061 		      "DC5 already programmed to be enabled.\n");
1062 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1063 
1064 	assert_dmc_loaded(dev_priv);
1065 }
1066 
1067 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1068 {
1069 	assert_can_enable_dc5(dev_priv);
1070 
1071 	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1072 
1073 	/* Wa Display #1183: skl,kbl,cfl */
1074 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1075 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1076 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1077 
1078 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1079 }
1080 
1081 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1082 {
1083 	drm_WARN_ONCE(&dev_priv->drm,
1084 		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1085 		      "Backlight is not disabled.\n");
1086 	drm_WARN_ONCE(&dev_priv->drm,
1087 		      (intel_de_read(dev_priv, DC_STATE_EN) &
1088 		       DC_STATE_EN_UPTO_DC6),
1089 		      "DC6 already programmed to be enabled.\n");
1090 
1091 	assert_dmc_loaded(dev_priv);
1092 }
1093 
1094 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1095 {
1096 	assert_can_enable_dc6(dev_priv);
1097 
1098 	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1099 
1100 	/* Wa Display #1183: skl,kbl,cfl */
1101 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1102 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1103 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1104 
1105 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1106 }
1107 
1108 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1109 				   struct i915_power_well *power_well)
1110 {
1111 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1112 	int pw_idx = power_well->desc->hsw.idx;
1113 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1114 	u32 bios_req = intel_de_read(dev_priv, regs->bios);
1115 
1116 	/* Take over the request bit if set by BIOS. */
1117 	if (bios_req & mask) {
1118 		u32 drv_req = intel_de_read(dev_priv, regs->driver);
1119 
1120 		if (!(drv_req & mask))
1121 			intel_de_write(dev_priv, regs->driver, drv_req | mask);
1122 		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1123 	}
1124 }
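
/*
 * Illustrative: if the BIOS left its REQ bit set, the driver REQ bit is set
 * *before* the BIOS bit is cleared, so the well never glitches off during
 * the takeover.
 */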
1125 
1126 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1127 					   struct i915_power_well *power_well)
1128 {
1129 	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1130 }
1131 
1132 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1133 					    struct i915_power_well *power_well)
1134 {
1135 	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1136 }
1137 
1138 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1139 					    struct i915_power_well *power_well)
1140 {
1141 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1142 }
1143 
1144 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1145 {
1146 	struct i915_power_well *power_well;
1147 
1148 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1149 	if (power_well->count > 0)
1150 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1151 
1152 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1153 	if (power_well->count > 0)
1154 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1155 
1156 	if (IS_GEMINILAKE(dev_priv)) {
1157 		power_well = lookup_power_well(dev_priv,
1158 					       GLK_DISP_PW_DPIO_CMN_C);
1159 		if (power_well->count > 0)
1160 			bxt_ddi_phy_verify_state(dev_priv,
1161 						 power_well->desc->bxt.phy);
1162 	}
1163 }
1164 
1165 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1166 					   struct i915_power_well *power_well)
1167 {
1168 	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1169 		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1170 }
1171 
1172 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1173 {
1174 	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1175 	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
1176 
1177 	drm_WARN(&dev_priv->drm,
1178 		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1179 		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
1180 		 hw_enabled_dbuf_slices,
1181 		 enabled_dbuf_slices);
1182 }
1183 
1184 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1185 {
1186 	struct intel_cdclk_config cdclk_config = {};
1187 
1188 	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
1189 		tgl_disable_dc3co(dev_priv);
1190 		return;
1191 	}
1192 
1193 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1194 
1195 	if (!HAS_DISPLAY(dev_priv))
1196 		return;
1197 
1198 	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1199 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1200 	drm_WARN_ON(&dev_priv->drm,
1201 		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1202 					      &cdclk_config));
1203 
1204 	gen9_assert_dbuf_enabled(dev_priv);
1205 
1206 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1207 		bxt_verify_ddi_phy_power_wells(dev_priv);
1208 
1209 	if (DISPLAY_VER(dev_priv) >= 11)
1210 		/*
1211 		 * DMC retains HW context only for port A, the other combo
1212 		 * PHY's HW context for port B is lost after DC transitions,
1213 		 * so we need to restore it manually.
1214 		 */
1215 		intel_combo_phy_init(dev_priv);
1216 }
1217 
1218 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1219 					  struct i915_power_well *power_well)
1220 {
1221 	gen9_disable_dc_states(dev_priv);
1222 }
1223 
1224 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1225 					   struct i915_power_well *power_well)
1226 {
1227 	if (!intel_dmc_has_payload(dev_priv))
1228 		return;
1229 
1230 	switch (dev_priv->dmc.target_dc_state) {
1231 	case DC_STATE_EN_DC3CO:
1232 		tgl_enable_dc3co(dev_priv);
1233 		break;
1234 	case DC_STATE_EN_UPTO_DC6:
1235 		skl_enable_dc6(dev_priv);
1236 		break;
1237 	case DC_STATE_EN_UPTO_DC5:
1238 		gen9_enable_dc5(dev_priv);
1239 		break;
1240 	}
1241 }
1242 
1243 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1244 					 struct i915_power_well *power_well)
1245 {
1246 }
1247 
1248 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1249 					   struct i915_power_well *power_well)
1250 {
1251 }
1252 
1253 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1254 					     struct i915_power_well *power_well)
1255 {
1256 	return true;
1257 }
1258 
1259 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1260 					 struct i915_power_well *power_well)
1261 {
1262 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1263 		i830_enable_pipe(dev_priv, PIPE_A);
1264 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1265 		i830_enable_pipe(dev_priv, PIPE_B);
1266 }
1267 
1268 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1269 					  struct i915_power_well *power_well)
1270 {
1271 	i830_disable_pipe(dev_priv, PIPE_B);
1272 	i830_disable_pipe(dev_priv, PIPE_A);
1273 }
1274 
1275 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1276 					  struct i915_power_well *power_well)
1277 {
1278 	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1279 		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1280 }
1281 
1282 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1283 					  struct i915_power_well *power_well)
1284 {
1285 	if (power_well->count > 0)
1286 		i830_pipes_power_well_enable(dev_priv, power_well);
1287 	else
1288 		i830_pipes_power_well_disable(dev_priv, power_well);
1289 }
1290 
1291 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1292 			       struct i915_power_well *power_well, bool enable)
1293 {
1294 	int pw_idx = power_well->desc->vlv.idx;
1295 	u32 mask;
1296 	u32 state;
1297 	u32 ctrl;
1298 
1299 	mask = PUNIT_PWRGT_MASK(pw_idx);
1300 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1301 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1302 
1303 	vlv_punit_get(dev_priv);
1304 
1305 #define COND \
1306 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1307 
1308 	if (COND)
1309 		goto out;
1310 
1311 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1312 	ctrl &= ~mask;
1313 	ctrl |= state;
1314 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1315 
1316 	if (wait_for(COND, 100))
1317 		drm_err(&dev_priv->drm,
1318 			"timeout setting power well state %08x (%08x)\n",
1319 			state,
1320 			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1321 
1322 #undef COND
1323 
1324 out:
1325 	vlv_punit_put(dev_priv);
1326 }
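
/*
 * Illustrative note: the punit handshake above is "check status, write
 * ctrl, poll status". Enabling well N writes PUNIT_PWRGT_PWR_ON(N) into
 * PUNIT_REG_PWRGT_CTRL and then waits (up to 100ms via wait_for()) for the
 * same value to show up in PUNIT_REG_PWRGT_STATUS.
 */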
1327 
1328 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1329 				  struct i915_power_well *power_well)
1330 {
1331 	vlv_set_power_well(dev_priv, power_well, true);
1332 }
1333 
1334 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1335 				   struct i915_power_well *power_well)
1336 {
1337 	vlv_set_power_well(dev_priv, power_well, false);
1338 }
1339 
1340 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1341 				   struct i915_power_well *power_well)
1342 {
1343 	int pw_idx = power_well->desc->vlv.idx;
1344 	bool enabled = false;
1345 	u32 mask;
1346 	u32 state;
1347 	u32 ctrl;
1348 
1349 	mask = PUNIT_PWRGT_MASK(pw_idx);
1350 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1351 
1352 	vlv_punit_get(dev_priv);
1353 
1354 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1355 	/*
1356 	 * We only ever set the power-on and power-gate states, anything
1357 	 * else is unexpected.
1358 	 */
1359 	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1360 		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1361 	if (state == ctrl)
1362 		enabled = true;
1363 
1364 	/*
1365 	 * A transient state at this point would mean some unexpected party
1366 	 * is poking at the power controls too.
1367 	 */
1368 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1369 	drm_WARN_ON(&dev_priv->drm, ctrl != state);
1370 
1371 	vlv_punit_put(dev_priv);
1372 
1373 	return enabled;
1374 }
1375 
1376 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1377 {
1378 	u32 val;
1379 
1380 	/*
1381 	 * On driver load, a pipe may be active and driving a DSI display.
1382 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1383 	 * (and never recovering) in this case. intel_dsi_post_disable() will
1384 	 * clear it when we turn off the display.
1385 	 */
1386 	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1387 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1388 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1389 	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1390 
1391 	/*
1392 	 * Disable trickle feed and enable pnd deadline calculation
1393 	 */
1394 	intel_de_write(dev_priv, MI_ARB_VLV,
1395 		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1396 	intel_de_write(dev_priv, CBR1_VLV, 0);
1397 
1398 	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1399 	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1400 		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1401 					 1000));
1402 }
1403 
1404 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1405 {
1406 	struct intel_encoder *encoder;
1407 	enum pipe pipe;
1408 
1409 	/*
1410 	 * Enable the CRI clock source so we can get at the
1411 	 * display and the reference clock for VGA
1412 	 * hotplug / manual detection. Supposedly DSI also
1413 	 * needs the ref clock up and running.
1414 	 *
1415 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1416 	 */
1417 	for_each_pipe(dev_priv, pipe) {
1418 		u32 val = intel_de_read(dev_priv, DPLL(pipe));
1419 
1420 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1421 		if (pipe != PIPE_A)
1422 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1423 
1424 		intel_de_write(dev_priv, DPLL(pipe), val);
1425 	}
1426 
1427 	vlv_init_display_clock_gating(dev_priv);
1428 
1429 	spin_lock_irq(&dev_priv->irq_lock);
1430 	valleyview_enable_display_irqs(dev_priv);
1431 	spin_unlock_irq(&dev_priv->irq_lock);
1432 
1433 	/*
1434 	 * During driver initialization/resume we can avoid restoring the
1435 	 * part of the HW/SW state that will be explicitly initialized anyway.
1436 	 */
1437 	if (dev_priv->power_domains.initializing)
1438 		return;
1439 
1440 	intel_hpd_init(dev_priv);
1441 	intel_hpd_poll_disable(dev_priv);
1442 
1443 	/* Re-enable the ADPA, if we have one */
1444 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1445 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1446 			intel_crt_reset(&encoder->base);
1447 	}
1448 
1449 	intel_vga_redisable_power_on(dev_priv);
1450 
1451 	intel_pps_unlock_regs_wa(dev_priv);
1452 }
1453 
1454 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1455 {
1456 	spin_lock_irq(&dev_priv->irq_lock);
1457 	valleyview_disable_display_irqs(dev_priv);
1458 	spin_unlock_irq(&dev_priv->irq_lock);
1459 
1460 	/* make sure we're done processing display irqs */
1461 	intel_synchronize_irq(dev_priv);
1462 
1463 	intel_pps_reset_all(dev_priv);
1464 
1465 	/* Prevent us from re-enabling polling by accident in late suspend */
1466 	if (!dev_priv->drm.dev->power.is_suspended)
1467 		intel_hpd_poll_enable(dev_priv);
1468 }
1469 
1470 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1471 					  struct i915_power_well *power_well)
1472 {
1473 	vlv_set_power_well(dev_priv, power_well, true);
1474 
1475 	vlv_display_power_well_init(dev_priv);
1476 }
1477 
1478 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1479 					   struct i915_power_well *power_well)
1480 {
1481 	vlv_display_power_well_deinit(dev_priv);
1482 
1483 	vlv_set_power_well(dev_priv, power_well, false);
1484 }
1485 
1486 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1487 					   struct i915_power_well *power_well)
1488 {
1489 	/* since ref/cri clock was enabled */
1490 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1491 
1492 	vlv_set_power_well(dev_priv, power_well, true);
1493 
1494 	/*
1495 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1496 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1497 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1498 	 *   b.	The other bits such as sfr settings / modesel may all
1499 	 *	be set to 0.
1500 	 *
1501 	 * This should only be done on init and resume from S3 with
1502 	 * both PLLs disabled, or we risk losing DPIO and PLL
1503 	 * synchronization.
1504 	 */
1505 	intel_de_write(dev_priv, DPIO_CTL,
1506 		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1507 }
1508 
1509 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1510 					    struct i915_power_well *power_well)
1511 {
1512 	enum pipe pipe;
1513 
1514 	for_each_pipe(dev_priv, pipe)
1515 		assert_pll_disabled(dev_priv, pipe);
1516 
1517 	/* Assert common reset */
1518 	intel_de_write(dev_priv, DPIO_CTL,
1519 		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1520 
1521 	vlv_set_power_well(dev_priv, power_well, false);
1522 }
1523 
1524 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1525 
1526 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
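
/*
 * Example: BITS_SET(0xf3, 0x03) is true while BITS_SET(0xf3, 0x0c) is
 * false, i.e. the predicate checks that *all* of @bits are set in @val,
 * not just some of them.
 */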
1527 
1528 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1529 {
1530 	struct i915_power_well *cmn_bc =
1531 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1532 	struct i915_power_well *cmn_d =
1533 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1534 	u32 phy_control = dev_priv->chv_phy_control;
1535 	u32 phy_status = 0;
1536 	u32 phy_status_mask = 0xffffffff;
1537 
1538 	/*
1539 	 * The BIOS can leave the PHY in some weird state
1540 	 * where it doesn't fully power down some parts.
1541 	 * Disable the asserts until the PHY has been fully
1542 	 * reset (i.e. the power well has been disabled at
1543 	 * least once).
1544 	 */
1545 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1546 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1547 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1548 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1549 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1550 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1551 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1552 
1553 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1554 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1555 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1556 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1557 
1558 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1559 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1560 
1561 		/* this assumes override is only used to enable lanes */
1562 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1563 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1564 
1565 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1566 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1567 
1568 		/* CL1 is on whenever anything is on in either channel */
1569 		if (BITS_SET(phy_control,
1570 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1571 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1572 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1573 
1574 		/*
1575 		 * The DPLLB check accounts for the pipe B + port A usage
1576 		 * with CL2 powered up but all the lanes in the second channel
1577 		 * powered down.
1578 		 */
1579 		if (BITS_SET(phy_control,
1580 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1581 		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1582 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1583 
1584 		if (BITS_SET(phy_control,
1585 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1586 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1587 		if (BITS_SET(phy_control,
1588 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1589 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1590 
1591 		if (BITS_SET(phy_control,
1592 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1593 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1594 		if (BITS_SET(phy_control,
1595 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1596 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1597 	}
1598 
1599 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1600 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1601 
1602 		/* this assumes override is only used to enable lanes */
1603 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1604 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1605 
1606 		if (BITS_SET(phy_control,
1607 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1608 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1609 
1610 		if (BITS_SET(phy_control,
1611 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1612 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1613 		if (BITS_SET(phy_control,
1614 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1615 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1616 	}
1617 
1618 	phy_status &= phy_status_mask;
1619 
1620 	/*
1621 	 * The PHY may be busy with some initial calibration and whatnot,
1622 	 * so the power state can take a while to actually change.
1623 	 */
1624 	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1625 				       phy_status_mask, phy_status, 10))
1626 		drm_err(&dev_priv->drm,
1627 			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1628 			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1629 			phy_status, dev_priv->chv_phy_control);
1630 }
1631 
1632 #undef BITS_SET
1633 
1634 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1635 					   struct i915_power_well *power_well)
1636 {
1637 	enum dpio_phy phy;
1638 	enum pipe pipe;
1639 	u32 tmp;
1640 
1641 	drm_WARN_ON_ONCE(&dev_priv->drm,
1642 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1643 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1644 
1645 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1646 		pipe = PIPE_A;
1647 		phy = DPIO_PHY0;
1648 	} else {
1649 		pipe = PIPE_C;
1650 		phy = DPIO_PHY1;
1651 	}
1652 
1653 	/* since ref/cri clock was enabled */
1654 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1655 	vlv_set_power_well(dev_priv, power_well, true);
1656 
1657 	/* Poll for phypwrgood signal */
1658 	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1659 				  PHY_POWERGOOD(phy), 1))
1660 		drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
1661 			phy);
1662 
1663 	vlv_dpio_get(dev_priv);
1664 
1665 	/* Enable dynamic power down */
1666 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1667 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1668 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1669 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1670 
1671 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1672 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1673 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1674 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1675 	} else {
1676 		/*
1677 		 * Force the non-existent CL2 off. BXT does this
1678 		 * too, so maybe it saves some power even though
1679 		 * CL2 doesn't exist?
1680 		 */
1681 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1682 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1683 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1684 	}
1685 
1686 	vlv_dpio_put(dev_priv);
1687 
1688 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1689 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1690 		       dev_priv->chv_phy_control);
1691 
1692 	drm_dbg_kms(&dev_priv->drm,
1693 		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1694 		    phy, dev_priv->chv_phy_control);
1695 
1696 	assert_chv_phy_status(dev_priv);
1697 }
1698 
1699 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1700 					    struct i915_power_well *power_well)
1701 {
1702 	enum dpio_phy phy;
1703 
1704 	drm_WARN_ON_ONCE(&dev_priv->drm,
1705 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1706 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1707 
1708 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1709 		phy = DPIO_PHY0;
1710 		assert_pll_disabled(dev_priv, PIPE_A);
1711 		assert_pll_disabled(dev_priv, PIPE_B);
1712 	} else {
1713 		phy = DPIO_PHY1;
1714 		assert_pll_disabled(dev_priv, PIPE_C);
1715 	}
1716 
1717 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1718 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1719 		       dev_priv->chv_phy_control);
1720 
1721 	vlv_set_power_well(dev_priv, power_well, false);
1722 
1723 	drm_dbg_kms(&dev_priv->drm,
1724 		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1725 		    phy, dev_priv->chv_phy_control);
1726 
1727 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1728 	dev_priv->chv_phy_assert[phy] = true;
1729 
1730 	assert_chv_phy_status(dev_priv);
1731 }
1732 
1733 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1734 				     enum dpio_channel ch, bool override, unsigned int mask)
1735 {
1736 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1737 	u32 reg, val, expected, actual;
1738 
1739 	/*
1740 	 * The BIOS can leave the PHY in some weird state
1741 	 * where it doesn't fully power down some parts.
1742 	 * Disable the asserts until the PHY has been fully
1743 	 * reset (i.e. the power well has been disabled at
1744 	 * least once).
1745 	 */
1746 	if (!dev_priv->chv_phy_assert[phy])
1747 		return;
1748 
1749 	if (ch == DPIO_CH0)
1750 		reg = _CHV_CMN_DW0_CH0;
1751 	else
1752 		reg = _CHV_CMN_DW6_CH1;
1753 
1754 	vlv_dpio_get(dev_priv);
1755 	val = vlv_dpio_read(dev_priv, pipe, reg);
1756 	vlv_dpio_put(dev_priv);
1757 
1758 	/*
1759 	 * This assumes !override is only used when the port is disabled.
1760 	 * All lanes should power down even without the override when
1761 	 * the port is disabled.
1762 	 */
1763 	if (!override || mask == 0xf) {
1764 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1765 		/*
1766 		 * If CH1 common lane is not active anymore
1767 		 * (eg. for pipe B DPLL) the entire channel will
1768 		 * shut down, which causes the common lane registers
1769 		 * to read as 0. That means we can't actually check
1770 		 * the lane power down status bits, but as the entire
1771 		 * register reads as 0 it's a good indication that the
1772 		 * channel is indeed entirely powered down.
1773 		 */
1774 		if (ch == DPIO_CH1 && val == 0)
1775 			expected = 0;
1776 	} else if (mask != 0x0) {
1777 		expected = DPIO_ANYDL_POWERDOWN;
1778 	} else {
1779 		expected = 0;
1780 	}
1781 
1782 	if (ch == DPIO_CH0)
1783 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1784 	else
1785 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1786 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1787 
1788 	drm_WARN(&dev_priv->drm, actual != expected,
1789 		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1790 		 !!(actual & DPIO_ALLDL_POWERDOWN),
1791 		 !!(actual & DPIO_ANYDL_POWERDOWN),
1792 		 !!(expected & DPIO_ALLDL_POWERDOWN),
1793 		 !!(expected & DPIO_ANYDL_POWERDOWN),
1794 		 reg, val);
1795 }
1796 
1797 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1798 			  enum dpio_channel ch, bool override)
1799 {
1800 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1801 	bool was_override;
1802 
1803 	mutex_lock(&power_domains->lock);
1804 
1805 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1806 
1807 	if (override == was_override)
1808 		goto out;
1809 
1810 	if (override)
1811 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1812 	else
1813 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1814 
1815 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1816 		       dev_priv->chv_phy_control);
1817 
1818 	drm_dbg_kms(&dev_priv->drm,
1819 		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1820 		    phy, ch, dev_priv->chv_phy_control);
1821 
1822 	assert_chv_phy_status(dev_priv);
1823 
1824 out:
1825 	mutex_unlock(&power_domains->lock);
1826 
1827 	return was_override;
1828 }
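
/*
 * Illustrative caller sketch (hypothetical, not from this file): a PHY
 * bring-up path would typically force the channel on around the lane
 * programming and later restore the previous override state via the
 * returned value:
 *
 *	bool was_override;
 *
 *	was_override = chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, true);
 *	...program the channel's lanes...
 *	chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, was_override);
 */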
1829 
1830 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1831 			     bool override, unsigned int mask)
1832 {
1833 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1834 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1835 	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1836 	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1837 
1838 	mutex_lock(&power_domains->lock);
1839 
1840 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1841 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1842 
1843 	if (override)
1844 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1845 	else
1846 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1847 
1848 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1849 		       dev_priv->chv_phy_control);
1850 
1851 	drm_dbg_kms(&dev_priv->drm,
1852 		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1853 		    phy, ch, mask, dev_priv->chv_phy_control);
1854 
1855 	assert_chv_phy_status(dev_priv);
1856 
1857 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1858 
1859 	mutex_unlock(&power_domains->lock);
1860 }
1861 
1862 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1863 					struct i915_power_well *power_well)
1864 {
1865 	enum pipe pipe = PIPE_A;
1866 	bool enabled;
1867 	u32 state, ctrl;
1868 
1869 	vlv_punit_get(dev_priv);
1870 
1871 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1872 	/*
1873 	 * We only ever set the power-on and power-gate states, anything
1874 	 * else is unexpected.
1875 	 */
1876 	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1877 		    state != DP_SSS_PWR_GATE(pipe));
1878 	enabled = state == DP_SSS_PWR_ON(pipe);
1879 
1880 	/*
1881 	 * A transient state at this point would mean some unexpected party
1882 	 * is poking at the power controls too.
1883 	 */
1884 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1885 	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1886 
1887 	vlv_punit_put(dev_priv);
1888 
1889 	return enabled;
1890 }
1891 
1892 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1893 				    struct i915_power_well *power_well,
1894 				    bool enable)
1895 {
1896 	enum pipe pipe = PIPE_A;
1897 	u32 state;
1898 	u32 ctrl;
1899 
1900 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1901 
1902 	vlv_punit_get(dev_priv);
1903 
1904 #define COND \
1905 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1906 
1907 	if (COND)
1908 		goto out;
1909 
1910 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1911 	ctrl &= ~DP_SSC_MASK(pipe);
1912 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1913 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1914 
1915 	if (wait_for(COND, 100))
1916 		drm_err(&dev_priv->drm,
1917 			"timeout setting power well state %08x (%08x)\n",
1918 			state,
1919 			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1920 
1921 #undef COND
1922 
1923 out:
1924 	vlv_punit_put(dev_priv);
1925 }
1926 
1927 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1928 					struct i915_power_well *power_well)
1929 {
1930 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1931 		       dev_priv->chv_phy_control);
1932 }
1933 
1934 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1935 				       struct i915_power_well *power_well)
1936 {
1937 	chv_set_pipe_power_well(dev_priv, power_well, true);
1938 
1939 	vlv_display_power_well_init(dev_priv);
1940 }
1941 
1942 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1943 					struct i915_power_well *power_well)
1944 {
1945 	vlv_display_power_well_deinit(dev_priv);
1946 
1947 	chv_set_pipe_power_well(dev_priv, power_well, false);
1948 }
1949 
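/*
 * Async-put bookkeeping: domains whose last reference is dropped via the
 * asynchronous put path are parked in one of two buckets.
 * async_put_domains[0] holds the domains that the currently queued work
 * will release, async_put_domains[1] those put while that work was already
 * pending. The work handler releases bucket 0, then promotes bucket 1 to
 * bucket 0 and requeues itself (see intel_display_power_put_async_work()).
 */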
1950 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1951 {
1952 	return power_domains->async_put_domains[0] |
1953 	       power_domains->async_put_domains[1];
1954 }
1955 
1956 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1957 
1958 static bool
1959 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1960 {
1961 	struct drm_i915_private *i915 = container_of(power_domains,
1962 						     struct drm_i915_private,
1963 						     power_domains);
1964 	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1965 			    power_domains->async_put_domains[1]);
1966 }
1967 
1968 static bool
1969 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1970 {
1971 	struct drm_i915_private *i915 = container_of(power_domains,
1972 						     struct drm_i915_private,
1973 						     power_domains);
1974 	enum intel_display_power_domain domain;
1975 	bool err = false;
1976 
1977 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1978 	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1979 			   !!__async_put_domains_mask(power_domains));
1980 
1981 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1982 		err |= drm_WARN_ON(&i915->drm,
1983 				   power_domains->domain_use_count[domain] != 1);
1984 
1985 	return !err;
1986 }
1987 
1988 static void print_power_domains(struct i915_power_domains *power_domains,
1989 				const char *prefix, u64 mask)
1990 {
1991 	struct drm_i915_private *i915 = container_of(power_domains,
1992 						     struct drm_i915_private,
1993 						     power_domains);
1994 	enum intel_display_power_domain domain;
1995 
1996 	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1997 	for_each_power_domain(domain, mask)
1998 		drm_dbg(&i915->drm, "%s use_count %d\n",
1999 			intel_display_power_domain_str(domain),
2000 			power_domains->domain_use_count[domain]);
2001 }
2002 
2003 static void
2004 print_async_put_domains_state(struct i915_power_domains *power_domains)
2005 {
2006 	struct drm_i915_private *i915 = container_of(power_domains,
2007 						     struct drm_i915_private,
2008 						     power_domains);
2009 
2010 	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
2011 		power_domains->async_put_wakeref);
2012 
2013 	print_power_domains(power_domains, "async_put_domains[0]",
2014 			    power_domains->async_put_domains[0]);
2015 	print_power_domains(power_domains, "async_put_domains[1]",
2016 			    power_domains->async_put_domains[1]);
2017 }
2018 
2019 static void
2020 verify_async_put_domains_state(struct i915_power_domains *power_domains)
2021 {
2022 	if (!__async_put_domains_state_ok(power_domains))
2023 		print_async_put_domains_state(power_domains);
2024 }
2025 
2026 #else
2027 
2028 static void
2029 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2030 {
2031 }
2032 
2033 static void
2034 verify_async_put_domains_state(struct i915_power_domains *power_domains)
2035 {
2036 }
2037 
2038 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2039 
2040 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2041 {
2042 	assert_async_put_domain_masks_disjoint(power_domains);
2043 
2044 	return __async_put_domains_mask(power_domains);
2045 }
2046 
2047 static void
2048 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2049 			       enum intel_display_power_domain domain)
2050 {
2051 	assert_async_put_domain_masks_disjoint(power_domains);
2052 
2053 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2054 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2055 }
2056 
2057 static bool
2058 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2059 				       enum intel_display_power_domain domain)
2060 {
2061 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2062 	bool ret = false;
2063 
2064 	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2065 		goto out_verify;
2066 
2067 	async_put_domains_clear_domain(power_domains, domain);
2068 
2069 	ret = true;
2070 
2071 	if (async_put_domains_mask(power_domains))
2072 		goto out_verify;
2073 
2074 	cancel_delayed_work(&power_domains->async_put_work);
2075 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2076 				 fetch_and_zero(&power_domains->async_put_wakeref));
2077 out_verify:
2078 	verify_async_put_domains_state(power_domains);
2079 
2080 	return ret;
2081 }
2082 
2083 static void
2084 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2085 				 enum intel_display_power_domain domain)
2086 {
2087 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2088 	struct i915_power_well *power_well;
2089 
2090 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2091 		return;
2092 
2093 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2094 		intel_power_well_get(dev_priv, power_well);
2095 
2096 	power_domains->domain_use_count[domain]++;
2097 }
2098 
2099 /**
2100  * intel_display_power_get - grab a power domain reference
2101  * @dev_priv: i915 device instance
2102  * @domain: power domain to reference
2103  *
2104  * This function grabs a power domain reference for @domain and ensures that the
2105  * power domain and all its parents are powered up. Therefore users should only
2106  * grab a reference to the innermost power domain they need.
2107  *
2108  * Any power domain reference obtained by this function must have a symmetric
2109  * call to intel_display_power_put() to release the reference again.
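 *
 * Illustrative usage (a hypothetical caller, sketched here for clarity):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	...access the PIPE_A hardware...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);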
2110  */
2111 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2112 					enum intel_display_power_domain domain)
2113 {
2114 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2115 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2116 
2117 	mutex_lock(&power_domains->lock);
2118 	__intel_display_power_get_domain(dev_priv, domain);
2119 	mutex_unlock(&power_domains->lock);
2120 
2121 	return wakeref;
2122 }
2123 
2124 /**
2125  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2126  * @dev_priv: i915 device instance
2127  * @domain: power domain to reference
2128  *
2129  * Like intel_display_power_get(), but the reference is grabbed only if the
2130  * power domain is already enabled; nothing is powered up otherwise. Returns
2131  * a wakeref on success or 0 if the domain was not enabled. Users should only
2132  * grab a reference to the innermost power domain they need.
2133  *
2134  * Any reference obtained must be released again via intel_display_power_put().
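 *
 * Illustrative state-readout pattern (hypothetical caller):
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
 *	if (!wakeref)
 *		return;
 *	...read out the hardware state...
 *	intel_display_power_put(dev_priv, domain, wakeref);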
2135  */
2136 intel_wakeref_t
2137 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2138 				   enum intel_display_power_domain domain)
2139 {
2140 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2141 	intel_wakeref_t wakeref;
2142 	bool is_enabled;
2143 
2144 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2145 	if (!wakeref)
2146 		return 0;
2147 
2148 	mutex_lock(&power_domains->lock);
2149 
2150 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2151 		__intel_display_power_get_domain(dev_priv, domain);
2152 		is_enabled = true;
2153 	} else {
2154 		is_enabled = false;
2155 	}
2156 
2157 	mutex_unlock(&power_domains->lock);
2158 
2159 	if (!is_enabled) {
2160 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2161 		wakeref = 0;
2162 	}
2163 
2164 	return wakeref;
2165 }
2166 
2167 static void
2168 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2169 				 enum intel_display_power_domain domain)
2170 {
2171 	struct i915_power_domains *power_domains;
2172 	struct i915_power_well *power_well;
2173 	const char *name = intel_display_power_domain_str(domain);
2174 
2175 	power_domains = &dev_priv->power_domains;
2176 
2177 	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2178 		 "Use count on domain %s is already zero\n",
2179 		 name);
2180 	drm_WARN(&dev_priv->drm,
2181 		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2182 		 "Async disabling of domain %s is pending\n",
2183 		 name);
2184 
2185 	power_domains->domain_use_count[domain]--;
2186 
2187 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2188 		intel_power_well_put(dev_priv, power_well);
2189 }
2190 
2191 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2192 				      enum intel_display_power_domain domain)
2193 {
2194 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2195 
2196 	mutex_lock(&power_domains->lock);
2197 	__intel_display_power_put_domain(dev_priv, domain);
2198 	mutex_unlock(&power_domains->lock);
2199 }
2200 
2201 static void
2202 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2203 			     intel_wakeref_t wakeref)
2204 {
2205 	struct drm_i915_private *i915 = container_of(power_domains,
2206 						     struct drm_i915_private,
2207 						     power_domains);
2208 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2209 	power_domains->async_put_wakeref = wakeref;
2210 	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2211 						    &power_domains->async_put_work,
2212 						    msecs_to_jiffies(100)));
2213 }
2214 
2215 static void
2216 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2217 {
2218 	struct drm_i915_private *dev_priv =
2219 		container_of(power_domains, struct drm_i915_private,
2220 			     power_domains);
2221 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2222 	enum intel_display_power_domain domain;
2223 	intel_wakeref_t wakeref;
2224 
2225 	/*
2226 	 * The caller must already hold a raw wakeref; upgrade that to a proper
2227 	 * wakeref to make the state checker happy about the HW access during
2228 	 * power well disabling.
2229 	 */
2230 	assert_rpm_raw_wakeref_held(rpm);
2231 	wakeref = intel_runtime_pm_get(rpm);
2232 
2233 	for_each_power_domain(domain, mask) {
2234 		/* Clear before put, so put's sanity check is happy. */
2235 		async_put_domains_clear_domain(power_domains, domain);
2236 		__intel_display_power_put_domain(dev_priv, domain);
2237 	}
2238 
2239 	intel_runtime_pm_put(rpm, wakeref);
2240 }
2241 
2242 static void
2243 intel_display_power_put_async_work(struct work_struct *work)
2244 {
2245 	struct drm_i915_private *dev_priv =
2246 		container_of(work, struct drm_i915_private,
2247 			     power_domains.async_put_work.work);
2248 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2249 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2250 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2251 	intel_wakeref_t old_work_wakeref = 0;
2252 
2253 	mutex_lock(&power_domains->lock);
2254 
2255 	/*
2256 	 * Bail out if all the domain refs pending to be released were grabbed
2257 	 * by subsequent gets or a flush_work.
2258 	 */
2259 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2260 	if (!old_work_wakeref)
2261 		goto out_verify;
2262 
2263 	release_async_put_domains(power_domains,
2264 				  power_domains->async_put_domains[0]);
2265 
2266 	/* Requeue the work if more domains were async put meanwhile. */
2267 	if (power_domains->async_put_domains[1]) {
2268 		power_domains->async_put_domains[0] =
2269 			fetch_and_zero(&power_domains->async_put_domains[1]);
2270 		queue_async_put_domains_work(power_domains,
2271 					     fetch_and_zero(&new_work_wakeref));
2272 	} else {
2273 		/*
2274 		 * Cancel the work that got queued after this one got dequeued,
2275 		 * since here we released the corresponding async-put reference.
2276 		 */
2277 		cancel_delayed_work(&power_domains->async_put_work);
2278 	}
2279 
2280 out_verify:
2281 	verify_async_put_domains_state(power_domains);
2282 
2283 	mutex_unlock(&power_domains->lock);
2284 
2285 	if (old_work_wakeref)
2286 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2287 	if (new_work_wakeref)
2288 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2289 }
2290 
2291 /**
2292  * __intel_display_power_put_async - release a power domain reference asynchronously
2293  * @i915: i915 device instance
2294  * @domain: power domain to reference
2295  * @wakeref: wakeref acquired for the reference that is being released
2296  *
2297  * This function drops the power domain reference obtained by
2298  * intel_display_power_get*() and schedules a work to power down the
2299  * corresponding hardware block if this is the last reference.
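 *
 * The actual power down is delayed by roughly 100 msec (see
 * queue_async_put_domains_work()), so a power get that follows shortly
 * after can simply re-grab the pending reference instead of toggling the
 * hardware off and back on.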
2300  */
2301 void __intel_display_power_put_async(struct drm_i915_private *i915,
2302 				     enum intel_display_power_domain domain,
2303 				     intel_wakeref_t wakeref)
2304 {
2305 	struct i915_power_domains *power_domains = &i915->power_domains;
2306 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2307 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2308 
2309 	mutex_lock(&power_domains->lock);
2310 
2311 	if (power_domains->domain_use_count[domain] > 1) {
2312 		__intel_display_power_put_domain(i915, domain);
2313 
2314 		goto out_verify;
2315 	}
2316 
2317 	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2318 
2319 	/* Let a pending work requeue itself or queue a new one. */
2320 	if (power_domains->async_put_wakeref) {
2321 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2322 	} else {
2323 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2324 		queue_async_put_domains_work(power_domains,
2325 					     fetch_and_zero(&work_wakeref));
2326 	}
2327 
2328 out_verify:
2329 	verify_async_put_domains_state(power_domains);
2330 
2331 	mutex_unlock(&power_domains->lock);
2332 
2333 	if (work_wakeref)
2334 		intel_runtime_pm_put_raw(rpm, work_wakeref);
2335 
2336 	intel_runtime_pm_put(rpm, wakeref);
2337 }
2338 
2339 /**
2340  * intel_display_power_flush_work - flushes the async display power disabling work
2341  * @i915: i915 device instance
2342  *
2343  * Flushes any pending work that was scheduled by a preceding
2344  * intel_display_power_put_async() call, completing the disabling of the
2345  * corresponding power domains.
2346  *
2347  * Note that the work handler function may still be running after this
2348  * function returns; to ensure that the work handler isn't running use
2349  * intel_display_power_flush_work_sync() instead.
2350  */
2351 void intel_display_power_flush_work(struct drm_i915_private *i915)
2352 {
2353 	struct i915_power_domains *power_domains = &i915->power_domains;
2354 	intel_wakeref_t work_wakeref;
2355 
2356 	mutex_lock(&power_domains->lock);
2357 
2358 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2359 	if (!work_wakeref)
2360 		goto out_verify;
2361 
2362 	release_async_put_domains(power_domains,
2363 				  async_put_domains_mask(power_domains));
2364 	cancel_delayed_work(&power_domains->async_put_work);
2365 
2366 out_verify:
2367 	verify_async_put_domains_state(power_domains);
2368 
2369 	mutex_unlock(&power_domains->lock);
2370 
2371 	if (work_wakeref)
2372 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2373 }
2374 
2375 /**
2376  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2377  * @i915: i915 device instance
2378  *
2379  * Like intel_display_power_flush_work(), but also ensure that the work
2380  * handler function is not running any more when this function returns.
2381  */
2382 static void
2383 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2384 {
2385 	struct i915_power_domains *power_domains = &i915->power_domains;
2386 
2387 	intel_display_power_flush_work(i915);
2388 	cancel_delayed_work_sync(&power_domains->async_put_work);
2389 
2390 	verify_async_put_domains_state(power_domains);
2391 
2392 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2393 }
2394 
2395 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2396 /**
2397  * intel_display_power_put - release a power domain reference
2398  * @dev_priv: i915 device instance
2399  * @domain: power domain to reference
2400  * @wakeref: wakeref acquired for the reference that is being released
2401  *
2402  * This function drops the power domain reference obtained by
2403  * intel_display_power_get() and might power down the corresponding hardware
2404  * block right away if this is the last reference.
2405  */
2406 void intel_display_power_put(struct drm_i915_private *dev_priv,
2407 			     enum intel_display_power_domain domain,
2408 			     intel_wakeref_t wakeref)
2409 {
2410 	__intel_display_power_put(dev_priv, domain);
2411 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2412 }
2413 #else
2414 /**
2415  * intel_display_power_put_unchecked - release an unchecked power domain reference
2416  * @dev_priv: i915 device instance
2417  * @domain: power domain to reference
2418  *
2419  * This function drops the power domain reference obtained by
2420  * intel_display_power_get() and might power down the corresponding hardware
2421  * block right away if this is the last reference.
2422  *
2423  * This function is only for the power domain code's internal use to suppress
2424  * wakeref tracking when the corresponding debug kconfig option is disabled and
2425  * should not be used otherwise.
2426  */
2427 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2428 				       enum intel_display_power_domain domain)
2429 {
2430 	__intel_display_power_put(dev_priv, domain);
2431 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2432 }
2433 #endif
2434 
2435 void
2436 intel_display_power_get_in_set(struct drm_i915_private *i915,
2437 			       struct intel_display_power_domain_set *power_domain_set,
2438 			       enum intel_display_power_domain domain)
2439 {
2440 	intel_wakeref_t __maybe_unused wf;
2441 
2442 	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2443 
2444 	wf = intel_display_power_get(i915, domain);
2445 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2446 	power_domain_set->wakerefs[domain] = wf;
2447 #endif
2448 	power_domain_set->mask |= BIT_ULL(domain);
2449 }
2450 
2451 bool
2452 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
2453 					  struct intel_display_power_domain_set *power_domain_set,
2454 					  enum intel_display_power_domain domain)
2455 {
2456 	intel_wakeref_t wf;
2457 
2458 	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2459 
2460 	wf = intel_display_power_get_if_enabled(i915, domain);
2461 	if (!wf)
2462 		return false;
2463 
2464 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2465 	power_domain_set->wakerefs[domain] = wf;
2466 #endif
2467 	power_domain_set->mask |= BIT_ULL(domain);
2468 
2469 	return true;
2470 }
2471 
2472 void
2473 intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
2474 				    struct intel_display_power_domain_set *power_domain_set,
2475 				    u64 mask)
2476 {
2477 	enum intel_display_power_domain domain;
2478 
2479 	drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
2480 
2481 	for_each_power_domain(domain, mask) {
2482 		intel_wakeref_t __maybe_unused wf = -1;
2483 
2484 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2485 		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
2486 #endif
2487 		intel_display_power_put(i915, domain, wf);
2488 		power_domain_set->mask &= ~BIT_ULL(domain);
2489 	}
2490 }
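
/*
 * Illustrative use of the domain-set helpers above (hypothetical caller;
 * assumes the intel_display_power_put_all_in_set() wrapper from the header,
 * which passes power_domain_set->mask to
 * intel_display_power_put_mask_in_set()):
 *
 *	struct intel_display_power_domain_set power_domain_set = { };
 *
 *	intel_display_power_get_in_set(i915, &power_domain_set,
 *				       POWER_DOMAIN_PIPE_A);
 *	...access the hardware...
 *	intel_display_power_put_all_in_set(i915, &power_domain_set);
 */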
2491 
2492 #define I830_PIPES_POWER_DOMAINS (		\
2493 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2494 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2495 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2496 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2497 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2498 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2499 	BIT_ULL(POWER_DOMAIN_INIT))
2500 
2501 #define VLV_DISPLAY_POWER_DOMAINS (		\
2502 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2503 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2504 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2505 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2506 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2507 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2508 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2509 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2510 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2511 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2512 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2513 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2514 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2515 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |		\
2516 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2517 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2518 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2519 	BIT_ULL(POWER_DOMAIN_INIT))
2520 
2521 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2522 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2523 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2524 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2525 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2526 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2527 	BIT_ULL(POWER_DOMAIN_INIT))
2528 
2529 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2530 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2531 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2532 	BIT_ULL(POWER_DOMAIN_INIT))
2533 
2534 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2535 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2536 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2537 	BIT_ULL(POWER_DOMAIN_INIT))
2538 
2539 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2540 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2541 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2542 	BIT_ULL(POWER_DOMAIN_INIT))
2543 
2544 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2545 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2546 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2547 	BIT_ULL(POWER_DOMAIN_INIT))
2548 
2549 #define CHV_DISPLAY_POWER_DOMAINS (		\
2550 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2551 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2552 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2553 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2554 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2555 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2556 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2557 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2558 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2559 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2560 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2561 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2562 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2563 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2564 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2565 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2566 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |		\
2567 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2568 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2569 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2570 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2571 	BIT_ULL(POWER_DOMAIN_INIT))
2572 
2573 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2574 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2575 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2576 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2577 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2578 	BIT_ULL(POWER_DOMAIN_INIT))
2579 
2580 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2581 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2582 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2583 	BIT_ULL(POWER_DOMAIN_INIT))
2584 
2585 #define HSW_DISPLAY_POWER_DOMAINS (			\
2586 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2587 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2588 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2589 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2590 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2591 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2592 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2593 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2594 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2595 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2596 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2597 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2598 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2599 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2600 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2601 	BIT_ULL(POWER_DOMAIN_INIT))
2602 
2603 #define BDW_DISPLAY_POWER_DOMAINS (			\
2604 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2605 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2606 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2607 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2608 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2609 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2610 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2611 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2612 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2613 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2614 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2615 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2616 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2617 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2618 	BIT_ULL(POWER_DOMAIN_INIT))
2619 
2620 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2621 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2622 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2623 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2624 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2625 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2626 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2627 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2628 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2629 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2630 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2631 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2632 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2633 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2634 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2635 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2636 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2637 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2638 	BIT_ULL(POWER_DOMAIN_INIT))
2639 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2640 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2641 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2642 	BIT_ULL(POWER_DOMAIN_INIT))
2643 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2644 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2645 	BIT_ULL(POWER_DOMAIN_INIT))
2646 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2647 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2648 	BIT_ULL(POWER_DOMAIN_INIT))
2649 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2650 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2651 	BIT_ULL(POWER_DOMAIN_INIT))
2652 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2653 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2654 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2655 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2656 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2657 	BIT_ULL(POWER_DOMAIN_INIT))
2658 
2659 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2660 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2661 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2662 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2663 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2664 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2665 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2666 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2667 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2668 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2669 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2670 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2671 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2672 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2673 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2674 	BIT_ULL(POWER_DOMAIN_INIT))
2675 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2676 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2677 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2678 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2679 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2680 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2681 	BIT_ULL(POWER_DOMAIN_INIT))
2682 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2683 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2684 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2685 	BIT_ULL(POWER_DOMAIN_INIT))
2686 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2687 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2688 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2689 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2690 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2691 	BIT_ULL(POWER_DOMAIN_INIT))
2692 
2693 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2694 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2695 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2696 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2697 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2698 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2699 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2700 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2701 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2702 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2703 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2704 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2705 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2706 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2707 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2708 	BIT_ULL(POWER_DOMAIN_INIT))
2709 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2710 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2711 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2712 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2713 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2714 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2715 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2716 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2717 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2718 	BIT_ULL(POWER_DOMAIN_INIT))
2719 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2720 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2721 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2722 	BIT_ULL(POWER_DOMAIN_INIT))
2723 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2724 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2725 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2726 	BIT_ULL(POWER_DOMAIN_INIT))
2727 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2728 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2729 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2730 	BIT_ULL(POWER_DOMAIN_INIT))
2731 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2732 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2733 	BIT_ULL(POWER_DOMAIN_INIT))
2734 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2735 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2736 	BIT_ULL(POWER_DOMAIN_INIT))
2737 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2738 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2739 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2740 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2741 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2742 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2743 	BIT_ULL(POWER_DOMAIN_INIT))
2744 
2745 /*
2746  * ICL PW_0/PG_0 domains (HW/DMC control):
2747  * - PCI
2748  * - clocks except port PLL
2749  * - central power except FBC
2750  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2751  * ICL PW_1/PG_1 domains (HW/DMC control):
2752  * - DBUF function
2753  * - PIPE_A and its planes, except VGA
2754  * - transcoder EDP + PSR
2755  * - transcoder DSI
2756  * - DDI_A
2757  * - FBC
2758  */
2759 #define ICL_PW_4_POWER_DOMAINS (			\
2760 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2761 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2762 	BIT_ULL(POWER_DOMAIN_INIT))
2763 	/* VDSC/joining */
2764 #define ICL_PW_3_POWER_DOMAINS (			\
2765 	ICL_PW_4_POWER_DOMAINS |			\
2766 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2767 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2768 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2769 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2770 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2771 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2772 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2773 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2774 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2775 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2776 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2777 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2778 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2779 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2780 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2781 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2782 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2783 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2784 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2785 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2786 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2787 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2788 	BIT_ULL(POWER_DOMAIN_INIT))
2789 	/*
2790 	 * - transcoder WD
2791 	 * - KVMR (HW control)
2792 	 */
2793 #define ICL_PW_2_POWER_DOMAINS (			\
2794 	ICL_PW_3_POWER_DOMAINS |			\
2795 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2796 	BIT_ULL(POWER_DOMAIN_INIT))
2797 	/*
2798 	 * - KVMR (HW control)
2799 	 */
2800 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2801 	ICL_PW_2_POWER_DOMAINS |			\
2802 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2803 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2804 	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2805 	BIT_ULL(POWER_DOMAIN_INIT))
2806 
2807 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2808 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2809 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2810 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2811 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2812 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2813 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2814 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2815 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2816 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2817 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2818 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2819 
2820 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2821 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2822 	BIT_ULL(POWER_DOMAIN_AUX_A))
2823 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2824 	BIT_ULL(POWER_DOMAIN_AUX_B))
2825 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2826 	BIT_ULL(POWER_DOMAIN_AUX_C))
2827 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2828 	BIT_ULL(POWER_DOMAIN_AUX_D))
2829 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2830 	BIT_ULL(POWER_DOMAIN_AUX_E))
2831 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2832 	BIT_ULL(POWER_DOMAIN_AUX_F))
2833 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2834 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2835 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2836 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2837 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2838 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2839 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2840 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2841 
2842 #define TGL_PW_5_POWER_DOMAINS (			\
2843 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2844 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2845 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2846 	BIT_ULL(POWER_DOMAIN_INIT))
2847 
2848 #define TGL_PW_4_POWER_DOMAINS (			\
2849 	TGL_PW_5_POWER_DOMAINS |			\
2850 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2851 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2852 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2853 	BIT_ULL(POWER_DOMAIN_INIT))
2854 
2855 #define TGL_PW_3_POWER_DOMAINS (			\
2856 	TGL_PW_4_POWER_DOMAINS |			\
2857 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2858 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2859 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2860 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2861 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2862 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
2863 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
2864 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) |	\
2865 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) |	\
2866 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
2867 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
2868 	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |		\
2869 	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |		\
2870 	BIT_ULL(POWER_DOMAIN_AUX_USBC5) |		\
2871 	BIT_ULL(POWER_DOMAIN_AUX_USBC6) |		\
2872 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2873 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2874 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2875 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2876 	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |		\
2877 	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |		\
2878 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2879 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2880 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2881 	BIT_ULL(POWER_DOMAIN_INIT))
2882 
2883 #define TGL_PW_2_POWER_DOMAINS (			\
2884 	TGL_PW_3_POWER_DOMAINS |			\
2885 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2886 	BIT_ULL(POWER_DOMAIN_INIT))
2887 
2888 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2889 	TGL_PW_3_POWER_DOMAINS |			\
2890 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2891 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2892 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2893 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2894 	BIT_ULL(POWER_DOMAIN_INIT))
2895 
2896 #define TGL_DDI_IO_TC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
2897 #define TGL_DDI_IO_TC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
2898 #define TGL_DDI_IO_TC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
2899 #define TGL_DDI_IO_TC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
2900 #define TGL_DDI_IO_TC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
2901 #define TGL_DDI_IO_TC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
2902 
2903 #define TGL_AUX_A_IO_POWER_DOMAINS (		\
2904 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2905 	BIT_ULL(POWER_DOMAIN_AUX_A))
2906 #define TGL_AUX_B_IO_POWER_DOMAINS (		\
2907 	BIT_ULL(POWER_DOMAIN_AUX_B))
2908 #define TGL_AUX_C_IO_POWER_DOMAINS (		\
2909 	BIT_ULL(POWER_DOMAIN_AUX_C))
2910 
2911 #define TGL_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
2912 #define TGL_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
2913 #define TGL_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
2914 #define TGL_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
2915 #define TGL_AUX_IO_USBC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC5)
2916 #define TGL_AUX_IO_USBC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC6)
2917 
2918 #define TGL_AUX_IO_TBT1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT1)
2919 #define TGL_AUX_IO_TBT2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT2)
2920 #define TGL_AUX_IO_TBT3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT3)
2921 #define TGL_AUX_IO_TBT4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT4)
2922 #define TGL_AUX_IO_TBT5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT5)
2923 #define TGL_AUX_IO_TBT6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT6)
2924 
2925 #define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
2926 	BIT_ULL(POWER_DOMAIN_AUX_USBC1)	|	\
2927 	BIT_ULL(POWER_DOMAIN_AUX_USBC2)	|	\
2928 	BIT_ULL(POWER_DOMAIN_AUX_USBC3)	|	\
2929 	BIT_ULL(POWER_DOMAIN_AUX_USBC4)	|	\
2930 	BIT_ULL(POWER_DOMAIN_AUX_USBC5)	|	\
2931 	BIT_ULL(POWER_DOMAIN_AUX_USBC6)	|	\
2932 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
2933 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
2934 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
2935 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
2936 	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |	\
2937 	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |	\
2938 	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
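/*
 * Illustrative sketch (not part of the driver): masks like
 * TGL_TC_COLD_OFF_POWER_DOMAINS above are plain u64 bitmasks, so they
 * can be walked with the for_each_power_domain() iterator from
 * intel_display_power.h.  The helper name below is hypothetical.
 */
static void __maybe_unused tgl_print_tc_cold_off_domains(struct drm_i915_private *i915)
{
	enum intel_display_power_domain domain;

	/* Log every domain that blocks TC cold entry on TGL. */
	for_each_power_domain(domain, TGL_TC_COLD_OFF_POWER_DOMAINS)
		drm_dbg_kms(&i915->drm, "TC cold off domain: %s\n",
			    intel_display_power_domain_str(domain));
}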
2939 
2940 #define RKL_PW_4_POWER_DOMAINS (			\
2941 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2942 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2943 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2944 	BIT_ULL(POWER_DOMAIN_INIT))
2945 
2946 #define RKL_PW_3_POWER_DOMAINS (			\
2947 	RKL_PW_4_POWER_DOMAINS |			\
2948 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2949 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2950 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2951 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |		\
2952 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2953 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2954 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2955 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2956 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
2957 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
2958 	BIT_ULL(POWER_DOMAIN_INIT))
2959 
2960 /*
2961  * There is no PW_2/PG_2 on RKL.
2962  *
2963  * RKL PW_1/PG_1 domains (under HW/DMC control):
2964  * - DBUF function (note: registers are in PW0)
2965  * - PIPE_A and its planes and VDSC/joining, except VGA
2966  * - transcoder A
2967  * - DDI_A and DDI_B
2968  * - FBC
2969  *
2970  * RKL PW_0/PG_0 domains (under HW/DMC control):
2971  * - PCI
2972  * - clocks except port PLL
2973  * - shared functions:
2974  *     * interrupts except pipe interrupts
2975  *     * MBus except PIPE_MBUS_DBOX_CTL
2976  *     * DBUF registers
2977  * - central power except FBC
2978  * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
2979  */
2980 
2981 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2982 	RKL_PW_3_POWER_DOMAINS |			\
2983 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2984 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2985 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2986 	BIT_ULL(POWER_DOMAIN_INIT))
2987 
2988 /*
2989  * From DG1 onwards, Audio MMIO/VERBS lies in the PG0 power well.
2990  */
2991 #define DG1_PW_3_POWER_DOMAINS (			\
2992 	TGL_PW_4_POWER_DOMAINS |			\
2993 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2994 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2995 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2996 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2997 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2998 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
2999 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
3000 	BIT_ULL(POWER_DOMAIN_VGA) |			\
3001 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |		\
3002 	BIT_ULL(POWER_DOMAIN_INIT))
3003 
3004 #define DG1_PW_2_POWER_DOMAINS (			\
3005 	DG1_PW_3_POWER_DOMAINS |			\
3006 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
3007 	BIT_ULL(POWER_DOMAIN_INIT))
3008 
3009 #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3010 	DG1_PW_3_POWER_DOMAINS |			\
3011 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
3012 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3013 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3014 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3015 	BIT_ULL(POWER_DOMAIN_INIT))
3016 
3017 /*
3018  * XE_LPD Power Domains
3019  *
3020  * Previous platforms required that PG(n-1) be enabled before PG(n).  That
3021  * dependency chain turns into a dependency tree on XE_LPD:
3022  *
3023  *       PG0
3024  *        |
3025  *     --PG1--
3026  *    /       \
3027  *  PGA     --PG2--
3028  *         /   |   \
3029  *       PGB  PGC  PGD
3030  *
3031  * Power wells must be enabled from top to bottom and disabled from bottom
3032  * to top.  This allows pipes to be power gated independently.
3033  */
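/*
 * A minimal sketch of the rule above (hypothetical helper names; the
 * real driver derives this order from the descriptor arrays below,
 * combined with reference counting):
 *
 *	enable(PG1); enable(PG2); enable(PGD);      top-down: pipe D usable
 *	...
 *	disable(PGD); disable(PG2); disable(PG1);   bottom-up teardown
 */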
3034 
3035 #define XELPD_PW_D_POWER_DOMAINS (			\
3036 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
3037 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
3038 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
3039 	BIT_ULL(POWER_DOMAIN_INIT))
3040 
3041 #define XELPD_PW_C_POWER_DOMAINS (			\
3042 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
3043 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
3044 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
3045 	BIT_ULL(POWER_DOMAIN_INIT))
3046 
3047 #define XELPD_PW_B_POWER_DOMAINS (			\
3048 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
3049 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
3050 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
3051 	BIT_ULL(POWER_DOMAIN_INIT))
3052 
3053 #define XELPD_PW_A_POWER_DOMAINS (			\
3054 	BIT_ULL(POWER_DOMAIN_PIPE_A) |			\
3055 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
3056 	BIT_ULL(POWER_DOMAIN_INIT))
3057 
3058 #define XELPD_PW_2_POWER_DOMAINS (			\
3059 	XELPD_PW_B_POWER_DOMAINS |			\
3060 	XELPD_PW_C_POWER_DOMAINS |			\
3061 	XELPD_PW_D_POWER_DOMAINS |			\
3062 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |		\
3063 	BIT_ULL(POWER_DOMAIN_VGA) |			\
3064 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
3065 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) |	\
3066 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) |	\
3067 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
3068 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
3069 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
3070 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
3071 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
3072 	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) |		\
3073 	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) |		\
3074 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
3075 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
3076 	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |		\
3077 	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |		\
3078 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
3079 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
3080 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
3081 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
3082 	BIT_ULL(POWER_DOMAIN_INIT))
3083 
3084 /*
3085  * XELPD PW_1/PG_1 domains (under HW/DMC control):
3086  *  - DBUF function (registers are in PW0)
3087  *  - Transcoder A
3088  *  - DDI_A and DDI_B
3089  *
3090  * XELPD PW_0/PG_0 domains (under HW/DMC control):
3091  *  - PCI
3092  *  - Clocks except port PLL
3093  *  - Shared functions:
3094  *     * interrupts except pipe interrupts
3095  *     * MBus except PIPE_MBUS_DBOX_CTL
3096  *     * DBUF registers
3097  *  - Central power except FBC
3098  *  - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
3099  */
3100 
3101 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3102 	XELPD_PW_2_POWER_DOMAINS |			\
3103 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
3104 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3105 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3106 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3107 	BIT_ULL(POWER_DOMAIN_INIT))
3108 
3109 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
3110 #define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
3111 #define XELPD_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
3112 #define XELPD_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
3113 #define XELPD_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
3114 #define XELPD_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
3115 
3116 #define XELPD_AUX_IO_TBT1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT1)
3117 #define XELPD_AUX_IO_TBT2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT2)
3118 #define XELPD_AUX_IO_TBT3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT3)
3119 #define XELPD_AUX_IO_TBT4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT4)
3120 
3121 #define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
3122 #define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
3123 #define XELPD_DDI_IO_TC1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
3124 #define XELPD_DDI_IO_TC2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
3125 #define XELPD_DDI_IO_TC3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
3126 #define XELPD_DDI_IO_TC4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
3127 
3128 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
3129 	.sync_hw = i9xx_power_well_sync_hw_noop,
3130 	.enable = i9xx_always_on_power_well_noop,
3131 	.disable = i9xx_always_on_power_well_noop,
3132 	.is_enabled = i9xx_always_on_power_well_enabled,
3133 };
3134 
3135 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
3136 	.sync_hw = chv_pipe_power_well_sync_hw,
3137 	.enable = chv_pipe_power_well_enable,
3138 	.disable = chv_pipe_power_well_disable,
3139 	.is_enabled = chv_pipe_power_well_enabled,
3140 };
3141 
3142 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
3143 	.sync_hw = i9xx_power_well_sync_hw_noop,
3144 	.enable = chv_dpio_cmn_power_well_enable,
3145 	.disable = chv_dpio_cmn_power_well_disable,
3146 	.is_enabled = vlv_power_well_enabled,
3147 };
3148 
3149 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
3150 	{
3151 		.name = "always-on",
3152 		.always_on = true,
3153 		.domains = POWER_DOMAIN_MASK,
3154 		.ops = &i9xx_always_on_power_well_ops,
3155 		.id = DISP_PW_ID_NONE,
3156 	},
3157 };
3158 
3159 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3160 	.sync_hw = i830_pipes_power_well_sync_hw,
3161 	.enable = i830_pipes_power_well_enable,
3162 	.disable = i830_pipes_power_well_disable,
3163 	.is_enabled = i830_pipes_power_well_enabled,
3164 };
3165 
3166 static const struct i915_power_well_desc i830_power_wells[] = {
3167 	{
3168 		.name = "always-on",
3169 		.always_on = true,
3170 		.domains = POWER_DOMAIN_MASK,
3171 		.ops = &i9xx_always_on_power_well_ops,
3172 		.id = DISP_PW_ID_NONE,
3173 	},
3174 	{
3175 		.name = "pipes",
3176 		.domains = I830_PIPES_POWER_DOMAINS,
3177 		.ops = &i830_pipes_power_well_ops,
3178 		.id = DISP_PW_ID_NONE,
3179 	},
3180 };
3181 
3182 static const struct i915_power_well_ops hsw_power_well_ops = {
3183 	.sync_hw = hsw_power_well_sync_hw,
3184 	.enable = hsw_power_well_enable,
3185 	.disable = hsw_power_well_disable,
3186 	.is_enabled = hsw_power_well_enabled,
3187 };
3188 
3189 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3190 	.sync_hw = i9xx_power_well_sync_hw_noop,
3191 	.enable = gen9_dc_off_power_well_enable,
3192 	.disable = gen9_dc_off_power_well_disable,
3193 	.is_enabled = gen9_dc_off_power_well_enabled,
3194 };
3195 
3196 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3197 	.sync_hw = i9xx_power_well_sync_hw_noop,
3198 	.enable = bxt_dpio_cmn_power_well_enable,
3199 	.disable = bxt_dpio_cmn_power_well_disable,
3200 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
3201 };
3202 
3203 static const struct i915_power_well_regs hsw_power_well_regs = {
3204 	.bios	= HSW_PWR_WELL_CTL1,
3205 	.driver	= HSW_PWR_WELL_CTL2,
3206 	.kvmr	= HSW_PWR_WELL_CTL3,
3207 	.debug	= HSW_PWR_WELL_CTL4,
3208 };
3209 
3210 static const struct i915_power_well_desc hsw_power_wells[] = {
3211 	{
3212 		.name = "always-on",
3213 		.always_on = true,
3214 		.domains = POWER_DOMAIN_MASK,
3215 		.ops = &i9xx_always_on_power_well_ops,
3216 		.id = DISP_PW_ID_NONE,
3217 	},
3218 	{
3219 		.name = "display",
3220 		.domains = HSW_DISPLAY_POWER_DOMAINS,
3221 		.ops = &hsw_power_well_ops,
3222 		.id = HSW_DISP_PW_GLOBAL,
3223 		{
3224 			.hsw.regs = &hsw_power_well_regs,
3225 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3226 			.hsw.has_vga = true,
3227 		},
3228 	},
3229 };
3230 
3231 static const struct i915_power_well_desc bdw_power_wells[] = {
3232 	{
3233 		.name = "always-on",
3234 		.always_on = true,
3235 		.domains = POWER_DOMAIN_MASK,
3236 		.ops = &i9xx_always_on_power_well_ops,
3237 		.id = DISP_PW_ID_NONE,
3238 	},
3239 	{
3240 		.name = "display",
3241 		.domains = BDW_DISPLAY_POWER_DOMAINS,
3242 		.ops = &hsw_power_well_ops,
3243 		.id = HSW_DISP_PW_GLOBAL,
3244 		{
3245 			.hsw.regs = &hsw_power_well_regs,
3246 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3247 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3248 			.hsw.has_vga = true,
3249 		},
3250 	},
3251 };
3252 
3253 static const struct i915_power_well_ops vlv_display_power_well_ops = {
3254 	.sync_hw = i9xx_power_well_sync_hw_noop,
3255 	.enable = vlv_display_power_well_enable,
3256 	.disable = vlv_display_power_well_disable,
3257 	.is_enabled = vlv_power_well_enabled,
3258 };
3259 
3260 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3261 	.sync_hw = i9xx_power_well_sync_hw_noop,
3262 	.enable = vlv_dpio_cmn_power_well_enable,
3263 	.disable = vlv_dpio_cmn_power_well_disable,
3264 	.is_enabled = vlv_power_well_enabled,
3265 };
3266 
3267 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3268 	.sync_hw = i9xx_power_well_sync_hw_noop,
3269 	.enable = vlv_power_well_enable,
3270 	.disable = vlv_power_well_disable,
3271 	.is_enabled = vlv_power_well_enabled,
3272 };
3273 
3274 static const struct i915_power_well_desc vlv_power_wells[] = {
3275 	{
3276 		.name = "always-on",
3277 		.always_on = true,
3278 		.domains = POWER_DOMAIN_MASK,
3279 		.ops = &i9xx_always_on_power_well_ops,
3280 		.id = DISP_PW_ID_NONE,
3281 	},
3282 	{
3283 		.name = "display",
3284 		.domains = VLV_DISPLAY_POWER_DOMAINS,
3285 		.ops = &vlv_display_power_well_ops,
3286 		.id = VLV_DISP_PW_DISP2D,
3287 		{
3288 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3289 		},
3290 	},
3291 	{
3292 		.name = "dpio-tx-b-01",
3293 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3294 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3295 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3296 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3297 		.ops = &vlv_dpio_power_well_ops,
3298 		.id = DISP_PW_ID_NONE,
3299 		{
3300 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3301 		},
3302 	},
3303 	{
3304 		.name = "dpio-tx-b-23",
3305 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3306 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3307 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3308 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3309 		.ops = &vlv_dpio_power_well_ops,
3310 		.id = DISP_PW_ID_NONE,
3311 		{
3312 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3313 		},
3314 	},
3315 	{
3316 		.name = "dpio-tx-c-01",
3317 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3318 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3319 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3320 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3321 		.ops = &vlv_dpio_power_well_ops,
3322 		.id = DISP_PW_ID_NONE,
3323 		{
3324 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3325 		},
3326 	},
3327 	{
3328 		.name = "dpio-tx-c-23",
3329 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3330 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3331 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3332 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3333 		.ops = &vlv_dpio_power_well_ops,
3334 		.id = DISP_PW_ID_NONE,
3335 		{
3336 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3337 		},
3338 	},
3339 	{
3340 		.name = "dpio-common",
3341 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3342 		.ops = &vlv_dpio_cmn_power_well_ops,
3343 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3344 		{
3345 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3346 		},
3347 	},
3348 };
3349 
3350 static const struct i915_power_well_desc chv_power_wells[] = {
3351 	{
3352 		.name = "always-on",
3353 		.always_on = true,
3354 		.domains = POWER_DOMAIN_MASK,
3355 		.ops = &i9xx_always_on_power_well_ops,
3356 		.id = DISP_PW_ID_NONE,
3357 	},
3358 	{
3359 		.name = "display",
3360 		/*
3361 		 * Pipe A power well is the new disp2d well. Pipe B and C
3362 		 * power wells don't actually exist. Pipe A power well is
3363 		 * required for any pipe to work.
3364 		 */
3365 		.domains = CHV_DISPLAY_POWER_DOMAINS,
3366 		.ops = &chv_pipe_power_well_ops,
3367 		.id = DISP_PW_ID_NONE,
3368 	},
3369 	{
3370 		.name = "dpio-common-bc",
3371 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3372 		.ops = &chv_dpio_cmn_power_well_ops,
3373 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3374 		{
3375 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3376 		},
3377 	},
3378 	{
3379 		.name = "dpio-common-d",
3380 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3381 		.ops = &chv_dpio_cmn_power_well_ops,
3382 		.id = CHV_DISP_PW_DPIO_CMN_D,
3383 		{
3384 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3385 		},
3386 	},
3387 };
3388 
3389 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3390 					 enum i915_power_well_id power_well_id)
3391 {
3392 	struct i915_power_well *power_well;
3393 	bool ret;
3394 
3395 	power_well = lookup_power_well(dev_priv, power_well_id);
3396 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3397 
3398 	return ret;
3399 }
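/*
 * Usage sketch (not taken from a real caller): the lookup is by well
 * ID, so only wells registered with an ID other than DISP_PW_ID_NONE
 * can be queried this way, e.g.:
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
 *		drm_dbg_kms(&dev_priv->drm, "PW2 is enabled\n");
 */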
3400 
3401 static const struct i915_power_well_desc skl_power_wells[] = {
3402 	{
3403 		.name = "always-on",
3404 		.always_on = true,
3405 		.domains = POWER_DOMAIN_MASK,
3406 		.ops = &i9xx_always_on_power_well_ops,
3407 		.id = DISP_PW_ID_NONE,
3408 	},
3409 	{
3410 		.name = "power well 1",
3411 		/* Handled by the DMC firmware */
3412 		.always_on = true,
3413 		.domains = 0,
3414 		.ops = &hsw_power_well_ops,
3415 		.id = SKL_DISP_PW_1,
3416 		{
3417 			.hsw.regs = &hsw_power_well_regs,
3418 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3419 			.hsw.has_fuses = true,
3420 		},
3421 	},
3422 	{
3423 		.name = "MISC IO power well",
3424 		/* Handled by the DMC firmware */
3425 		.always_on = true,
3426 		.domains = 0,
3427 		.ops = &hsw_power_well_ops,
3428 		.id = SKL_DISP_PW_MISC_IO,
3429 		{
3430 			.hsw.regs = &hsw_power_well_regs,
3431 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3432 		},
3433 	},
3434 	{
3435 		.name = "DC off",
3436 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3437 		.ops = &gen9_dc_off_power_well_ops,
3438 		.id = SKL_DISP_DC_OFF,
3439 	},
3440 	{
3441 		.name = "power well 2",
3442 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3443 		.ops = &hsw_power_well_ops,
3444 		.id = SKL_DISP_PW_2,
3445 		{
3446 			.hsw.regs = &hsw_power_well_regs,
3447 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3448 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3449 			.hsw.has_vga = true,
3450 			.hsw.has_fuses = true,
3451 		},
3452 	},
3453 	{
3454 		.name = "DDI A/E IO power well",
3455 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3456 		.ops = &hsw_power_well_ops,
3457 		.id = DISP_PW_ID_NONE,
3458 		{
3459 			.hsw.regs = &hsw_power_well_regs,
3460 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3461 		},
3462 	},
3463 	{
3464 		.name = "DDI B IO power well",
3465 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3466 		.ops = &hsw_power_well_ops,
3467 		.id = DISP_PW_ID_NONE,
3468 		{
3469 			.hsw.regs = &hsw_power_well_regs,
3470 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3471 		},
3472 	},
3473 	{
3474 		.name = "DDI C IO power well",
3475 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3476 		.ops = &hsw_power_well_ops,
3477 		.id = DISP_PW_ID_NONE,
3478 		{
3479 			.hsw.regs = &hsw_power_well_regs,
3480 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3481 		},
3482 	},
3483 	{
3484 		.name = "DDI D IO power well",
3485 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3486 		.ops = &hsw_power_well_ops,
3487 		.id = DISP_PW_ID_NONE,
3488 		{
3489 			.hsw.regs = &hsw_power_well_regs,
3490 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3491 		},
3492 	},
3493 };
3494 
3495 static const struct i915_power_well_desc bxt_power_wells[] = {
3496 	{
3497 		.name = "always-on",
3498 		.always_on = true,
3499 		.domains = POWER_DOMAIN_MASK,
3500 		.ops = &i9xx_always_on_power_well_ops,
3501 		.id = DISP_PW_ID_NONE,
3502 	},
3503 	{
3504 		.name = "power well 1",
3505 		/* Handled by the DMC firmware */
3506 		.always_on = true,
3507 		.domains = 0,
3508 		.ops = &hsw_power_well_ops,
3509 		.id = SKL_DISP_PW_1,
3510 		{
3511 			.hsw.regs = &hsw_power_well_regs,
3512 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3513 			.hsw.has_fuses = true,
3514 		},
3515 	},
3516 	{
3517 		.name = "DC off",
3518 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3519 		.ops = &gen9_dc_off_power_well_ops,
3520 		.id = SKL_DISP_DC_OFF,
3521 	},
3522 	{
3523 		.name = "power well 2",
3524 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3525 		.ops = &hsw_power_well_ops,
3526 		.id = SKL_DISP_PW_2,
3527 		{
3528 			.hsw.regs = &hsw_power_well_regs,
3529 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3530 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3531 			.hsw.has_vga = true,
3532 			.hsw.has_fuses = true,
3533 		},
3534 	},
3535 	{
3536 		.name = "dpio-common-a",
3537 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3538 		.ops = &bxt_dpio_cmn_power_well_ops,
3539 		.id = BXT_DISP_PW_DPIO_CMN_A,
3540 		{
3541 			.bxt.phy = DPIO_PHY1,
3542 		},
3543 	},
3544 	{
3545 		.name = "dpio-common-bc",
3546 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3547 		.ops = &bxt_dpio_cmn_power_well_ops,
3548 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3549 		{
3550 			.bxt.phy = DPIO_PHY0,
3551 		},
3552 	},
3553 };
3554 
3555 static const struct i915_power_well_desc glk_power_wells[] = {
3556 	{
3557 		.name = "always-on",
3558 		.always_on = true,
3559 		.domains = POWER_DOMAIN_MASK,
3560 		.ops = &i9xx_always_on_power_well_ops,
3561 		.id = DISP_PW_ID_NONE,
3562 	},
3563 	{
3564 		.name = "power well 1",
3565 		/* Handled by the DMC firmware */
3566 		.always_on = true,
3567 		.domains = 0,
3568 		.ops = &hsw_power_well_ops,
3569 		.id = SKL_DISP_PW_1,
3570 		{
3571 			.hsw.regs = &hsw_power_well_regs,
3572 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3573 			.hsw.has_fuses = true,
3574 		},
3575 	},
3576 	{
3577 		.name = "DC off",
3578 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3579 		.ops = &gen9_dc_off_power_well_ops,
3580 		.id = SKL_DISP_DC_OFF,
3581 	},
3582 	{
3583 		.name = "power well 2",
3584 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3585 		.ops = &hsw_power_well_ops,
3586 		.id = SKL_DISP_PW_2,
3587 		{
3588 			.hsw.regs = &hsw_power_well_regs,
3589 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3590 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3591 			.hsw.has_vga = true,
3592 			.hsw.has_fuses = true,
3593 		},
3594 	},
3595 	{
3596 		.name = "dpio-common-a",
3597 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3598 		.ops = &bxt_dpio_cmn_power_well_ops,
3599 		.id = BXT_DISP_PW_DPIO_CMN_A,
3600 		{
3601 			.bxt.phy = DPIO_PHY1,
3602 		},
3603 	},
3604 	{
3605 		.name = "dpio-common-b",
3606 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3607 		.ops = &bxt_dpio_cmn_power_well_ops,
3608 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3609 		{
3610 			.bxt.phy = DPIO_PHY0,
3611 		},
3612 	},
3613 	{
3614 		.name = "dpio-common-c",
3615 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3616 		.ops = &bxt_dpio_cmn_power_well_ops,
3617 		.id = GLK_DISP_PW_DPIO_CMN_C,
3618 		{
3619 			.bxt.phy = DPIO_PHY2,
3620 		},
3621 	},
3622 	{
3623 		.name = "AUX A",
3624 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3625 		.ops = &hsw_power_well_ops,
3626 		.id = DISP_PW_ID_NONE,
3627 		{
3628 			.hsw.regs = &hsw_power_well_regs,
3629 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3630 		},
3631 	},
3632 	{
3633 		.name = "AUX B",
3634 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3635 		.ops = &hsw_power_well_ops,
3636 		.id = DISP_PW_ID_NONE,
3637 		{
3638 			.hsw.regs = &hsw_power_well_regs,
3639 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3640 		},
3641 	},
3642 	{
3643 		.name = "AUX C",
3644 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3645 		.ops = &hsw_power_well_ops,
3646 		.id = DISP_PW_ID_NONE,
3647 		{
3648 			.hsw.regs = &hsw_power_well_regs,
3649 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3650 		},
3651 	},
3652 	{
3653 		.name = "DDI A IO power well",
3654 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3655 		.ops = &hsw_power_well_ops,
3656 		.id = DISP_PW_ID_NONE,
3657 		{
3658 			.hsw.regs = &hsw_power_well_regs,
3659 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3660 		},
3661 	},
3662 	{
3663 		.name = "DDI B IO power well",
3664 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3665 		.ops = &hsw_power_well_ops,
3666 		.id = DISP_PW_ID_NONE,
3667 		{
3668 			.hsw.regs = &hsw_power_well_regs,
3669 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3670 		},
3671 	},
3672 	{
3673 		.name = "DDI C IO power well",
3674 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3675 		.ops = &hsw_power_well_ops,
3676 		.id = DISP_PW_ID_NONE,
3677 		{
3678 			.hsw.regs = &hsw_power_well_regs,
3679 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3680 		},
3681 	},
3682 };
3683 
3684 static const struct i915_power_well_ops icl_aux_power_well_ops = {
3685 	.sync_hw = hsw_power_well_sync_hw,
3686 	.enable = icl_aux_power_well_enable,
3687 	.disable = icl_aux_power_well_disable,
3688 	.is_enabled = hsw_power_well_enabled,
3689 };
3690 
3691 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3692 	.bios	= ICL_PWR_WELL_CTL_AUX1,
3693 	.driver	= ICL_PWR_WELL_CTL_AUX2,
3694 	.debug	= ICL_PWR_WELL_CTL_AUX4,
3695 };
3696 
3697 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3698 	.bios	= ICL_PWR_WELL_CTL_DDI1,
3699 	.driver	= ICL_PWR_WELL_CTL_DDI2,
3700 	.debug	= ICL_PWR_WELL_CTL_DDI4,
3701 };
3702 
3703 static const struct i915_power_well_desc icl_power_wells[] = {
3704 	{
3705 		.name = "always-on",
3706 		.always_on = true,
3707 		.domains = POWER_DOMAIN_MASK,
3708 		.ops = &i9xx_always_on_power_well_ops,
3709 		.id = DISP_PW_ID_NONE,
3710 	},
3711 	{
3712 		.name = "power well 1",
3713 		/* Handled by the DMC firmware */
3714 		.always_on = true,
3715 		.domains = 0,
3716 		.ops = &hsw_power_well_ops,
3717 		.id = SKL_DISP_PW_1,
3718 		{
3719 			.hsw.regs = &hsw_power_well_regs,
3720 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3721 			.hsw.has_fuses = true,
3722 		},
3723 	},
3724 	{
3725 		.name = "DC off",
3726 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3727 		.ops = &gen9_dc_off_power_well_ops,
3728 		.id = SKL_DISP_DC_OFF,
3729 	},
3730 	{
3731 		.name = "power well 2",
3732 		.domains = ICL_PW_2_POWER_DOMAINS,
3733 		.ops = &hsw_power_well_ops,
3734 		.id = SKL_DISP_PW_2,
3735 		{
3736 			.hsw.regs = &hsw_power_well_regs,
3737 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3738 			.hsw.has_fuses = true,
3739 		},
3740 	},
3741 	{
3742 		.name = "power well 3",
3743 		.domains = ICL_PW_3_POWER_DOMAINS,
3744 		.ops = &hsw_power_well_ops,
3745 		.id = ICL_DISP_PW_3,
3746 		{
3747 			.hsw.regs = &hsw_power_well_regs,
3748 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3749 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3750 			.hsw.has_vga = true,
3751 			.hsw.has_fuses = true,
3752 		},
3753 	},
3754 	{
3755 		.name = "DDI A IO",
3756 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3757 		.ops = &hsw_power_well_ops,
3758 		.id = DISP_PW_ID_NONE,
3759 		{
3760 			.hsw.regs = &icl_ddi_power_well_regs,
3761 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3762 		},
3763 	},
3764 	{
3765 		.name = "DDI B IO",
3766 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3767 		.ops = &hsw_power_well_ops,
3768 		.id = DISP_PW_ID_NONE,
3769 		{
3770 			.hsw.regs = &icl_ddi_power_well_regs,
3771 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3772 		},
3773 	},
3774 	{
3775 		.name = "DDI C IO",
3776 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3777 		.ops = &hsw_power_well_ops,
3778 		.id = DISP_PW_ID_NONE,
3779 		{
3780 			.hsw.regs = &icl_ddi_power_well_regs,
3781 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3782 		},
3783 	},
3784 	{
3785 		.name = "DDI D IO",
3786 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3787 		.ops = &hsw_power_well_ops,
3788 		.id = DISP_PW_ID_NONE,
3789 		{
3790 			.hsw.regs = &icl_ddi_power_well_regs,
3791 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3792 		},
3793 	},
3794 	{
3795 		.name = "DDI E IO",
3796 		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3797 		.ops = &hsw_power_well_ops,
3798 		.id = DISP_PW_ID_NONE,
3799 		{
3800 			.hsw.regs = &icl_ddi_power_well_regs,
3801 			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3802 		},
3803 	},
3804 	{
3805 		.name = "DDI F IO",
3806 		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3807 		.ops = &hsw_power_well_ops,
3808 		.id = DISP_PW_ID_NONE,
3809 		{
3810 			.hsw.regs = &icl_ddi_power_well_regs,
3811 			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3812 		},
3813 	},
3814 	{
3815 		.name = "AUX A",
3816 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3817 		.ops = &icl_aux_power_well_ops,
3818 		.id = DISP_PW_ID_NONE,
3819 		{
3820 			.hsw.regs = &icl_aux_power_well_regs,
3821 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3822 		},
3823 	},
3824 	{
3825 		.name = "AUX B",
3826 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3827 		.ops = &icl_aux_power_well_ops,
3828 		.id = DISP_PW_ID_NONE,
3829 		{
3830 			.hsw.regs = &icl_aux_power_well_regs,
3831 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3832 		},
3833 	},
3834 	{
3835 		.name = "AUX C TC1",
3836 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3837 		.ops = &icl_aux_power_well_ops,
3838 		.id = DISP_PW_ID_NONE,
3839 		{
3840 			.hsw.regs = &icl_aux_power_well_regs,
3841 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3842 			.hsw.is_tc_tbt = false,
3843 		},
3844 	},
3845 	{
3846 		.name = "AUX D TC2",
3847 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3848 		.ops = &icl_aux_power_well_ops,
3849 		.id = DISP_PW_ID_NONE,
3850 		{
3851 			.hsw.regs = &icl_aux_power_well_regs,
3852 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3853 			.hsw.is_tc_tbt = false,
3854 		},
3855 	},
3856 	{
3857 		.name = "AUX E TC3",
3858 		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3859 		.ops = &icl_aux_power_well_ops,
3860 		.id = DISP_PW_ID_NONE,
3861 		{
3862 			.hsw.regs = &icl_aux_power_well_regs,
3863 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3864 			.hsw.is_tc_tbt = false,
3865 		},
3866 	},
3867 	{
3868 		.name = "AUX F TC4",
3869 		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3870 		.ops = &icl_aux_power_well_ops,
3871 		.id = DISP_PW_ID_NONE,
3872 		{
3873 			.hsw.regs = &icl_aux_power_well_regs,
3874 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3875 			.hsw.is_tc_tbt = false,
3876 		},
3877 	},
3878 	{
3879 		.name = "AUX C TBT1",
3880 		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3881 		.ops = &icl_aux_power_well_ops,
3882 		.id = DISP_PW_ID_NONE,
3883 		{
3884 			.hsw.regs = &icl_aux_power_well_regs,
3885 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3886 			.hsw.is_tc_tbt = true,
3887 		},
3888 	},
3889 	{
3890 		.name = "AUX D TBT2",
3891 		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3892 		.ops = &icl_aux_power_well_ops,
3893 		.id = DISP_PW_ID_NONE,
3894 		{
3895 			.hsw.regs = &icl_aux_power_well_regs,
3896 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3897 			.hsw.is_tc_tbt = true,
3898 		},
3899 	},
3900 	{
3901 		.name = "AUX E TBT3",
3902 		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3903 		.ops = &icl_aux_power_well_ops,
3904 		.id = DISP_PW_ID_NONE,
3905 		{
3906 			.hsw.regs = &icl_aux_power_well_regs,
3907 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3908 			.hsw.is_tc_tbt = true,
3909 		},
3910 	},
3911 	{
3912 		.name = "AUX F TBT4",
3913 		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3914 		.ops = &icl_aux_power_well_ops,
3915 		.id = DISP_PW_ID_NONE,
3916 		{
3917 			.hsw.regs = &icl_aux_power_well_regs,
3918 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3919 			.hsw.is_tc_tbt = true,
3920 		},
3921 	},
3922 	{
3923 		.name = "power well 4",
3924 		.domains = ICL_PW_4_POWER_DOMAINS,
3925 		.ops = &hsw_power_well_ops,
3926 		.id = DISP_PW_ID_NONE,
3927 		{
3928 			.hsw.regs = &hsw_power_well_regs,
3929 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3930 			.hsw.has_fuses = true,
3931 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3932 		},
3933 	},
3934 };
3935 
3936 static void
3937 tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
3938 {
3939 	u8 tries = 0;
3940 	int ret;
3941 
3942 	while (1) {
3943 		u32 low_val;
3944 		u32 high_val = 0;
3945 
3946 		if (block)
3947 			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
3948 		else
3949 			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
3950 
3951 		/*
3952 		 * The spec states that we should time out the request after
3953 		 * 200us, but the function below times out after 500us.
3954 		 */
3955 		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
3956 					     &high_val);
3957 		if (ret == 0) {
3958 			if (block &&
3959 			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
3960 				ret = -EIO;
3961 			else
3962 				break;
3963 		}
3964 
3965 		if (++tries == 3)
3966 			break;
3967 
3968 		msleep(1);
3969 	}
3970 
3971 	if (ret)
3972 		drm_err(&i915->drm, "TC cold %sblock failed\n",
3973 			block ? "" : "un");
3974 	else
3975 		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
3976 			    block ? "" : "un");
3977 }
3978 
3979 static void
3980 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
3981 				  struct i915_power_well *power_well)
3982 {
3983 	tgl_tc_cold_request(i915, true);
3984 }
3985 
3986 static void
3987 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
3988 				   struct i915_power_well *power_well)
3989 {
3990 	tgl_tc_cold_request(i915, false);
3991 }
3992 
3993 static void
3994 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
3995 				   struct i915_power_well *power_well)
3996 {
3997 	if (power_well->count > 0)
3998 		tgl_tc_cold_off_power_well_enable(i915, power_well);
3999 	else
4000 		tgl_tc_cold_off_power_well_disable(i915, power_well);
4001 }
4002 
4003 static bool
4004 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
4005 				      struct i915_power_well *power_well)
4006 {
4007 	/*
4008 	 * Not the correct implementation, but there is no way to just read it
4009 	 * back from PCODE, so return the refcount to avoid state mismatch errors
4010 	 */
4011 	return power_well->count;
4012 }
4013 
4014 static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4015 	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4016 	.enable = tgl_tc_cold_off_power_well_enable,
4017 	.disable = tgl_tc_cold_off_power_well_disable,
4018 	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4019 };
4020 
4021 static const struct i915_power_well_desc tgl_power_wells[] = {
4022 	{
4023 		.name = "always-on",
4024 		.always_on = true,
4025 		.domains = POWER_DOMAIN_MASK,
4026 		.ops = &i9xx_always_on_power_well_ops,
4027 		.id = DISP_PW_ID_NONE,
4028 	},
4029 	{
4030 		.name = "power well 1",
4031 		/* Handled by the DMC firmware */
4032 		.always_on = true,
4033 		.domains = 0,
4034 		.ops = &hsw_power_well_ops,
4035 		.id = SKL_DISP_PW_1,
4036 		{
4037 			.hsw.regs = &hsw_power_well_regs,
4038 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4039 			.hsw.has_fuses = true,
4040 		},
4041 	},
4042 	{
4043 		.name = "DC off",
4044 		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4045 		.ops = &gen9_dc_off_power_well_ops,
4046 		.id = SKL_DISP_DC_OFF,
4047 	},
4048 	{
4049 		.name = "power well 2",
4050 		.domains = TGL_PW_2_POWER_DOMAINS,
4051 		.ops = &hsw_power_well_ops,
4052 		.id = SKL_DISP_PW_2,
4053 		{
4054 			.hsw.regs = &hsw_power_well_regs,
4055 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4056 			.hsw.has_fuses = true,
4057 		},
4058 	},
4059 	{
4060 		.name = "power well 3",
4061 		.domains = TGL_PW_3_POWER_DOMAINS,
4062 		.ops = &hsw_power_well_ops,
4063 		.id = ICL_DISP_PW_3,
4064 		{
4065 			.hsw.regs = &hsw_power_well_regs,
4066 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4067 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4068 			.hsw.has_vga = true,
4069 			.hsw.has_fuses = true,
4070 		},
4071 	},
4072 	{
4073 		.name = "DDI A IO",
4074 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4075 		.ops = &hsw_power_well_ops,
4076 		.id = DISP_PW_ID_NONE,
4077 		{
4078 			.hsw.regs = &icl_ddi_power_well_regs,
4079 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4080 		}
4081 	},
4082 	{
4083 		.name = "DDI B IO",
4084 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4085 		.ops = &hsw_power_well_ops,
4086 		.id = DISP_PW_ID_NONE,
4087 		{
4088 			.hsw.regs = &icl_ddi_power_well_regs,
4089 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4090 		}
4091 	},
4092 	{
4093 		.name = "DDI C IO",
4094 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4095 		.ops = &hsw_power_well_ops,
4096 		.id = DISP_PW_ID_NONE,
4097 		{
4098 			.hsw.regs = &icl_ddi_power_well_regs,
4099 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4100 		}
4101 	},
4102 	{
4103 		.name = "DDI IO TC1",
4104 		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4105 		.ops = &hsw_power_well_ops,
4106 		.id = DISP_PW_ID_NONE,
4107 		{
4108 			.hsw.regs = &icl_ddi_power_well_regs,
4109 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4110 		},
4111 	},
4112 	{
4113 		.name = "DDI IO TC2",
4114 		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4115 		.ops = &hsw_power_well_ops,
4116 		.id = DISP_PW_ID_NONE,
4117 		{
4118 			.hsw.regs = &icl_ddi_power_well_regs,
4119 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4120 		},
4121 	},
4122 	{
4123 		.name = "DDI IO TC3",
4124 		.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
4125 		.ops = &hsw_power_well_ops,
4126 		.id = DISP_PW_ID_NONE,
4127 		{
4128 			.hsw.regs = &icl_ddi_power_well_regs,
4129 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4130 		},
4131 	},
4132 	{
4133 		.name = "DDI IO TC4",
4134 		.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
4135 		.ops = &hsw_power_well_ops,
4136 		.id = DISP_PW_ID_NONE,
4137 		{
4138 			.hsw.regs = &icl_ddi_power_well_regs,
4139 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4140 		},
4141 	},
4142 	{
4143 		.name = "DDI IO TC5",
4144 		.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
4145 		.ops = &hsw_power_well_ops,
4146 		.id = DISP_PW_ID_NONE,
4147 		{
4148 			.hsw.regs = &icl_ddi_power_well_regs,
4149 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4150 		},
4151 	},
4152 	{
4153 		.name = "DDI IO TC6",
4154 		.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
4155 		.ops = &hsw_power_well_ops,
4156 		.id = DISP_PW_ID_NONE,
4157 		{
4158 			.hsw.regs = &icl_ddi_power_well_regs,
4159 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4160 		},
4161 	},
4162 	{
4163 		.name = "TC cold off",
4164 		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4165 		.ops = &tgl_tc_cold_off_ops,
4166 		.id = TGL_DISP_PW_TC_COLD_OFF,
4167 	},
4168 	{
4169 		.name = "AUX A",
4170 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4171 		.ops = &icl_aux_power_well_ops,
4172 		.id = DISP_PW_ID_NONE,
4173 		{
4174 			.hsw.regs = &icl_aux_power_well_regs,
4175 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4176 		},
4177 	},
4178 	{
4179 		.name = "AUX B",
4180 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4181 		.ops = &icl_aux_power_well_ops,
4182 		.id = DISP_PW_ID_NONE,
4183 		{
4184 			.hsw.regs = &icl_aux_power_well_regs,
4185 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4186 		},
4187 	},
4188 	{
4189 		.name = "AUX C",
4190 		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4191 		.ops = &icl_aux_power_well_ops,
4192 		.id = DISP_PW_ID_NONE,
4193 		{
4194 			.hsw.regs = &icl_aux_power_well_regs,
4195 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4196 		},
4197 	},
4198 	{
4199 		.name = "AUX USBC1",
4200 		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4201 		.ops = &icl_aux_power_well_ops,
4202 		.id = DISP_PW_ID_NONE,
4203 		{
4204 			.hsw.regs = &icl_aux_power_well_regs,
4205 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4206 			.hsw.is_tc_tbt = false,
4207 		},
4208 	},
4209 	{
4210 		.name = "AUX USBC2",
4211 		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4212 		.ops = &icl_aux_power_well_ops,
4213 		.id = DISP_PW_ID_NONE,
4214 		{
4215 			.hsw.regs = &icl_aux_power_well_regs,
4216 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4217 			.hsw.is_tc_tbt = false,
4218 		},
4219 	},
4220 	{
4221 		.name = "AUX USBC3",
4222 		.domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
4223 		.ops = &icl_aux_power_well_ops,
4224 		.id = DISP_PW_ID_NONE,
4225 		{
4226 			.hsw.regs = &icl_aux_power_well_regs,
4227 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4228 			.hsw.is_tc_tbt = false,
4229 		},
4230 	},
4231 	{
4232 		.name = "AUX USBC4",
4233 		.domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
4234 		.ops = &icl_aux_power_well_ops,
4235 		.id = DISP_PW_ID_NONE,
4236 		{
4237 			.hsw.regs = &icl_aux_power_well_regs,
4238 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4239 			.hsw.is_tc_tbt = false,
4240 		},
4241 	},
4242 	{
4243 		.name = "AUX USBC5",
4244 		.domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
4245 		.ops = &icl_aux_power_well_ops,
4246 		.id = DISP_PW_ID_NONE,
4247 		{
4248 			.hsw.regs = &icl_aux_power_well_regs,
4249 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4250 			.hsw.is_tc_tbt = false,
4251 		},
4252 	},
4253 	{
4254 		.name = "AUX USBC6",
4255 		.domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
4256 		.ops = &icl_aux_power_well_ops,
4257 		.id = DISP_PW_ID_NONE,
4258 		{
4259 			.hsw.regs = &icl_aux_power_well_regs,
4260 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4261 			.hsw.is_tc_tbt = false,
4262 		},
4263 	},
4264 	{
4265 		.name = "AUX TBT1",
4266 		.domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
4267 		.ops = &icl_aux_power_well_ops,
4268 		.id = DISP_PW_ID_NONE,
4269 		{
4270 			.hsw.regs = &icl_aux_power_well_regs,
4271 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4272 			.hsw.is_tc_tbt = true,
4273 		},
4274 	},
4275 	{
4276 		.name = "AUX TBT2",
4277 		.domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
4278 		.ops = &icl_aux_power_well_ops,
4279 		.id = DISP_PW_ID_NONE,
4280 		{
4281 			.hsw.regs = &icl_aux_power_well_regs,
4282 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4283 			.hsw.is_tc_tbt = true,
4284 		},
4285 	},
4286 	{
4287 		.name = "AUX TBT3",
4288 		.domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
4289 		.ops = &icl_aux_power_well_ops,
4290 		.id = DISP_PW_ID_NONE,
4291 		{
4292 			.hsw.regs = &icl_aux_power_well_regs,
4293 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4294 			.hsw.is_tc_tbt = true,
4295 		},
4296 	},
4297 	{
4298 		.name = "AUX TBT4",
4299 		.domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
4300 		.ops = &icl_aux_power_well_ops,
4301 		.id = DISP_PW_ID_NONE,
4302 		{
4303 			.hsw.regs = &icl_aux_power_well_regs,
4304 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4305 			.hsw.is_tc_tbt = true,
4306 		},
4307 	},
4308 	{
4309 		.name = "AUX TBT5",
4310 		.domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
4311 		.ops = &icl_aux_power_well_ops,
4312 		.id = DISP_PW_ID_NONE,
4313 		{
4314 			.hsw.regs = &icl_aux_power_well_regs,
4315 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4316 			.hsw.is_tc_tbt = true,
4317 		},
4318 	},
4319 	{
4320 		.name = "AUX TBT6",
4321 		.domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
4322 		.ops = &icl_aux_power_well_ops,
4323 		.id = DISP_PW_ID_NONE,
4324 		{
4325 			.hsw.regs = &icl_aux_power_well_regs,
4326 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4327 			.hsw.is_tc_tbt = true,
4328 		},
4329 	},
4330 	{
4331 		.name = "power well 4",
4332 		.domains = TGL_PW_4_POWER_DOMAINS,
4333 		.ops = &hsw_power_well_ops,
4334 		.id = DISP_PW_ID_NONE,
4335 		{
4336 			.hsw.regs = &hsw_power_well_regs,
4337 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4338 			.hsw.has_fuses = true,
4339 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4340 		}
4341 	},
4342 	{
4343 		.name = "power well 5",
4344 		.domains = TGL_PW_5_POWER_DOMAINS,
4345 		.ops = &hsw_power_well_ops,
4346 		.id = DISP_PW_ID_NONE,
4347 		{
4348 			.hsw.regs = &hsw_power_well_regs,
4349 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4350 			.hsw.has_fuses = true,
4351 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4352 		},
4353 	},
4354 };
4355 
4356 static const struct i915_power_well_desc rkl_power_wells[] = {
4357 	{
4358 		.name = "always-on",
4359 		.always_on = true,
4360 		.domains = POWER_DOMAIN_MASK,
4361 		.ops = &i9xx_always_on_power_well_ops,
4362 		.id = DISP_PW_ID_NONE,
4363 	},
4364 	{
4365 		.name = "power well 1",
4366 		/* Handled by the DMC firmware */
4367 		.always_on = true,
4368 		.domains = 0,
4369 		.ops = &hsw_power_well_ops,
4370 		.id = SKL_DISP_PW_1,
4371 		{
4372 			.hsw.regs = &hsw_power_well_regs,
4373 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4374 			.hsw.has_fuses = true,
4375 		},
4376 	},
4377 	{
4378 		.name = "DC off",
4379 		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
4380 		.ops = &gen9_dc_off_power_well_ops,
4381 		.id = SKL_DISP_DC_OFF,
4382 	},
4383 	{
4384 		.name = "power well 3",
4385 		.domains = RKL_PW_3_POWER_DOMAINS,
4386 		.ops = &hsw_power_well_ops,
4387 		.id = ICL_DISP_PW_3,
4388 		{
4389 			.hsw.regs = &hsw_power_well_regs,
4390 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4391 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4392 			.hsw.has_vga = true,
4393 			.hsw.has_fuses = true,
4394 		},
4395 	},
4396 	{
4397 		.name = "power well 4",
4398 		.domains = RKL_PW_4_POWER_DOMAINS,
4399 		.ops = &hsw_power_well_ops,
4400 		.id = DISP_PW_ID_NONE,
4401 		{
4402 			.hsw.regs = &hsw_power_well_regs,
4403 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4404 			.hsw.has_fuses = true,
4405 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4406 		}
4407 	},
4408 	{
4409 		.name = "DDI A IO",
4410 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4411 		.ops = &hsw_power_well_ops,
4412 		.id = DISP_PW_ID_NONE,
4413 		{
4414 			.hsw.regs = &icl_ddi_power_well_regs,
4415 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4416 		}
4417 	},
4418 	{
4419 		.name = "DDI B IO",
4420 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4421 		.ops = &hsw_power_well_ops,
4422 		.id = DISP_PW_ID_NONE,
4423 		{
4424 			.hsw.regs = &icl_ddi_power_well_regs,
4425 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4426 		}
4427 	},
4428 	{
4429 		.name = "DDI IO TC1",
4430 		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4431 		.ops = &hsw_power_well_ops,
4432 		.id = DISP_PW_ID_NONE,
4433 		{
4434 			.hsw.regs = &icl_ddi_power_well_regs,
4435 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4436 		},
4437 	},
4438 	{
4439 		.name = "DDI IO TC2",
4440 		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4441 		.ops = &hsw_power_well_ops,
4442 		.id = DISP_PW_ID_NONE,
4443 		{
4444 			.hsw.regs = &icl_ddi_power_well_regs,
4445 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4446 		},
4447 	},
4448 	{
4449 		.name = "AUX A",
4450 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4451 		.ops = &icl_aux_power_well_ops,
4452 		.id = DISP_PW_ID_NONE,
4453 		{
4454 			.hsw.regs = &icl_aux_power_well_regs,
4455 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4456 		},
4457 	},
4458 	{
4459 		.name = "AUX B",
4460 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4461 		.ops = &icl_aux_power_well_ops,
4462 		.id = DISP_PW_ID_NONE,
4463 		{
4464 			.hsw.regs = &icl_aux_power_well_regs,
4465 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4466 		},
4467 	},
4468 	{
4469 		.name = "AUX USBC1",
4470 		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4471 		.ops = &icl_aux_power_well_ops,
4472 		.id = DISP_PW_ID_NONE,
4473 		{
4474 			.hsw.regs = &icl_aux_power_well_regs,
4475 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4476 		},
4477 	},
4478 	{
4479 		.name = "AUX USBC2",
4480 		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4481 		.ops = &icl_aux_power_well_ops,
4482 		.id = DISP_PW_ID_NONE,
4483 		{
4484 			.hsw.regs = &icl_aux_power_well_regs,
4485 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4486 		},
4487 	},
4488 };
4489 
4490 static const struct i915_power_well_desc dg1_power_wells[] = {
4491 	{
4492 		.name = "always-on",
4493 		.always_on = true,
4494 		.domains = POWER_DOMAIN_MASK,
4495 		.ops = &i9xx_always_on_power_well_ops,
4496 		.id = DISP_PW_ID_NONE,
4497 	},
4498 	{
4499 		.name = "power well 1",
4500 		/* Handled by the DMC firmware */
4501 		.always_on = true,
4502 		.domains = 0,
4503 		.ops = &hsw_power_well_ops,
4504 		.id = SKL_DISP_PW_1,
4505 		{
4506 			.hsw.regs = &hsw_power_well_regs,
4507 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4508 			.hsw.has_fuses = true,
4509 		},
4510 	},
4511 	{
4512 		.name = "DC off",
4513 		.domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS,
4514 		.ops = &gen9_dc_off_power_well_ops,
4515 		.id = SKL_DISP_DC_OFF,
4516 	},
4517 	{
4518 		.name = "power well 2",
4519 		.domains = DG1_PW_2_POWER_DOMAINS,
4520 		.ops = &hsw_power_well_ops,
4521 		.id = SKL_DISP_PW_2,
4522 		{
4523 			.hsw.regs = &hsw_power_well_regs,
4524 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4525 			.hsw.has_fuses = true,
4526 		},
4527 	},
4528 	{
4529 		.name = "power well 3",
4530 		.domains = DG1_PW_3_POWER_DOMAINS,
4531 		.ops = &hsw_power_well_ops,
4532 		.id = ICL_DISP_PW_3,
4533 		{
4534 			.hsw.regs = &hsw_power_well_regs,
4535 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4536 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4537 			.hsw.has_vga = true,
4538 			.hsw.has_fuses = true,
4539 		},
4540 	},
4541 	{
4542 		.name = "DDI A IO",
4543 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4544 		.ops = &hsw_power_well_ops,
4545 		.id = DISP_PW_ID_NONE,
4546 		{
4547 			.hsw.regs = &icl_ddi_power_well_regs,
4548 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4549 		}
4550 	},
4551 	{
4552 		.name = "DDI B IO",
4553 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4554 		.ops = &hsw_power_well_ops,
4555 		.id = DISP_PW_ID_NONE,
4556 		{
4557 			.hsw.regs = &icl_ddi_power_well_regs,
4558 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4559 		}
4560 	},
4561 	{
4562 		.name = "DDI IO TC1",
4563 		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4564 		.ops = &hsw_power_well_ops,
4565 		.id = DISP_PW_ID_NONE,
4566 		{
4567 			.hsw.regs = &icl_ddi_power_well_regs,
4568 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4569 		},
4570 	},
4571 	{
4572 		.name = "DDI IO TC2",
4573 		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4574 		.ops = &hsw_power_well_ops,
4575 		.id = DISP_PW_ID_NONE,
4576 		{
4577 			.hsw.regs = &icl_ddi_power_well_regs,
4578 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4579 		},
4580 	},
4581 	{
4582 		.name = "AUX A",
4583 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4584 		.ops = &icl_aux_power_well_ops,
4585 		.id = DISP_PW_ID_NONE,
4586 		{
4587 			.hsw.regs = &icl_aux_power_well_regs,
4588 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4589 		},
4590 	},
4591 	{
4592 		.name = "AUX B",
4593 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4594 		.ops = &icl_aux_power_well_ops,
4595 		.id = DISP_PW_ID_NONE,
4596 		{
4597 			.hsw.regs = &icl_aux_power_well_regs,
4598 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4599 		},
4600 	},
4601 	{
4602 		.name = "AUX USBC1",
4603 		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4604 		.ops = &icl_aux_power_well_ops,
4605 		.id = DISP_PW_ID_NONE,
4606 		{
4607 			.hsw.regs = &icl_aux_power_well_regs,
4608 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4609 			.hsw.is_tc_tbt = false,
4610 		},
4611 	},
4612 	{
4613 		.name = "AUX USBC2",
4614 		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4615 		.ops = &icl_aux_power_well_ops,
4616 		.id = DISP_PW_ID_NONE,
4617 		{
4618 			.hsw.regs = &icl_aux_power_well_regs,
4619 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4620 			.hsw.is_tc_tbt = false,
4621 		},
4622 	},
4623 	{
4624 		.name = "power well 4",
4625 		.domains = TGL_PW_4_POWER_DOMAINS,
4626 		.ops = &hsw_power_well_ops,
4627 		.id = DISP_PW_ID_NONE,
4628 		{
4629 			.hsw.regs = &hsw_power_well_regs,
4630 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4631 			.hsw.has_fuses = true,
4632 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4633 		}
4634 	},
4635 	{
4636 		.name = "power well 5",
4637 		.domains = TGL_PW_5_POWER_DOMAINS,
4638 		.ops = &hsw_power_well_ops,
4639 		.id = DISP_PW_ID_NONE,
4640 		{
4641 			.hsw.regs = &hsw_power_well_regs,
4642 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4643 			.hsw.has_fuses = true,
4644 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4645 		},
4646 	},
4647 };
4648 
4649 static const struct i915_power_well_desc xelpd_power_wells[] = {
4650 	{
4651 		.name = "always-on",
4652 		.always_on = true,
4653 		.domains = POWER_DOMAIN_MASK,
4654 		.ops = &i9xx_always_on_power_well_ops,
4655 		.id = DISP_PW_ID_NONE,
4656 	},
4657 	{
4658 		.name = "power well 1",
4659 		/* Handled by the DMC firmware */
4660 		.always_on = true,
4661 		.domains = 0,
4662 		.ops = &hsw_power_well_ops,
4663 		.id = SKL_DISP_PW_1,
4664 		{
4665 			.hsw.regs = &hsw_power_well_regs,
4666 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4667 			.hsw.has_fuses = true,
4668 		},
4669 	},
4670 	{
4671 		.name = "DC off",
4672 		.domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
4673 		.ops = &gen9_dc_off_power_well_ops,
4674 		.id = SKL_DISP_DC_OFF,
4675 	},
4676 	{
4677 		.name = "power well 2",
4678 		.domains = XELPD_PW_2_POWER_DOMAINS,
4679 		.ops = &hsw_power_well_ops,
4680 		.id = SKL_DISP_PW_2,
4681 		{
4682 			.hsw.regs = &hsw_power_well_regs,
4683 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4684 			.hsw.has_vga = true,
4685 			.hsw.has_fuses = true,
4686 		},
4687 	},
4688 	{
4689 		.name = "power well A",
4690 		.domains = XELPD_PW_A_POWER_DOMAINS,
4691 		.ops = &hsw_power_well_ops,
4692 		.id = DISP_PW_ID_NONE,
4693 		{
4694 			.hsw.regs = &hsw_power_well_regs,
4695 			.hsw.idx = XELPD_PW_CTL_IDX_PW_A,
4696 			.hsw.irq_pipe_mask = BIT(PIPE_A),
4697 			.hsw.has_fuses = true,
4698 		},
4699 	},
4700 	{
4701 		.name = "power well B",
4702 		.domains = XELPD_PW_B_POWER_DOMAINS,
4703 		.ops = &hsw_power_well_ops,
4704 		.id = DISP_PW_ID_NONE,
4705 		{
4706 			.hsw.regs = &hsw_power_well_regs,
4707 			.hsw.idx = XELPD_PW_CTL_IDX_PW_B,
4708 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4709 			.hsw.has_fuses = true,
4710 		},
4711 	},
4712 	{
4713 		.name = "power well C",
4714 		.domains = XELPD_PW_C_POWER_DOMAINS,
4715 		.ops = &hsw_power_well_ops,
4716 		.id = DISP_PW_ID_NONE,
4717 		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_C,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well D",
		.domains = XELPD_PW_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_D,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		}
	},
	{
		.name = "DDI IO D_XELPD",
		.domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
		}
	},
	{
		.name = "DDI IO E_XELPD",
		.domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		}
	},
	{
		.name = "DDI IO TC2",
		.domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		}
	},
	{
		.name = "DDI IO TC3",
		.domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		}
	},
	{
		.name = "DDI IO TC4",
		.domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		}
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX D_XELPD",
		.domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX E_XELPD",
		.domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
		},
	},
	{
		.name = "AUX USBC3",
		.domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
		},
	},
	{
		.name = "AUX USBC4",
		.domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
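
/*
 * Worked example (illustrative, not a call site): with enable_dc=-1 on
 * DG1, max_dc is 3, so requested_dc becomes 3 and the returned mask is
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5. An
 * out-of-range request such as enable_dc=4 on the same platform is
 * clamped back down to 3 with a debug message.
 */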

static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_descs_sz, u64 skip_mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	u64 power_well_ids = 0;
	int power_well_count = 0;
	int i, plt_idx = 0;

	for (i = 0; i < power_well_descs_sz; i++)
		if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
			power_well_count++;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
				kcalloc(power_well_count,
					sizeof(*power_domains->power_wells),
					GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_descs_sz; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		if (BIT_ULL(id) & skip_mask)
			continue;

		power_domains->power_wells[plt_idx++].desc =
			&power_well_descs[i];

		if (id == DISP_PW_ID_NONE)
			continue;

		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}

#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs), skip_mask)

#define set_power_wells(power_domains, __power_well_descs) \
	set_power_wells_mask(power_domains, __power_well_descs, 0)
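
/*
 * Usage sketch: the platform selection in intel_power_domains_init()
 * below passes a descriptor table here, optionally skipping wells that
 * don't exist on a given SKU, e.g. for ADL-S:
 *
 *	err = set_power_wells_mask(power_domains, tgl_power_wells,
 *				   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
 */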

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (!HAS_DISPLAY(dev_priv)) {
		power_domains->power_well_count = 0;
		err = 0;
	} else if (DISPLAY_VER(dev_priv) >= 13) {
		err = set_power_wells(power_domains, xelpd_power_wells);
	} else if (IS_DG1(dev_priv)) {
		err = set_power_wells(power_domains, dg1_power_wells);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		err = set_power_wells_mask(power_domains, tgl_power_wells,
					   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		err = set_power_wells(power_domains, rkl_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 12) {
		err = set_power_wells(power_domains, tgl_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, enabledisable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * This might run in parallel with gen9_dc_off_power_well_enable(),
	 * called for instance from intel_dp_detect(). Without the lock,
	 * gen9_assert_dbuf_enabled() could preempt us after the registers
	 * have been updated but before dev_priv has, and trip its assertion
	 * on that race.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}
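
/*
 * Example (illustrative values): to power up slices 1 and 2 and turn
 * everything else off, a caller would pass
 *
 *	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));
 *
 * Bits outside INTEL_INFO()->dbuf.slice_mask trigger the WARN above.
 */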

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least one slice; we'll figure out later which
	 * slices we have and which we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong.  Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (sandybridge_pcode_write(dev_priv,
					    GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}
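
/*
 * Typical read-modify-write through these helpers (this is the exact
 * pattern hsw_disable_lcpll() below uses to disable compensation):
 *
 *	val = hsw_read_dcomp(dev_priv);
 *	val |= D_COMP_COMP_DISABLE;
 *	hsw_write_dcomp(dev_priv, val);
 */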

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}
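
/*
 * These two are paired by the PM entry points at the bottom of this
 * file: intel_display_power_suspend() calls hsw_enable_pc8() on
 * HSW/BDW and intel_display_power_resume() undoes it with
 * hsw_disable_pc8(), restoring LCPLL and the PCH reference clock.
 */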

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to the initialization sequence;
	 * previously it was left up to the BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};
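
/*
 * Example lookup: a 2-channel DDR4 machine that is not affected by
 * Wa_1409767108 matches the tgl_buddy_page_masks entry with
 * .page_mask = 0x1F, which tgl_bw_buddy_init() below then programs
 * into every BW_BUDDY_PAGE_MASK register instance in abox_mask.
 */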

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}

static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninitialize all combo phys */
	intel_combo_phy_uninit(dev_priv);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
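
/*
 * Reading the decode above: a ready mask of 0xf means all four lanes
 * of that channel are up, so the power-down override is left disabled
 * (mask forced to 0); any other value enables the per-channel override
 * and latches the current lane power-down bits into the shadow copy.
 */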

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS-enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power well toggling (keep the wells on) if the user asked so. */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
									      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
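
/*
 * Sketch of the expected ordering around this function at driver load
 * (assumed from the kernel-doc above, not a verbatim call site):
 *
 *	intel_power_domains_init(i915);
 *	intel_power_domains_init_hw(i915, false);
 *	...display HW readout takes its own power references...
 *	intel_power_domains_enable(i915);
 */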

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
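
/*
 * Sketch of the system suspend/resume ordering implied by the
 * kernel-doc here and below (assumed, not a verbatim call site):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	...system sleeps...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */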

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support, don't manually deinit the power domains. This also means
	 * the DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, "  %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif

void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915)) {
			if (i915->dmc.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->dmc.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915) &&
		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}