1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51 
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 					 enum i915_power_well_id power_well_id);
54 
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv,
57 		  enum i915_power_well_id power_well_id);
58 
/*
 * intel_display_power_domain_str - map a power domain enum to its name
 * @domain: power domain to describe
 *
 * Returns a static human-readable string for @domain, for use in debug
 * output. Unknown values trigger MISSING_CASE() and return "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
158 
intel_power_well_enable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)159 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
160 				    struct i915_power_well *power_well)
161 {
162 	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
163 	power_well->ops->enable(dev_priv, power_well);
164 	power_well->hw_enabled = true;
165 }
166 
intel_power_well_disable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)167 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
168 				     struct i915_power_well *power_well)
169 {
170 	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
171 	power_well->hw_enabled = false;
172 	power_well->ops->disable(dev_priv, power_well);
173 }
174 
intel_power_well_get(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)175 static void intel_power_well_get(struct drm_i915_private *dev_priv,
176 				 struct i915_power_well *power_well)
177 {
178 	if (!power_well->count++)
179 		intel_power_well_enable(dev_priv, power_well);
180 }
181 
intel_power_well_put(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)182 static void intel_power_well_put(struct drm_i915_private *dev_priv,
183 				 struct i915_power_well *power_well)
184 {
185 	WARN(!power_well->count, "Use count on power well %s is already zero",
186 	     power_well->name);
187 
188 	if (!--power_well->count)
189 		intel_power_well_disable(dev_priv, power_well);
190 }
191 
192 /**
193  * __intel_display_power_is_enabled - unlocked check for a power domain
194  * @dev_priv: i915 device instance
195  * @domain: power domain to check
196  *
197  * This is the unlocked version of intel_display_power_is_enabled() and should
198  * only be used from error capture and recovery code where deadlocks are
199  * possible.
200  *
201  * Returns:
202  * True when the power domain is enabled, false otherwise.
203  */
__intel_display_power_is_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)204 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
205 				      enum intel_display_power_domain domain)
206 {
207 	struct i915_power_well *power_well;
208 	bool is_enabled;
209 
210 	if (dev_priv->runtime_pm.suspended)
211 		return false;
212 
213 	is_enabled = true;
214 
215 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
216 		if (power_well->always_on)
217 			continue;
218 
219 		if (!power_well->hw_enabled) {
220 			is_enabled = false;
221 			break;
222 		}
223 	}
224 
225 	return is_enabled;
226 }
227 
228 /**
229  * intel_display_power_is_enabled - check for a power domain
230  * @dev_priv: i915 device instance
231  * @domain: power domain to check
232  *
233  * This function can be used to check the hw power domain state. It is mostly
234  * used in hardware state readout functions. Everywhere else code should rely
235  * upon explicit power domain reference counting to ensure that the hardware
236  * block is powered up before accessing it.
237  *
238  * Callers must hold the relevant modesetting locks to ensure that concurrent
239  * threads can't disable the power well while the caller tries to read a few
240  * registers.
241  *
242  * Returns:
243  * True when the power domain is enabled, false otherwise.
244  */
intel_display_power_is_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)245 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
246 				    enum intel_display_power_domain domain)
247 {
248 	struct i915_power_domains *power_domains;
249 	bool ret;
250 
251 	power_domains = &dev_priv->power_domains;
252 
253 	mutex_lock(&power_domains->lock);
254 	ret = __intel_display_power_is_enabled(dev_priv, domain);
255 	mutex_unlock(&power_domains->lock);
256 
257 	return ret;
258 }
259 
260 /**
261  * intel_display_set_init_power - set the initial power domain state
262  * @dev_priv: i915 device instance
263  * @enable: whether to enable or disable the initial power domain state
264  *
265  * For simplicity our driver load/unload and system suspend/resume code assumes
266  * that all power domains are always enabled. This functions controls the state
267  * of this little hack. While the initial power domain state is enabled runtime
268  * pm is effectively disabled.
269  */
intel_display_set_init_power(struct drm_i915_private * dev_priv,bool enable)270 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
271 				  bool enable)
272 {
273 	if (dev_priv->power_domains.init_power_on == enable)
274 		return;
275 
276 	if (enable)
277 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
278 	else
279 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
280 
281 	dev_priv->power_domains.init_power_on = enable;
282 }
283 
284 /*
285  * Starting with Haswell, we have a "Power Down Well" that can be turned off
286  * when not needed anymore. We have 4 registers that can request the power well
287  * to be enabled, and it will only be disabled if none of the registers is
288  * requesting it to be enabled.
289  */
hsw_power_well_post_enable(struct drm_i915_private * dev_priv,u8 irq_pipe_mask,bool has_vga)290 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
291 				       u8 irq_pipe_mask, bool has_vga)
292 {
293 	struct pci_dev *pdev = dev_priv->drm.pdev;
294 
295 	/*
296 	 * After we re-enable the power well, if we touch VGA register 0x3d5
297 	 * we'll get unclaimed register interrupts. This stops after we write
298 	 * anything to the VGA MSR register. The vgacon module uses this
299 	 * register all the time, so if we unbind our driver and, as a
300 	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
301 	 * console_unlock(). So make here we touch the VGA MSR register, making
302 	 * sure vgacon can keep working normally without triggering interrupts
303 	 * and error messages.
304 	 */
305 	if (has_vga) {
306 		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
307 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
308 		vga_put(pdev, VGA_RSRC_LEGACY_IO);
309 	}
310 
311 	if (irq_pipe_mask)
312 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
313 }
314 
hsw_power_well_pre_disable(struct drm_i915_private * dev_priv,u8 irq_pipe_mask)315 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
316 				       u8 irq_pipe_mask)
317 {
318 	if (irq_pipe_mask)
319 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
320 }
321 
322 
hsw_wait_for_power_well_enable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)323 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
324 					   struct i915_power_well *power_well)
325 {
326 	enum i915_power_well_id id = power_well->id;
327 
328 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
329 	WARN_ON(intel_wait_for_register(dev_priv,
330 					HSW_PWR_WELL_CTL_DRIVER(id),
331 					HSW_PWR_WELL_CTL_STATE(id),
332 					HSW_PWR_WELL_CTL_STATE(id),
333 					1));
334 }
335 
hsw_power_well_requesters(struct drm_i915_private * dev_priv,enum i915_power_well_id id)336 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
337 				     enum i915_power_well_id id)
338 {
339 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
340 	u32 ret;
341 
342 	ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
343 	ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
344 	ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
345 	ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
346 
347 	return ret;
348 }
349 
hsw_wait_for_power_well_disable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)350 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
351 					    struct i915_power_well *power_well)
352 {
353 	enum i915_power_well_id id = power_well->id;
354 	bool disabled;
355 	u32 reqs;
356 
357 	/*
358 	 * Bspec doesn't require waiting for PWs to get disabled, but still do
359 	 * this for paranoia. The known cases where a PW will be forced on:
360 	 * - a KVMR request on any power well via the KVMR request register
361 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
362 	 *   DEBUG request registers
363 	 * Skip the wait in case any of the request bits are set and print a
364 	 * diagnostic message.
365 	 */
366 	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
367 			       HSW_PWR_WELL_CTL_STATE(id))) ||
368 		 (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
369 	if (disabled)
370 		return;
371 
372 	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
373 		      power_well->name,
374 		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
375 }
376 
gen9_wait_for_power_well_fuses(struct drm_i915_private * dev_priv,enum skl_power_gate pg)377 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
378 					   enum skl_power_gate pg)
379 {
380 	/* Timeout 5us for PG#0, for other PGs 1us */
381 	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
382 					SKL_FUSE_PG_DIST_STATUS(pg),
383 					SKL_FUSE_PG_DIST_STATUS(pg), 1));
384 }
385 
hsw_power_well_enable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)386 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
387 				  struct i915_power_well *power_well)
388 {
389 	enum i915_power_well_id id = power_well->id;
390 	bool wait_fuses = power_well->hsw.has_fuses;
391 	enum skl_power_gate uninitialized_var(pg);
392 	u32 val;
393 
394 	if (wait_fuses) {
395 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
396 						 SKL_PW_TO_PG(id);
397 		/*
398 		 * For PW1 we have to wait both for the PW0/PG0 fuse state
399 		 * before enabling the power well and PW1/PG1's own fuse
400 		 * state after the enabling. For all other power wells with
401 		 * fuses we only have to wait for that PW/PG's fuse state
402 		 * after the enabling.
403 		 */
404 		if (pg == SKL_PG1)
405 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
406 	}
407 
408 	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
409 	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
410 	hsw_wait_for_power_well_enable(dev_priv, power_well);
411 
412 	/* Display WA #1178: cnl */
413 	if (IS_CANNONLAKE(dev_priv) &&
414 	    (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
415 	     id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
416 		val = I915_READ(CNL_AUX_ANAOVRD1(id));
417 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
418 		I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
419 	}
420 
421 	if (wait_fuses)
422 		gen9_wait_for_power_well_fuses(dev_priv, pg);
423 
424 	hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
425 				   power_well->hsw.has_vga);
426 }
427 
hsw_power_well_disable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)428 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
429 				   struct i915_power_well *power_well)
430 {
431 	enum i915_power_well_id id = power_well->id;
432 	u32 val;
433 
434 	hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);
435 
436 	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
437 	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
438 		   val & ~HSW_PWR_WELL_CTL_REQ(id));
439 	hsw_wait_for_power_well_disable(dev_priv, power_well);
440 }
441 
442 #define ICL_AUX_PW_TO_PORT(pw)	((pw) - ICL_DISP_PW_AUX_A)
443 
/*
 * Enable an ICL combo PHY AUX power well: request the well on, enable
 * the AUX lanes in the PHY's common lane register, then wait for the
 * well's enabled status.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	enum port port = ICL_AUX_PW_TO_PORT(id);
	u32 val;

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well);
}
460 
/*
 * Disable an ICL combo PHY AUX power well, mirroring the enable
 * sequence: drop the AUX lane enable first, then clear the driver
 * request and wait for the well to power down.
 */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	enum port port = ICL_AUX_PW_TO_PORT(id);
	u32 val;

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
478 
479 /*
480  * We should only use the power well if we explicitly asked the hardware to
481  * enable it, so check if it's enabled and also check if we've requested it to
482  * be enabled.
483  */
hsw_power_well_enabled(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)484 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
485 				   struct i915_power_well *power_well)
486 {
487 	enum i915_power_well_id id = power_well->id;
488 	u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
489 
490 	return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
491 }
492 
assert_can_enable_dc9(struct drm_i915_private * dev_priv)493 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
494 {
495 	enum i915_power_well_id id = SKL_DISP_PW_2;
496 
497 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
498 		  "DC9 already programmed to be enabled.\n");
499 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
500 		  "DC5 still not disabled to enable DC9.\n");
501 	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
502 		  HSW_PWR_WELL_CTL_REQ(id),
503 		  "Power well 2 on.\n");
504 	WARN_ONCE(intel_irqs_enabled(dev_priv),
505 		  "Interrupts not disabled yet.\n");
506 
507 	 /*
508 	  * TODO: check for the following to verify the conditions to enter DC9
509 	  * state are satisfied:
510 	  * 1] Check relevant display engine registers to verify if mode set
511 	  * disable sequence was followed.
512 	  * 2] Check if display uninitialize sequence is initialized.
513 	  */
514 }
515 
assert_can_disable_dc9(struct drm_i915_private * dev_priv)516 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
517 {
518 	WARN_ONCE(intel_irqs_enabled(dev_priv),
519 		  "Interrupts not disabled yet.\n");
520 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
521 		  "DC5 still not disabled.\n");
522 
523 	 /*
524 	  * TODO: check for the following to verify DC9 state was indeed
525 	  * entered before programming to disable it:
526 	  * 1] Check relevant display engine registers to verify if mode
527 	  *  set disable sequence was followed.
528 	  * 2] Check if display uninitialize sequence is initialized.
529 	  */
530 }
531 
gen9_write_dc_state(struct drm_i915_private * dev_priv,u32 state)532 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
533 				u32 state)
534 {
535 	int rewrites = 0;
536 	int rereads = 0;
537 	u32 v;
538 
539 	I915_WRITE(DC_STATE_EN, state);
540 
541 	/* It has been observed that disabling the dc6 state sometimes
542 	 * doesn't stick and dmc keeps returning old value. Make sure
543 	 * the write really sticks enough times and also force rewrite until
544 	 * we are confident that state is exactly what we want.
545 	 */
546 	do  {
547 		v = I915_READ(DC_STATE_EN);
548 
549 		if (v != state) {
550 			I915_WRITE(DC_STATE_EN, state);
551 			rewrites++;
552 			rereads = 0;
553 		} else if (rereads++ > 5) {
554 			break;
555 		}
556 
557 	} while (rewrites < 100);
558 
559 	if (v != state)
560 		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
561 			  state, v);
562 
563 	/* Most of the times we need one retry, avoid spam */
564 	if (rewrites > 1)
565 		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
566 			      state, rewrites);
567 }
568 
gen9_dc_mask(struct drm_i915_private * dev_priv)569 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
570 {
571 	u32 mask;
572 
573 	mask = DC_STATE_EN_UPTO_DC5;
574 	if (IS_GEN9_LP(dev_priv))
575 		mask |= DC_STATE_EN_DC9;
576 	else
577 		mask |= DC_STATE_EN_UPTO_DC6;
578 
579 	return mask;
580 }
581 
gen9_sanitize_dc_state(struct drm_i915_private * dev_priv)582 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
583 {
584 	u32 val;
585 
586 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
587 
588 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
589 		      dev_priv->csr.dc_state, val);
590 	dev_priv->csr.dc_state = val;
591 }
592 
593 /**
594  * gen9_set_dc_state - set target display C power state
595  * @dev_priv: i915 device instance
596  * @state: target DC power state
597  * - DC_STATE_DISABLE
598  * - DC_STATE_EN_UPTO_DC5
599  * - DC_STATE_EN_UPTO_DC6
600  * - DC_STATE_EN_DC9
601  *
602  * Signal to DMC firmware/HW the target DC power state passed in @state.
603  * DMC/HW can turn off individual display clocks and power rails when entering
604  * a deeper DC power state (higher in number) and turns these back when exiting
605  * that state to a shallower power state (lower in number). The HW will decide
606  * when to actually enter a given state on an on-demand basis, for instance
607  * depending on the active state of display pipes. The state of display
608  * registers backed by affected power rails are saved/restored as needed.
609  *
610  * Based on the above enabling a deeper DC power state is asynchronous wrt.
611  * enabling it. Disabling a deeper power state is synchronous: for instance
612  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
613  * back on and register state is restored. This is guaranteed by the MMIO write
614  * to DC_STATE_EN blocking until the state is restored.
615  */
gen9_set_dc_state(struct drm_i915_private * dev_priv,uint32_t state)616 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
617 {
618 	uint32_t val;
619 	uint32_t mask;
620 
621 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
622 		state &= dev_priv->csr.allowed_dc_mask;
623 
624 	val = I915_READ(DC_STATE_EN);
625 	mask = gen9_dc_mask(dev_priv);
626 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
627 		      val & mask, state);
628 
629 	/* Check if DMC is ignoring our DC state requests */
630 	if ((val & mask) != dev_priv->csr.dc_state)
631 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
632 			  dev_priv->csr.dc_state, val & mask);
633 
634 	val &= ~mask;
635 	val |= state;
636 
637 	gen9_write_dc_state(dev_priv, val);
638 
639 	dev_priv->csr.dc_state = val & mask;
640 }
641 
bxt_enable_dc9(struct drm_i915_private * dev_priv)642 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
643 {
644 	assert_can_enable_dc9(dev_priv);
645 
646 	DRM_DEBUG_KMS("Enabling DC9\n");
647 
648 	intel_power_sequencer_reset(dev_priv);
649 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
650 }
651 
bxt_disable_dc9(struct drm_i915_private * dev_priv)652 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
653 {
654 	assert_can_disable_dc9(dev_priv);
655 
656 	DRM_DEBUG_KMS("Disabling DC9\n");
657 
658 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
659 
660 	intel_pps_unlock_regs_wa(dev_priv);
661 }
662 
assert_csr_loaded(struct drm_i915_private * dev_priv)663 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
664 {
665 	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
666 		  "CSR program storage start is NULL\n");
667 	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
668 	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
669 }
670 
assert_can_enable_dc5(struct drm_i915_private * dev_priv)671 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
672 {
673 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
674 					SKL_DISP_PW_2);
675 
676 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
677 
678 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
679 		  "DC5 already programmed to be enabled.\n");
680 	assert_rpm_wakelock_held(dev_priv);
681 
682 	assert_csr_loaded(dev_priv);
683 }
684 
gen9_enable_dc5(struct drm_i915_private * dev_priv)685 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
686 {
687 	assert_can_enable_dc5(dev_priv);
688 
689 	DRM_DEBUG_KMS("Enabling DC5\n");
690 
691 	/* Wa Display #1183: skl,kbl,cfl */
692 	if (IS_GEN9_BC(dev_priv))
693 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
694 			   SKL_SELECT_ALTERNATE_DC_EXIT);
695 
696 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
697 }
698 
assert_can_enable_dc6(struct drm_i915_private * dev_priv)699 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
700 {
701 	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
702 		  "Backlight is not disabled.\n");
703 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
704 		  "DC6 already programmed to be enabled.\n");
705 
706 	assert_csr_loaded(dev_priv);
707 }
708 
skl_enable_dc6(struct drm_i915_private * dev_priv)709 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
710 {
711 	assert_can_enable_dc6(dev_priv);
712 
713 	DRM_DEBUG_KMS("Enabling DC6\n");
714 
715 	/* Wa Display #1183: skl,kbl,cfl */
716 	if (IS_GEN9_BC(dev_priv))
717 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
718 			   SKL_SELECT_ALTERNATE_DC_EXIT);
719 
720 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
721 }
722 
hsw_power_well_sync_hw(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)723 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
724 				   struct i915_power_well *power_well)
725 {
726 	enum i915_power_well_id id = power_well->id;
727 	u32 mask = HSW_PWR_WELL_CTL_REQ(id);
728 	u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));
729 
730 	/* Take over the request bit if set by BIOS. */
731 	if (bios_req & mask) {
732 		u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
733 
734 		if (!(drv_req & mask))
735 			I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
736 		I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
737 	}
738 }
739 
bxt_dpio_cmn_power_well_enable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)740 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
741 					   struct i915_power_well *power_well)
742 {
743 	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
744 }
745 
bxt_dpio_cmn_power_well_disable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)746 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
747 					    struct i915_power_well *power_well)
748 {
749 	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
750 }
751 
bxt_dpio_cmn_power_well_enabled(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)752 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
753 					    struct i915_power_well *power_well)
754 {
755 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
756 }
757 
bxt_verify_ddi_phy_power_wells(struct drm_i915_private * dev_priv)758 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
759 {
760 	struct i915_power_well *power_well;
761 
762 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
763 	if (power_well->count > 0)
764 		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
765 
766 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
767 	if (power_well->count > 0)
768 		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
769 
770 	if (IS_GEMINILAKE(dev_priv)) {
771 		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
772 		if (power_well->count > 0)
773 			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
774 	}
775 }
776 
gen9_dc_off_power_well_enabled(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)777 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
778 					   struct i915_power_well *power_well)
779 {
780 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
781 }
782 
gen9_assert_dbuf_enabled(struct drm_i915_private * dev_priv)783 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
784 {
785 	u32 tmp = I915_READ(DBUF_CTL);
786 
787 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
788 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
789 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
790 }
791 
gen9_dc_off_power_well_enable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)792 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
793 					  struct i915_power_well *power_well)
794 {
795 	struct intel_cdclk_state cdclk_state = {};
796 
797 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
798 
799 	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
800 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
801 	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
802 
803 	gen9_assert_dbuf_enabled(dev_priv);
804 
805 	if (IS_GEN9_LP(dev_priv))
806 		bxt_verify_ddi_phy_power_wells(dev_priv);
807 }
808 
gen9_dc_off_power_well_disable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)809 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
810 					   struct i915_power_well *power_well)
811 {
812 	if (!dev_priv->csr.dmc_payload)
813 		return;
814 
815 	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
816 		skl_enable_dc6(dev_priv);
817 	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
818 		gen9_enable_dc5(dev_priv);
819 }
820 
i9xx_power_well_sync_hw_noop(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)821 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
822 					 struct i915_power_well *power_well)
823 {
824 }
825 
/* No-op enable/disable hook for the always-on power well. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
830 
/* The always-on power well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
836 
i830_pipes_power_well_enable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)837 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
838 					 struct i915_power_well *power_well)
839 {
840 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
841 		i830_enable_pipe(dev_priv, PIPE_A);
842 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
843 		i830_enable_pipe(dev_priv, PIPE_B);
844 }
845 
/*
 * Power down the i830 "pipes" power well by disabling both pipes
 * (in reverse order of the enable path).
 */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
852 
i830_pipes_power_well_enabled(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)853 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
854 					  struct i915_power_well *power_well)
855 {
856 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
857 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
858 }
859 
/*
 * Bring the i830 pipes power well HW state in line with the current
 * software reference count.
 */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
868 
/*
 * Set a VLV/CHV power well on or off through the Punit PWRGT control
 * register, then wait for the status register to reflect the request.
 * Serialized against other Punit accesses via pcu_lock.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum i915_power_well_id power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

	/* True once the status register reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	/* Request the new state, touching only this well's bits. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
904 
/* Power well ops wrapper: enable via the Punit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
910 
/* Power well ops wrapper: disable via the Punit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
916 
/*
 * Read back a VLV power well's state from the Punit status register,
 * warning about any state the driver never programs (i.e. anything
 * other than fully on or fully gated) and about in-flight transitions.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
952 
/*
 * Program the VLV/CHV display clock gating, arbiter and raw clock
 * frequency registers to their initial values.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk_freq must have been read out before we get here. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	/* NOTE(review): rawclk_freq appears to be in kHz given the /1000 —
	 * confirm against where rawclk_freq is initialized. */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
979 
/*
 * Common init work after powering up the VLV/CHV display power well:
 * re-enable the CRI/ref clocks, clock gating and display IRQs, then —
 * outside of driver init — restore hotplug, CRT, VGA and PPS state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1028 
/*
 * Common teardown work before powering down the VLV/CHV display power
 * well: quiesce display IRQs, reset PPS state, and (unless we're in
 * late suspend) fall back to connector polling.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1044 
/*
 * Enable the VLV DISP2D power well via the Punit, then re-initialize
 * the display state it gates.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1054 
/*
 * Quiesce the display state gated by the VLV DISP2D power well, then
 * power the well down via the Punit.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1064 
/*
 * Enable the VLV DPIO common lane power well and de-assert the DPIO
 * common lane reset afterwards, per the VLV DPIO programming notes.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1088 
/*
 * Disable the VLV DPIO common lane power well: all PLLs must already
 * be off, then assert the common lane reset before gating the well.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1104 
/* Mask covering every defined power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1106 
1107 static struct i915_power_well *
lookup_power_well(struct drm_i915_private * dev_priv,enum i915_power_well_id power_well_id)1108 lookup_power_well(struct drm_i915_private *dev_priv,
1109 		  enum i915_power_well_id power_well_id)
1110 {
1111 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1112 	int i;
1113 
1114 	for (i = 0; i < power_domains->power_well_count; i++) {
1115 		struct i915_power_well *power_well;
1116 
1117 		power_well = &power_domains->power_wells[i];
1118 		if (power_well->id == power_well_id)
1119 			return power_well;
1120 	}
1121 
1122 	return NULL;
1123 }
1124 
/* True iff all of @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Cross-check the CHV DISPLAY_PHY_STATUS register against the expected
 * PHY state, which is reconstructed from the cached chv_phy_control
 * value and the state of the two DPIO common lane power wells. Warns
 * (after a short settle wait) on any mismatch.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDO status: 0x3 covers lanes 0/1, 0xc lanes 2/3. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
1234 
/*
 * Enable a CHV DPIO common lane power well (BC -> PHY0, D -> PHY1):
 * power it up via the Punit, wait for phypwrgood, program dynamic
 * power-down in the PHY over sideband, then de-assert the common lane
 * reset and sanity-check the resulting PHY status.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* Pick the sideband "pipe" used to address this PHY's registers. */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1298 
/*
 * Disable a CHV DPIO common lane power well: all PLLs served by the
 * PHY must be off, then assert the common lane reset and gate the well
 * via the Punit. After this full reset the PHY state asserts become
 * trustworthy again.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1329 
/*
 * Verify the PHY's lane power-down status bits match what the current
 * override setting and lane mask imply. Reads the channel's common
 * lane register over sideband and WARNs on a mismatch.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some (but not all) lanes enabled: only "any" powered down. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Shift the per-channel status bits down to a common position. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1391 
/*
 * Set or clear the power-down override enable for one PHY channel in
 * DISPLAY_PHY_CONTROL. Returns the previous override state so callers
 * can restore it later. Serialized via the power domains lock.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Nothing to do if the override is already in the requested state. */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1422 
/*
 * Update the per-lane power-down override mask (and its enable bit)
 * for the PHY channel used by @encoder, then cross-check the resulting
 * PHY state. Serialized via the power domains lock.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask for this channel with the new one. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1452 
/*
 * Read back the CHV pipe A power well state from the Punit DSPFREQ
 * register, warning about any unexpected or transient state.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* The control field sits 16 bits below the status field. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1481 
/*
 * Set the CHV pipe A power well on or off through the Punit DSPFREQ
 * register, waiting for the status field to reflect the request.
 * Serialized against other Punit accesses via pcu_lock.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

	/* True once the status field reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	/* Request the new state, touching only this pipe's control bits. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ))/* no-op change guard */;

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1515 
/*
 * Enable the CHV pipe A power well and re-initialize the display
 * state it gates.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1525 
/*
 * Quiesce the display state gated by the CHV pipe A power well, then
 * power the well down.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1535 
/*
 * Take a reference on every power well backing @domain and bump the
 * domain's use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1548 
1549 /**
1550  * intel_display_power_get - grab a power domain reference
1551  * @dev_priv: i915 device instance
1552  * @domain: power domain to reference
1553  *
1554  * This function grabs a power domain reference for @domain and ensures that the
1555  * power domain and all its parents are powered up. Therefore users should only
1556  * grab a reference to the innermost power domain they need.
1557  *
1558  * Any power domain reference obtained by this function must have a symmetric
1559  * call to intel_display_power_put() to release the reference again.
1560  */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* Hold a runtime pm reference for as long as the domain ref exists. */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
1574 
1575 /**
1576  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1577  * @dev_priv: i915 device instance
1578  * @domain: power domain to reference
1579  *
1580  * This function grabs a power domain reference for @domain and ensures that the
1581  * power domain and all its parents are powered up. Therefore users should only
1582  * grab a reference to the innermost power domain they need.
1583  *
1584  * Any power domain reference obtained by this function must have a symmetric
1585  * call to intel_display_power_put() to release the reference again.
1586  */
intel_display_power_get_if_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)1587 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1588 					enum intel_display_power_domain domain)
1589 {
1590 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1591 	bool is_enabled;
1592 
1593 	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1594 		return false;
1595 
1596 	mutex_lock(&power_domains->lock);
1597 
1598 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1599 		__intel_display_power_get_domain(dev_priv, domain);
1600 		is_enabled = true;
1601 	} else {
1602 		is_enabled = false;
1603 	}
1604 
1605 	mutex_unlock(&power_domains->lock);
1606 
1607 	if (!is_enabled)
1608 		intel_runtime_pm_put(dev_priv);
1609 
1610 	return is_enabled;
1611 }
1612 
1613 /**
1614  * intel_display_power_put - release a power domain reference
1615  * @dev_priv: i915 device instance
1616  * @domain: power domain to reference
1617  *
1618  * This function drops the power domain reference obtained by
1619  * intel_display_power_get() and might power down the corresponding hardware
1620  * block right away if this is the last reference.
1621  */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Catch unbalanced put calls. */
	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	/* Release the wells in reverse order of how they were acquired. */
	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	/* Balances the runtime pm ref taken in intel_display_power_get(). */
	intel_runtime_pm_put(dev_priv);
}
1644 
/*
 * Power domain bitmasks: which display power domains each platform's
 * power wells provide.
 */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
1699 
/*
 * CHV: domain masks for the pipe-A (display) well and the two DPIO common
 * wells (see chv_power_wells[] below).
 */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
1733 
/* HSW/BDW: domains behind the single global "display" power well. */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Note: unlike HSW, pipe A's panel fitter is not part of this well. */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1766 
/* SKL: domain masks for power well 2, the DDI IO wells and DC off. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* "DC off" is a virtual well: holding it keeps DC states disabled. */
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1804 
/* BXT: domain masks for power well 2, DC off and the two DPIO PHY wells. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1837 
/*
 * GLK: domain masks for power well 2, per-port DDI IO wells, per-PHY DPIO
 * wells, per-port AUX wells and DC off.
 */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1888 
/*
 * CNL: domain masks for power well 2, per-port DDI IO wells, per-port AUX
 * wells and DC off.
 */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1945 
1946 /*
1947  * ICL PW_0/PG_0 domains (HW/DMC control):
1948  * - PCI
1949  * - clocks except port PLL
1950  * - central power except FBC
1951  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
1952  * ICL PW_1/PG_1 domains (HW/DMC control):
1953  * - DBUF function
1954  * - PIPE_A and its planes, except VGA
1955  * - transcoder EDP + PSR
1956  * - transcoder DSI
1957  * - DDI_A
1958  * - FBC
1959  */
1960 #define ICL_PW_4_POWER_DOMAINS (			\
1961 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1962 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
1963 	BIT_ULL(POWER_DOMAIN_INIT))
1964 	/* VDSC/joining */
1965 #define ICL_PW_3_POWER_DOMAINS (			\
1966 	ICL_PW_4_POWER_DOMAINS |			\
1967 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1968 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1969 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1970 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1971 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1972 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1973 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
1974 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1975 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
1976 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1977 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
1978 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
1979 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
1980 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
1981 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
1982 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
1983 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1984 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
1985 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
1986 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
1987 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
1988 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
1989 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
1990 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
1991 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1992 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1993 	BIT_ULL(POWER_DOMAIN_INIT))
1994 	/*
1995 	 * - transcoder WD
1996 	 * - KVMR (HW control)
1997 	 */
1998 #define ICL_PW_2_POWER_DOMAINS (			\
1999 	ICL_PW_3_POWER_DOMAINS |			\
2000 	BIT_ULL(POWER_DOMAIN_INIT))
2001 	/*
2002 	 * - eDP/DSI VDSC
2003 	 * - KVMR (HW control)
2004 	 */
2005 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2006 	ICL_PW_2_POWER_DOMAINS |			\
2007 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2008 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2009 	BIT_ULL(POWER_DOMAIN_INIT))
2010 
2011 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2012 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2013 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2014 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2015 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2016 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2017 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2018 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2019 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2020 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2021 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2022 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2023 
2024 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2025 	BIT_ULL(POWER_DOMAIN_AUX_A))
2026 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2027 	BIT_ULL(POWER_DOMAIN_AUX_B))
2028 #define ICL_AUX_C_IO_POWER_DOMAINS (			\
2029 	BIT_ULL(POWER_DOMAIN_AUX_C))
2030 #define ICL_AUX_D_IO_POWER_DOMAINS (			\
2031 	BIT_ULL(POWER_DOMAIN_AUX_D))
2032 #define ICL_AUX_E_IO_POWER_DOMAINS (			\
2033 	BIT_ULL(POWER_DOMAIN_AUX_E))
2034 #define ICL_AUX_F_IO_POWER_DOMAINS (			\
2035 	BIT_ULL(POWER_DOMAIN_AUX_F))
2036 #define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
2037 	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2038 #define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
2039 	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2040 #define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
2041 	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2042 #define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
2043 	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2044 
/* Always-on wells: nothing to toggle, so enable/disable/sync_hw are no-ops. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV pipe power well (used for the pipe-A/display well below). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane wells; state is queried the same way as on VLV. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2065 
/* Platforms without power wells: a single always-on well covering everything. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
};
2075 
/* i830 "pipes" well callbacks. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
2082 
/* i830: always-on well plus one well for both pipes/transcoders. */
static struct i915_power_well i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = I830_DISP_PW_PIPES,
	},
};
2098 
/* HSW-style wells; also reused by the gen9+ well tables below. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Virtual "DC off" well: enabling it disallows DC states. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* BXT/GLK DPIO common PHY wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2119 
/* HSW: always-on well plus a single global display well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.has_vga = true,
		},
	},
};
2138 
/* BDW: like HSW but the display well also gates pipe B/C interrupts. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2158 
/* VLV disp2d (display) well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic VLV (punit-controlled) wells, used for the DPIO TX lane wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2179 
/*
 * VLV power wells. Note that every dpio-tx-* well lists the union of all
 * four TX lane domain masks, so any lane domain keeps all TX wells up.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
2237 
/* CHV power wells. */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = CHV_DISP_PW_PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
2270 
intel_display_power_well_is_enabled(struct drm_i915_private * dev_priv,enum i915_power_well_id power_well_id)2271 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2272 					 enum i915_power_well_id power_well_id)
2273 {
2274 	struct i915_power_well *power_well;
2275 	bool ret;
2276 
2277 	power_well = lookup_power_well(dev_priv, power_well_id);
2278 	ret = power_well->ops->is_enabled(dev_priv, power_well);
2279 
2280 	return ret;
2281 }
2282 
/* SKL power wells. Wells with .domains == 0 are managed by the DMC firmware. */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2350 
/* BXT power wells. */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* .domains == 0: handled by the DMC firmware, as on SKL */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
2404 
/* GLK power wells. */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
2504 
/* CNL power wells. */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_F,
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_F,
	},
};
2601 
/*
 * Ops for the ICL combo PHY AUX power wells: they use dedicated
 * enable/disable hooks while sharing the HSW-style sync_hw and
 * is_enabled implementations with the generic wells.
 */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
2608 
/*
 * ICL (gen11) display power wells. Wells are enabled in array order and
 * disabled in reverse order (see intel_power_domains_init()).
 */
static struct i915_power_well icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_1,
		.hsw.has_fuses = true,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_2,
		.hsw.has_fuses = true,
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		.hsw.irq_pipe_mask = BIT(PIPE_B),
		.hsw.has_vga = true,
		.hsw.has_fuses = true,
	},
	/* DDI IO wells, one per port A-F. */
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_D,
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_E,
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_F,
	},
	/*
	 * AUX IO wells; ports A and B use the combo PHY specific
	 * enable/disable ops, the rest the generic HSW-style ops.
	 */
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = ICL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = ICL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_D,
	},
	{
		.name = "AUX E",
		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_E,
	},
	{
		.name = "AUX F",
		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_F,
	},
	/* Thunderbolt AUX wells. */
	{
		.name = "AUX TBT1",
		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT1,
	},
	{
		.name = "AUX TBT2",
		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT2,
	},
	{
		.name = "AUX TBT3",
		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT3,
	},
	{
		.name = "AUX TBT4",
		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT4,
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_4,
		.hsw.has_fuses = true,
		.hsw.irq_pipe_mask = BIT(PIPE_C),
	},
};
2752 
/*
 * Sanitize the i915.disable_power_well module option: a negative value
 * means "auto", which resolves to 1 (power well support enabled) on all
 * platforms; non-negative values are normalized to a strict 0/1.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	/* Negative means "use the platform default", i.e. enabled. */
	if (disable_power_well < 0)
		return 1;

	return disable_power_well != 0;
}
2762 
get_allowed_dc_mask(const struct drm_i915_private * dev_priv,int enable_dc)2763 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2764 				    int enable_dc)
2765 {
2766 	uint32_t mask;
2767 	int requested_dc;
2768 	int max_dc;
2769 
2770 	if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
2771 		max_dc = 2;
2772 		mask = 0;
2773 	} else if (IS_GEN9_LP(dev_priv)) {
2774 		max_dc = 1;
2775 		/*
2776 		 * DC9 has a separate HW flow from the rest of the DC states,
2777 		 * not depending on the DMC firmware. It's needed by system
2778 		 * suspend/resume, so allow it unconditionally.
2779 		 */
2780 		mask = DC_STATE_EN_DC9;
2781 	} else {
2782 		max_dc = 0;
2783 		mask = 0;
2784 	}
2785 
2786 	if (!i915_modparams.disable_power_well)
2787 		max_dc = 0;
2788 
2789 	if (enable_dc >= 0 && enable_dc <= max_dc) {
2790 		requested_dc = enable_dc;
2791 	} else if (enable_dc == -1) {
2792 		requested_dc = max_dc;
2793 	} else if (enable_dc > max_dc && enable_dc <= 2) {
2794 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2795 			      enable_dc, max_dc);
2796 		requested_dc = max_dc;
2797 	} else {
2798 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2799 		requested_dc = max_dc;
2800 	}
2801 
2802 	if (requested_dc > 1)
2803 		mask |= DC_STATE_EN_UPTO_DC6;
2804 	if (requested_dc > 0)
2805 		mask |= DC_STATE_EN_UPTO_DC5;
2806 
2807 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2808 
2809 	return mask;
2810 }
2811 
assert_power_well_ids_unique(struct drm_i915_private * dev_priv)2812 static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
2813 {
2814 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2815 	u64 power_well_ids;
2816 	int i;
2817 
2818 	power_well_ids = 0;
2819 	for (i = 0; i < power_domains->power_well_count; i++) {
2820 		enum i915_power_well_id id = power_domains->power_wells[i].id;
2821 
2822 		WARN_ON(id >= sizeof(power_well_ids) * 8);
2823 		WARN_ON(power_well_ids & BIT_ULL(id));
2824 		power_well_ids |= BIT_ULL(id);
2825 	}
2826 }
2827 
/*
 * Point @power_domains at a static power well table and record its size.
 * A macro (not a function) so that ARRAY_SIZE() is evaluated on the
 * actual array argument rather than a decayed pointer.
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
2832 
2833 /**
2834  * intel_power_domains_init - initializes the power domain structures
2835  * @dev_priv: i915 device instance
2836  *
2837  * Initializes the power domain structures for @dev_priv depending upon the
2838  * supported platform.
2839  */
intel_power_domains_init(struct drm_i915_private * dev_priv)2840 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2841 {
2842 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2843 
2844 	i915_modparams.disable_power_well =
2845 		sanitize_disable_power_well_option(dev_priv,
2846 						   i915_modparams.disable_power_well);
2847 	dev_priv->csr.allowed_dc_mask =
2848 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
2849 
2850 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2851 
2852 	mutex_init(&power_domains->lock);
2853 
2854 	/*
2855 	 * The enabling order will be from lower to higher indexed wells,
2856 	 * the disabling order is reversed.
2857 	 */
2858 	if (IS_ICELAKE(dev_priv)) {
2859 		set_power_wells(power_domains, icl_power_wells);
2860 	} else if (IS_HASWELL(dev_priv)) {
2861 		set_power_wells(power_domains, hsw_power_wells);
2862 	} else if (IS_BROADWELL(dev_priv)) {
2863 		set_power_wells(power_domains, bdw_power_wells);
2864 	} else if (IS_GEN9_BC(dev_priv)) {
2865 		set_power_wells(power_domains, skl_power_wells);
2866 	} else if (IS_CANNONLAKE(dev_priv)) {
2867 		set_power_wells(power_domains, cnl_power_wells);
2868 
2869 		/*
2870 		 * DDI and Aux IO are getting enabled for all ports
2871 		 * regardless the presence or use. So, in order to avoid
2872 		 * timeouts, lets remove them from the list
2873 		 * for the SKUs without port F.
2874 		 */
2875 		if (!IS_CNL_WITH_PORT_F(dev_priv))
2876 			power_domains->power_well_count -= 2;
2877 
2878 	} else if (IS_BROXTON(dev_priv)) {
2879 		set_power_wells(power_domains, bxt_power_wells);
2880 	} else if (IS_GEMINILAKE(dev_priv)) {
2881 		set_power_wells(power_domains, glk_power_wells);
2882 	} else if (IS_CHERRYVIEW(dev_priv)) {
2883 		set_power_wells(power_domains, chv_power_wells);
2884 	} else if (IS_VALLEYVIEW(dev_priv)) {
2885 		set_power_wells(power_domains, vlv_power_wells);
2886 	} else if (IS_I830(dev_priv)) {
2887 		set_power_wells(power_domains, i830_power_wells);
2888 	} else {
2889 		set_power_wells(power_domains, i9xx_always_on_power_well);
2890 	}
2891 
2892 	assert_power_well_ids_unique(dev_priv);
2893 
2894 	return 0;
2895 }
2896 
2897 /**
2898  * intel_power_domains_fini - finalizes the power domain structures
2899  * @dev_priv: i915 device instance
2900  *
2901  * Finalizes the power domain structures for @dev_priv depending upon the
2902  * supported platform. This function also disables runtime pm and ensures that
2903  * the device stays powered up so that the driver can be reloaded.
2904  */
intel_power_domains_fini(struct drm_i915_private * dev_priv)2905 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2906 {
2907 	struct device *kdev = &dev_priv->drm.pdev->dev;
2908 
2909 	/*
2910 	 * The i915.ko module is still not prepared to be loaded when
2911 	 * the power well is not enabled, so just enable it in case
2912 	 * we're going to unload/reload.
2913 	 * The following also reacquires the RPM reference the core passed
2914 	 * to the driver during loading, which is dropped in
2915 	 * intel_runtime_pm_enable(). We have to hand back the control of the
2916 	 * device to the core with this reference held.
2917 	 */
2918 	intel_display_set_init_power(dev_priv, true);
2919 
2920 	/* Remove the refcount we took to keep power well support disabled. */
2921 	if (!i915_modparams.disable_power_well)
2922 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2923 
2924 	/*
2925 	 * Remove the refcount we took in intel_runtime_pm_enable() in case
2926 	 * the platform doesn't support runtime PM.
2927 	 */
2928 	if (!HAS_RUNTIME_PM(dev_priv))
2929 		pm_runtime_put(kdev);
2930 }
2931 
intel_power_domains_sync_hw(struct drm_i915_private * dev_priv)2932 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2933 {
2934 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2935 	struct i915_power_well *power_well;
2936 
2937 	mutex_lock(&power_domains->lock);
2938 	for_each_power_well(dev_priv, power_well) {
2939 		power_well->ops->sync_hw(dev_priv, power_well);
2940 		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2941 								     power_well);
2942 	}
2943 	mutex_unlock(&power_domains->lock);
2944 }
2945 
2946 static inline
intel_dbuf_slice_set(struct drm_i915_private * dev_priv,i915_reg_t reg,bool enable)2947 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
2948 			  i915_reg_t reg, bool enable)
2949 {
2950 	u32 val, status;
2951 
2952 	val = I915_READ(reg);
2953 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
2954 	I915_WRITE(reg, val);
2955 	POSTING_READ(reg);
2956 	udelay(10);
2957 
2958 	status = I915_READ(reg) & DBUF_POWER_STATE;
2959 	if ((enable && !status) || (!enable && status)) {
2960 		DRM_ERROR("DBus power %s timeout!\n",
2961 			  enable ? "enable" : "disable");
2962 		return false;
2963 	}
2964 	return true;
2965 }
2966 
gen9_dbuf_enable(struct drm_i915_private * dev_priv)2967 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2968 {
2969 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
2970 }
2971 
gen9_dbuf_disable(struct drm_i915_private * dev_priv)2972 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2973 {
2974 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
2975 }
2976 
intel_dbuf_max_slices(struct drm_i915_private * dev_priv)2977 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
2978 {
2979 	if (INTEL_GEN(dev_priv) < 11)
2980 		return 1;
2981 	return 2;
2982 }
2983 
icl_dbuf_slices_update(struct drm_i915_private * dev_priv,u8 req_slices)2984 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
2985 			    u8 req_slices)
2986 {
2987 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
2988 	u32 val;
2989 	bool ret;
2990 
2991 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
2992 		DRM_ERROR("Invalid number of dbuf slices requested\n");
2993 		return;
2994 	}
2995 
2996 	if (req_slices == hw_enabled_slices || req_slices == 0)
2997 		return;
2998 
2999 	val = I915_READ(DBUF_CTL_S2);
3000 	if (req_slices > hw_enabled_slices)
3001 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3002 	else
3003 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3004 
3005 	if (ret)
3006 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3007 }
3008 
icl_dbuf_enable(struct drm_i915_private * dev_priv)3009 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3010 {
3011 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3012 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3013 	POSTING_READ(DBUF_CTL_S2);
3014 
3015 	udelay(10);
3016 
3017 	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3018 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3019 		DRM_ERROR("DBuf power enable timeout\n");
3020 	else
3021 		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
3022 }
3023 
icl_dbuf_disable(struct drm_i915_private * dev_priv)3024 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3025 {
3026 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3027 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3028 	POSTING_READ(DBUF_CTL_S2);
3029 
3030 	udelay(10);
3031 
3032 	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3033 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3034 		DRM_ERROR("DBuf power disable timeout!\n");
3035 	else
3036 		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
3037 }
3038 
icl_mbus_init(struct drm_i915_private * dev_priv)3039 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3040 {
3041 	uint32_t val;
3042 
3043 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3044 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
3045 	      MBUS_ABOX_B_CREDIT(1) |
3046 	      MBUS_ABOX_BW_CREDIT(1);
3047 
3048 	I915_WRITE(MBUS_ABOX_CTL, val);
3049 }
3050 
skl_display_core_init(struct drm_i915_private * dev_priv,bool resume)3051 static void skl_display_core_init(struct drm_i915_private *dev_priv,
3052 				   bool resume)
3053 {
3054 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3055 	struct i915_power_well *well;
3056 	uint32_t val;
3057 
3058 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3059 
3060 	/* enable PCH reset handshake */
3061 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
3062 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
3063 
3064 	/* enable PG1 and Misc I/O */
3065 	mutex_lock(&power_domains->lock);
3066 
3067 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3068 	intel_power_well_enable(dev_priv, well);
3069 
3070 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3071 	intel_power_well_enable(dev_priv, well);
3072 
3073 	mutex_unlock(&power_domains->lock);
3074 
3075 	skl_init_cdclk(dev_priv);
3076 
3077 	gen9_dbuf_enable(dev_priv);
3078 
3079 	if (resume && dev_priv->csr.dmc_payload)
3080 		intel_csr_load_program(dev_priv);
3081 }
3082 
skl_display_core_uninit(struct drm_i915_private * dev_priv)3083 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3084 {
3085 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3086 	struct i915_power_well *well;
3087 
3088 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3089 
3090 	gen9_dbuf_disable(dev_priv);
3091 
3092 	skl_uninit_cdclk(dev_priv);
3093 
3094 	/* The spec doesn't call for removing the reset handshake flag */
3095 	/* disable PG1 and Misc I/O */
3096 
3097 	mutex_lock(&power_domains->lock);
3098 
3099 	/*
3100 	 * BSpec says to keep the MISC IO power well enabled here, only
3101 	 * remove our request for power well 1.
3102 	 * Note that even though the driver's request is removed power well 1
3103 	 * may stay enabled after this due to DMC's own request on it.
3104 	 */
3105 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3106 	intel_power_well_disable(dev_priv, well);
3107 
3108 	mutex_unlock(&power_domains->lock);
3109 
3110 	usleep_range(10, 30);		/* 10 us delay per Bspec */
3111 }
3112 
bxt_display_core_init(struct drm_i915_private * dev_priv,bool resume)3113 void bxt_display_core_init(struct drm_i915_private *dev_priv,
3114 			   bool resume)
3115 {
3116 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3117 	struct i915_power_well *well;
3118 	uint32_t val;
3119 
3120 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3121 
3122 	/*
3123 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
3124 	 * or else the reset will hang because there is no PCH to respond.
3125 	 * Move the handshake programming to initialization sequence.
3126 	 * Previously was left up to BIOS.
3127 	 */
3128 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
3129 	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
3130 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
3131 
3132 	/* Enable PG1 */
3133 	mutex_lock(&power_domains->lock);
3134 
3135 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3136 	intel_power_well_enable(dev_priv, well);
3137 
3138 	mutex_unlock(&power_domains->lock);
3139 
3140 	bxt_init_cdclk(dev_priv);
3141 
3142 	gen9_dbuf_enable(dev_priv);
3143 
3144 	if (resume && dev_priv->csr.dmc_payload)
3145 		intel_csr_load_program(dev_priv);
3146 }
3147 
bxt_display_core_uninit(struct drm_i915_private * dev_priv)3148 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
3149 {
3150 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3151 	struct i915_power_well *well;
3152 
3153 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3154 
3155 	gen9_dbuf_disable(dev_priv);
3156 
3157 	bxt_uninit_cdclk(dev_priv);
3158 
3159 	/* The spec doesn't call for removing the reset handshake flag */
3160 
3161 	/*
3162 	 * Disable PW1 (PG1).
3163 	 * Note that even though the driver's request is removed power well 1
3164 	 * may stay enabled after this due to DMC's own request on it.
3165 	 */
3166 	mutex_lock(&power_domains->lock);
3167 
3168 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3169 	intel_power_well_disable(dev_priv, well);
3170 
3171 	mutex_unlock(&power_domains->lock);
3172 
3173 	usleep_range(10, 30);		/* 10 us delay per Bspec */
3174 }
3175 
/*
 * Indices into cnl_procmon_values[], one per supported voltage/process
 * ("dot") combination reported by the PORT_COMP_DW3 register.
 */
enum {
	PROCMON_0_85V_DOT_0,
	PROCMON_0_95V_DOT_0,
	PROCMON_0_95V_DOT_1,
	PROCMON_1_05V_DOT_0,
	PROCMON_1_05V_DOT_1,
};

/*
 * Process monitor reference values programmed into PORT_COMP_DW1/9/10
 * by cnl_set_procmon_ref_values(), selected by the voltage/process info
 * read back from the hardware.
 */
static const struct cnl_procmon {
	u32 dw1, dw9, dw10;
} cnl_procmon_values[] = {
	[PROCMON_0_85V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
	[PROCMON_0_95V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
	[PROCMON_0_95V_DOT_1] =
		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
	[PROCMON_1_05V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
	[PROCMON_1_05V_DOT_1] =
		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
3198 
3199 /*
3200  * CNL has just one set of registers, while ICL has two sets: one for port A and
3201  * the other for port B. The CNL registers are equivalent to the ICL port A
3202  * registers, that's why we call the ICL macros even though the function has CNL
3203  * on its name.
3204  */
cnl_set_procmon_ref_values(struct drm_i915_private * dev_priv,enum port port)3205 static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
3206 				       enum port port)
3207 {
3208 	const struct cnl_procmon *procmon;
3209 	u32 val;
3210 
3211 	val = I915_READ(ICL_PORT_COMP_DW3(port));
3212 	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
3213 	default:
3214 		MISSING_CASE(val);
3215 		/* fall through */
3216 	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
3217 		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
3218 		break;
3219 	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
3220 		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
3221 		break;
3222 	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
3223 		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
3224 		break;
3225 	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
3226 		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
3227 		break;
3228 	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
3229 		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
3230 		break;
3231 	}
3232 
3233 	val = I915_READ(ICL_PORT_COMP_DW1(port));
3234 	val &= ~((0xff << 16) | 0xff);
3235 	val |= procmon->dw1;
3236 	I915_WRITE(ICL_PORT_COMP_DW1(port), val);
3237 
3238 	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
3239 	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
3240 }
3241 
cnl_display_core_init(struct drm_i915_private * dev_priv,bool resume)3242 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
3243 {
3244 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3245 	struct i915_power_well *well;
3246 	u32 val;
3247 
3248 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3249 
3250 	/* 1. Enable PCH Reset Handshake */
3251 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
3252 	val |= RESET_PCH_HANDSHAKE_ENABLE;
3253 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
3254 
3255 	/* 2. Enable Comp */
3256 	val = I915_READ(CHICKEN_MISC_2);
3257 	val &= ~CNL_COMP_PWR_DOWN;
3258 	I915_WRITE(CHICKEN_MISC_2, val);
3259 
3260 	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
3261 	cnl_set_procmon_ref_values(dev_priv, PORT_A);
3262 
3263 	val = I915_READ(CNL_PORT_COMP_DW0);
3264 	val |= COMP_INIT;
3265 	I915_WRITE(CNL_PORT_COMP_DW0, val);
3266 
3267 	/* 3. */
3268 	val = I915_READ(CNL_PORT_CL1CM_DW5);
3269 	val |= CL_POWER_DOWN_ENABLE;
3270 	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
3271 
3272 	/*
3273 	 * 4. Enable Power Well 1 (PG1).
3274 	 *    The AUX IO power wells will be enabled on demand.
3275 	 */
3276 	mutex_lock(&power_domains->lock);
3277 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3278 	intel_power_well_enable(dev_priv, well);
3279 	mutex_unlock(&power_domains->lock);
3280 
3281 	/* 5. Enable CD clock */
3282 	cnl_init_cdclk(dev_priv);
3283 
3284 	/* 6. Enable DBUF */
3285 	gen9_dbuf_enable(dev_priv);
3286 
3287 	if (resume && dev_priv->csr.dmc_payload)
3288 		intel_csr_load_program(dev_priv);
3289 }
3290 
cnl_display_core_uninit(struct drm_i915_private * dev_priv)3291 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3292 {
3293 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3294 	struct i915_power_well *well;
3295 	u32 val;
3296 
3297 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3298 
3299 	/* 1. Disable all display engine functions -> aready done */
3300 
3301 	/* 2. Disable DBUF */
3302 	gen9_dbuf_disable(dev_priv);
3303 
3304 	/* 3. Disable CD clock */
3305 	cnl_uninit_cdclk(dev_priv);
3306 
3307 	/*
3308 	 * 4. Disable Power Well 1 (PG1).
3309 	 *    The AUX IO power wells are toggled on demand, so they are already
3310 	 *    disabled at this point.
3311 	 */
3312 	mutex_lock(&power_domains->lock);
3313 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3314 	intel_power_well_disable(dev_priv, well);
3315 	mutex_unlock(&power_domains->lock);
3316 
3317 	usleep_range(10, 30);		/* 10 us delay per Bspec */
3318 
3319 	/* 5. Disable Comp */
3320 	val = I915_READ(CHICKEN_MISC_2);
3321 	val |= CNL_COMP_PWR_DOWN;
3322 	I915_WRITE(CHICKEN_MISC_2, val);
3323 }
3324 
icl_display_core_init(struct drm_i915_private * dev_priv,bool resume)3325 static void icl_display_core_init(struct drm_i915_private *dev_priv,
3326 				  bool resume)
3327 {
3328 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3329 	struct i915_power_well *well;
3330 	enum port port;
3331 	u32 val;
3332 
3333 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3334 
3335 	/* 1. Enable PCH reset handshake. */
3336 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
3337 	val |= RESET_PCH_HANDSHAKE_ENABLE;
3338 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
3339 
3340 	for (port = PORT_A; port <= PORT_B; port++) {
3341 		/* 2. Enable DDI combo PHY comp. */
3342 		val = I915_READ(ICL_PHY_MISC(port));
3343 		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
3344 		I915_WRITE(ICL_PHY_MISC(port), val);
3345 
3346 		cnl_set_procmon_ref_values(dev_priv, port);
3347 
3348 		val = I915_READ(ICL_PORT_COMP_DW0(port));
3349 		val |= COMP_INIT;
3350 		I915_WRITE(ICL_PORT_COMP_DW0(port), val);
3351 
3352 		/* 3. Set power down enable. */
3353 		val = I915_READ(ICL_PORT_CL_DW5(port));
3354 		val |= CL_POWER_DOWN_ENABLE;
3355 		I915_WRITE(ICL_PORT_CL_DW5(port), val);
3356 	}
3357 
3358 	/*
3359 	 * 4. Enable Power Well 1 (PG1).
3360 	 *    The AUX IO power wells will be enabled on demand.
3361 	 */
3362 	mutex_lock(&power_domains->lock);
3363 	well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
3364 	intel_power_well_enable(dev_priv, well);
3365 	mutex_unlock(&power_domains->lock);
3366 
3367 	/* 5. Enable CDCLK. */
3368 	icl_init_cdclk(dev_priv);
3369 
3370 	/* 6. Enable DBUF. */
3371 	icl_dbuf_enable(dev_priv);
3372 
3373 	/* 7. Setup MBUS. */
3374 	icl_mbus_init(dev_priv);
3375 
3376 	/* 8. CHICKEN_DCPR_1 */
3377 	I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
3378 					CNL_DDI_CLOCK_REG_ACCESS_ON);
3379 }
3380 
icl_display_core_uninit(struct drm_i915_private * dev_priv)3381 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3382 {
3383 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3384 	struct i915_power_well *well;
3385 	enum port port;
3386 	u32 val;
3387 
3388 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3389 
3390 	/* 1. Disable all display engine functions -> aready done */
3391 
3392 	/* 2. Disable DBUF */
3393 	icl_dbuf_disable(dev_priv);
3394 
3395 	/* 3. Disable CD clock */
3396 	icl_uninit_cdclk(dev_priv);
3397 
3398 	/*
3399 	 * 4. Disable Power Well 1 (PG1).
3400 	 *    The AUX IO power wells are toggled on demand, so they are already
3401 	 *    disabled at this point.
3402 	 */
3403 	mutex_lock(&power_domains->lock);
3404 	well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
3405 	intel_power_well_disable(dev_priv, well);
3406 	mutex_unlock(&power_domains->lock);
3407 
3408 	/* 5. Disable Comp */
3409 	for (port = PORT_A; port <= PORT_B; port++) {
3410 		val = I915_READ(ICL_PHY_MISC(port));
3411 		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
3412 		I915_WRITE(ICL_PHY_MISC(port), val);
3413 	}
3414 }
3415 
chv_phy_control_init(struct drm_i915_private * dev_priv)3416 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3417 {
3418 	struct i915_power_well *cmn_bc =
3419 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
3420 	struct i915_power_well *cmn_d =
3421 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
3422 
3423 	/*
3424 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
3425 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
3426 	 * instead maintain a shadow copy ourselves. Use the actual
3427 	 * power well state and lane status to reconstruct the
3428 	 * expected initial value.
3429 	 */
3430 	dev_priv->chv_phy_control =
3431 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3432 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
3433 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3434 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3435 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3436 
3437 	/*
3438 	 * If all lanes are disabled we leave the override disabled
3439 	 * with all power down bits cleared to match the state we
3440 	 * would use after disabling the port. Otherwise enable the
3441 	 * override and set the lane powerdown bits accding to the
3442 	 * current lane status.
3443 	 */
3444 	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
3445 		uint32_t status = I915_READ(DPLL(PIPE_A));
3446 		unsigned int mask;
3447 
3448 		mask = status & DPLL_PORTB_READY_MASK;
3449 		if (mask == 0xf)
3450 			mask = 0x0;
3451 		else
3452 			dev_priv->chv_phy_control |=
3453 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3454 
3455 		dev_priv->chv_phy_control |=
3456 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3457 
3458 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3459 		if (mask == 0xf)
3460 			mask = 0x0;
3461 		else
3462 			dev_priv->chv_phy_control |=
3463 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3464 
3465 		dev_priv->chv_phy_control |=
3466 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3467 
3468 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3469 
3470 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3471 	} else {
3472 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
3473 	}
3474 
3475 	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
3476 		uint32_t status = I915_READ(DPIO_PHY_STATUS);
3477 		unsigned int mask;
3478 
3479 		mask = status & DPLL_PORTD_READY_MASK;
3480 
3481 		if (mask == 0xf)
3482 			mask = 0x0;
3483 		else
3484 			dev_priv->chv_phy_control |=
3485 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3486 
3487 		dev_priv->chv_phy_control |=
3488 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3489 
3490 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3491 
3492 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3493 	} else {
3494 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
3495 	}
3496 
3497 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3498 
3499 	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3500 		      dev_priv->chv_phy_control);
3501 }
3502 
vlv_cmnlane_wa(struct drm_i915_private * dev_priv)3503 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3504 {
3505 	struct i915_power_well *cmn =
3506 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
3507 	struct i915_power_well *disp2d =
3508 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
3509 
3510 	/* If the display might be already active skip this */
3511 	if (cmn->ops->is_enabled(dev_priv, cmn) &&
3512 	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
3513 	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
3514 		return;
3515 
3516 	DRM_DEBUG_KMS("toggling display PHY side reset\n");
3517 
3518 	/* cmnlane needs DPLL registers */
3519 	disp2d->ops->enable(dev_priv, disp2d);
3520 
3521 	/*
3522 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3523 	 * Need to assert and de-assert PHY SB reset by gating the
3524 	 * common lane power, then un-gating it.
3525 	 * Simply ungating isn't enough to reset the PHY enough to get
3526 	 * ports and lanes running.
3527 	 */
3528 	cmn->ops->disable(dev_priv, cmn);
3529 }
3530 
3531 /**
3532  * intel_power_domains_init_hw - initialize hardware power domain state
3533  * @dev_priv: i915 device instance
3534  * @resume: Called from resume code paths or not
3535  *
3536  * This function initializes the hardware power domain state and enables all
3537  * power wells belonging to the INIT power domain. Power wells in other
3538  * domains (and not in the INIT domain) are referenced or disabled during the
3539  * modeset state HW readout. After that the reference count of each power well
3540  * must match its HW enabled state, see intel_power_domains_verify_state().
3541  */
intel_power_domains_init_hw(struct drm_i915_private * dev_priv,bool resume)3542 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3543 {
3544 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3545 
3546 	power_domains->initializing = true;
3547 
3548 	if (IS_ICELAKE(dev_priv)) {
3549 		icl_display_core_init(dev_priv, resume);
3550 	} else if (IS_CANNONLAKE(dev_priv)) {
3551 		cnl_display_core_init(dev_priv, resume);
3552 	} else if (IS_GEN9_BC(dev_priv)) {
3553 		skl_display_core_init(dev_priv, resume);
3554 	} else if (IS_GEN9_LP(dev_priv)) {
3555 		bxt_display_core_init(dev_priv, resume);
3556 	} else if (IS_CHERRYVIEW(dev_priv)) {
3557 		mutex_lock(&power_domains->lock);
3558 		chv_phy_control_init(dev_priv);
3559 		mutex_unlock(&power_domains->lock);
3560 	} else if (IS_VALLEYVIEW(dev_priv)) {
3561 		mutex_lock(&power_domains->lock);
3562 		vlv_cmnlane_wa(dev_priv);
3563 		mutex_unlock(&power_domains->lock);
3564 	}
3565 
3566 	/* For now, we need the power well to be always enabled. */
3567 	intel_display_set_init_power(dev_priv, true);
3568 	/* Disable power support if the user asked so. */
3569 	if (!i915_modparams.disable_power_well)
3570 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3571 	intel_power_domains_sync_hw(dev_priv);
3572 	power_domains->initializing = false;
3573 }
3574 
3575 /**
3576  * intel_power_domains_suspend - suspend power domain state
3577  * @dev_priv: i915 device instance
3578  *
3579  * This function prepares the hardware power domain state before entering
3580  * system suspend. It must be paired with intel_power_domains_init_hw().
3581  */
intel_power_domains_suspend(struct drm_i915_private * dev_priv)3582 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3583 {
3584 	/*
3585 	 * Even if power well support was disabled we still want to disable
3586 	 * power wells while we are system suspended.
3587 	 */
3588 	if (!i915_modparams.disable_power_well)
3589 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3590 
3591 	if (IS_ICELAKE(dev_priv))
3592 		icl_display_core_uninit(dev_priv);
3593 	else if (IS_CANNONLAKE(dev_priv))
3594 		cnl_display_core_uninit(dev_priv);
3595 	else if (IS_GEN9_BC(dev_priv))
3596 		skl_display_core_uninit(dev_priv);
3597 	else if (IS_GEN9_LP(dev_priv))
3598 		bxt_display_core_uninit(dev_priv);
3599 }
3600 
intel_power_domains_dump_info(struct drm_i915_private * dev_priv)3601 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3602 {
3603 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3604 	struct i915_power_well *power_well;
3605 
3606 	for_each_power_well(dev_priv, power_well) {
3607 		enum intel_display_power_domain domain;
3608 
3609 		DRM_DEBUG_DRIVER("%-25s %d\n",
3610 				 power_well->name, power_well->count);
3611 
3612 		for_each_power_domain(domain, power_well->domains)
3613 			DRM_DEBUG_DRIVER("  %-23s %d\n",
3614 					 intel_display_power_domain_str(domain),
3615 					 power_domains->domain_use_count[domain]);
3616 	}
3617 }
3618 
3619 /**
3620  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3621  * @dev_priv: i915 device instance
3622  *
3623  * Verify if the reference count of each power well matches its HW enabled
3624  * state and the total refcount of the domains it belongs to. This must be
3625  * called after modeset HW state sanitization, which is responsible for
3626  * acquiring reference counts for any power wells in use and disabling the
3627  * ones left on by BIOS but not required by any active output.
3628  */
intel_power_domains_verify_state(struct drm_i915_private * dev_priv)3629 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3630 {
3631 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3632 	struct i915_power_well *power_well;
3633 	bool dump_domain_info;
3634 
3635 	mutex_lock(&power_domains->lock);
3636 
3637 	dump_domain_info = false;
3638 	for_each_power_well(dev_priv, power_well) {
3639 		enum intel_display_power_domain domain;
3640 		int domains_count;
3641 		bool enabled;
3642 
3643 		/*
3644 		 * Power wells not belonging to any domain (like the MISC_IO
3645 		 * and PW1 power wells) are under FW control, so ignore them,
3646 		 * since their state can change asynchronously.
3647 		 */
3648 		if (!power_well->domains)
3649 			continue;
3650 
3651 		enabled = power_well->ops->is_enabled(dev_priv, power_well);
3652 		if ((power_well->count || power_well->always_on) != enabled)
3653 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3654 				  power_well->name, power_well->count, enabled);
3655 
3656 		domains_count = 0;
3657 		for_each_power_domain(domain, power_well->domains)
3658 			domains_count += power_domains->domain_use_count[domain];
3659 
3660 		if (power_well->count != domains_count) {
3661 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
3662 				  "(refcount %d/domains refcount %d)\n",
3663 				  power_well->name, power_well->count,
3664 				  domains_count);
3665 			dump_domain_info = true;
3666 		}
3667 	}
3668 
3669 	if (dump_domain_info) {
3670 		static bool dumped;
3671 
3672 		if (!dumped) {
3673 			intel_power_domains_dump_info(dev_priv);
3674 			dumped = true;
3675 		}
3676 	}
3677 
3678 	mutex_unlock(&power_domains->lock);
3679 }
3680 
3681 /**
3682  * intel_runtime_pm_get - grab a runtime pm reference
3683  * @dev_priv: i915 device instance
3684  *
3685  * This function grabs a device-level runtime pm reference (mostly used for GEM
3686  * code to ensure the GTT or GT is on) and ensures that it is powered up.
3687  *
3688  * Any runtime pm reference obtained by this function must have a symmetric
3689  * call to intel_runtime_pm_put() to release the reference again.
3690  */
intel_runtime_pm_get(struct drm_i915_private * dev_priv)3691 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3692 {
3693 	struct pci_dev *pdev = dev_priv->drm.pdev;
3694 	struct device *kdev = &pdev->dev;
3695 	int ret;
3696 
3697 	ret = pm_runtime_get_sync(kdev);
3698 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3699 
3700 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3701 	assert_rpm_wakelock_held(dev_priv);
3702 }
3703 
3704 /**
3705  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3706  * @dev_priv: i915 device instance
3707  *
3708  * This function grabs a device-level runtime pm reference if the device is
3709  * already in use and ensures that it is powered up. It is illegal to try
3710  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
3711  *
3712  * Any runtime pm reference obtained by this function must have a symmetric
3713  * call to intel_runtime_pm_put() to release the reference again.
3714  *
3715  * Returns: True if the wakeref was acquired, or False otherwise.
3716  */
intel_runtime_pm_get_if_in_use(struct drm_i915_private * dev_priv)3717 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3718 {
3719 	if (IS_ENABLED(CONFIG_PM)) {
3720 		struct pci_dev *pdev = dev_priv->drm.pdev;
3721 		struct device *kdev = &pdev->dev;
3722 
3723 		/*
3724 		 * In cases runtime PM is disabled by the RPM core and we get
3725 		 * an -EINVAL return value we are not supposed to call this
3726 		 * function, since the power state is undefined. This applies
3727 		 * atm to the late/early system suspend/resume handlers.
3728 		 */
3729 		if (pm_runtime_get_if_in_use(kdev) <= 0)
3730 			return false;
3731 	}
3732 
3733 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3734 	assert_rpm_wakelock_held(dev_priv);
3735 
3736 	return true;
3737 }
3738 
3739 /**
3740  * intel_runtime_pm_get_noresume - grab a runtime pm reference
3741  * @dev_priv: i915 device instance
3742  *
3743  * This function grabs a device-level runtime pm reference (mostly used for GEM
3744  * code to ensure the GTT or GT is on).
3745  *
3746  * It will _not_ power up the device but instead only check that it's powered
3747  * on.  Therefore it is only valid to call this functions from contexts where
3748  * the device is known to be powered up and where trying to power it up would
3749  * result in hilarity and deadlocks. That pretty much means only the system
3750  * suspend/resume code where this is used to grab runtime pm references for
3751  * delayed setup down in work items.
3752  *
3753  * Any runtime pm reference obtained by this function must have a symmetric
3754  * call to intel_runtime_pm_put() to release the reference again.
3755  */
intel_runtime_pm_get_noresume(struct drm_i915_private * dev_priv)3756 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3757 {
3758 	struct pci_dev *pdev = dev_priv->drm.pdev;
3759 	struct device *kdev = &pdev->dev;
3760 
3761 	assert_rpm_wakelock_held(dev_priv);
3762 	pm_runtime_get_noresume(kdev);
3763 
3764 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3765 }
3766 
3767 /**
3768  * intel_runtime_pm_put - release a runtime pm reference
3769  * @dev_priv: i915 device instance
3770  *
3771  * This function drops the device-level runtime pm reference obtained by
3772  * intel_runtime_pm_get() and might power down the corresponding
3773  * hardware block right away if this is the last reference.
3774  */
intel_runtime_pm_put(struct drm_i915_private * dev_priv)3775 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3776 {
3777 	struct pci_dev *pdev = dev_priv->drm.pdev;
3778 	struct device *kdev = &pdev->dev;
3779 
3780 	assert_rpm_wakelock_held(dev_priv);
3781 	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
3782 
3783 	pm_runtime_mark_last_busy(kdev);
3784 	pm_runtime_put_autosuspend(kdev);
3785 }
3786 
3787 /**
3788  * intel_runtime_pm_enable - enable runtime pm
3789  * @dev_priv: i915 device instance
3790  *
3791  * This function enables runtime pm at the end of the driver load sequence.
3792  *
3793  * Note that this function does currently not enable runtime pm for the
3794  * subordinate display power domains. That is only done on the first modeset
3795  * using intel_display_set_init_power().
3796  */
intel_runtime_pm_enable(struct drm_i915_private * dev_priv)3797 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3798 {
3799 	struct pci_dev *pdev = dev_priv->drm.pdev;
3800 	struct device *kdev = &pdev->dev;
3801 
3802 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3803 	pm_runtime_mark_last_busy(kdev);
3804 
3805 	/*
3806 	 * Take a permanent reference to disable the RPM functionality and drop
3807 	 * it only when unloading the driver. Use the low level get/put helpers,
3808 	 * so the driver's own RPM reference tracking asserts also work on
3809 	 * platforms without RPM support.
3810 	 */
3811 	if (!HAS_RUNTIME_PM(dev_priv)) {
3812 		int ret;
3813 
3814 		pm_runtime_dont_use_autosuspend(kdev);
3815 		ret = pm_runtime_get_sync(kdev);
3816 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3817 	} else {
3818 		pm_runtime_use_autosuspend(kdev);
3819 	}
3820 
3821 	/*
3822 	 * The core calls the driver load handler with an RPM reference held.
3823 	 * We drop that here and will reacquire it during unloading in
3824 	 * intel_power_domains_fini().
3825 	 */
3826 	pm_runtime_put_autosuspend(kdev);
3827 }
3828