/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_snps_phy.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);

struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

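/* Write a single 64-bit gen8-style PTE into an iomem-mapped page table. */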
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}

static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma->node.start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
}

static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

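/*
 * Create the display page table (DPT) for a framebuffer: one gen8 PTE per
 * GTT page of the (possibly remapped) surface, with the PTE array itself
 * rounded up to a full GTT page and backed by LMEM or stolen memory.
 */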
static struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

	if (HAS_LMEM(i915))
		dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = &i915->gt;
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma    = dpt_bind_vma;
	vm->vma_ops.unbind_vma  = dpt_unbind_vma;
	vm->vma_ops.set_pages   = ggtt_set_pages;
	vm->vma_ops.clear_pages = clear_pages;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;

	return &dpt->vm;
}

static void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vm_close(&dpt->vm);
}

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

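/*
 * Read a CCK clock divider and derive the clock rate from the reference:
 * rate = ref_freq * 2 / (divider + 1), rounded to the closest unit.
 * The status field is cross-checked against the divider to warn if a
 * frequency change is still in progress.
 */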
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

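/*
 * Sample the pipe scanline counter twice, 5 ms apart; if the value
 * changed, the pipe is actively scanning out.
 */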
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (DISPLAY_VER(dev_priv) == 2)
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}

static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
				    u64 modifier)
{
	return info->is_yuv &&
	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}

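/* Width in bytes of a single tile for the given fb color plane. */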
1056 unsigned int
intel_tile_width_bytes(const struct drm_framebuffer * fb,int color_plane)1057 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1058 {
1059 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1060 	unsigned int cpp = fb->format->cpp[color_plane];
1061 
1062 	switch (fb->modifier) {
1063 	case DRM_FORMAT_MOD_LINEAR:
1064 		return intel_tile_size(dev_priv);
1065 	case I915_FORMAT_MOD_X_TILED:
1066 		if (DISPLAY_VER(dev_priv) == 2)
1067 			return 128;
1068 		else
1069 			return 512;
1070 	case I915_FORMAT_MOD_Y_TILED_CCS:
1071 		if (is_ccs_plane(fb, color_plane))
1072 			return 128;
1073 		fallthrough;
1074 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1075 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1076 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1077 		if (is_ccs_plane(fb, color_plane))
1078 			return 64;
1079 		fallthrough;
1080 	case I915_FORMAT_MOD_Y_TILED:
1081 		if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
1082 			return 128;
1083 		else
1084 			return 512;
1085 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1086 		if (is_ccs_plane(fb, color_plane))
1087 			return 128;
1088 		fallthrough;
1089 	case I915_FORMAT_MOD_Yf_TILED:
1090 		switch (cpp) {
1091 		case 1:
1092 			return 64;
1093 		case 2:
1094 		case 4:
1095 			return 128;
1096 		case 8:
1097 		case 16:
1098 			return 256;
1099 		default:
1100 			MISSING_CASE(cpp);
1101 			return cpp;
1102 		}
1103 		break;
1104 	default:
1105 		MISSING_CASE(fb->modifier);
1106 		return cpp;
1107 	}
1108 }
1109 
1110 unsigned int
intel_fb_align_height(const struct drm_framebuffer * fb,int color_plane,unsigned int height)1111 intel_fb_align_height(const struct drm_framebuffer *fb,
1112 		      int color_plane, unsigned int height)
1113 {
1114 	unsigned int tile_height = intel_tile_height(fb, color_plane);
1115 
1116 	return ALIGN(height, tile_height);
1117 }
1118 
intel_rotation_info_size(const struct intel_rotation_info * rot_info)1119 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1120 {
1121 	unsigned int size = 0;
1122 	int i;
1123 
1124 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1125 		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
1126 
1127 	return size;
1128 }
1129 
intel_remapped_info_size(const struct intel_remapped_info * rem_info)1130 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1131 {
1132 	unsigned int size = 0;
1133 	int i;
1134 
1135 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1136 		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
1137 
1138 	return size;
1139 }
1140 
intel_linear_alignment(const struct drm_i915_private * dev_priv)1141 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
1142 {
1143 	if (DISPLAY_VER(dev_priv) >= 9)
1144 		return 256 * 1024;
1145 	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
1146 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1147 		return 128 * 1024;
1148 	else if (DISPLAY_VER(dev_priv) >= 4)
1149 		return 4 * 1024;
1150 	else
1151 		return 0;
1152 }
1153 
has_async_flips(struct drm_i915_private * i915)1154 static bool has_async_flips(struct drm_i915_private *i915)
1155 {
1156 	return DISPLAY_VER(i915) >= 5;
1157 }
1158 
intel_surf_alignment(const struct drm_framebuffer * fb,int color_plane)1159 unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
1160 				  int color_plane)
1161 {
1162 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1163 
1164 	if (intel_fb_uses_dpt(fb))
1165 		return 512 * 4096;
1166 
1167 	/* AUX_DIST needs only 4K alignment */
1168 	if (is_ccs_plane(fb, color_plane))
1169 		return 4096;
1170 
1171 	if (is_semiplanar_uv_plane(fb, color_plane)) {
1172 		/*
1173 		 * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
1174 		 * alignment for linear UV planes on all platforms.
1175 		 */
1176 		if (DISPLAY_VER(dev_priv) >= 12) {
1177 			if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1178 				return intel_linear_alignment(dev_priv);
1179 
1180 			return intel_tile_row_size(fb, color_plane);
1181 		}
1182 
1183 		return 4096;
1184 	}
1185 
1186 	drm_WARN_ON(&dev_priv->drm, color_plane != 0);
1187 
1188 	switch (fb->modifier) {
1189 	case DRM_FORMAT_MOD_LINEAR:
1190 		return intel_linear_alignment(dev_priv);
1191 	case I915_FORMAT_MOD_X_TILED:
1192 		if (has_async_flips(dev_priv))
1193 			return 256 * 1024;
1194 		return 0;
1195 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1196 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1197 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1198 		return 16 * 1024;
1199 	case I915_FORMAT_MOD_Y_TILED_CCS:
1200 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1201 	case I915_FORMAT_MOD_Y_TILED:
1202 	case I915_FORMAT_MOD_Yf_TILED:
1203 		return 1 * 1024 * 1024;
1204 	default:
1205 		MISSING_CASE(fb->modifier);
1206 		return 0;
1207 	}
1208 }
1209 
intel_plane_uses_fence(const struct intel_plane_state * plane_state)1210 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1211 {
1212 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1213 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1214 
1215 	return DISPLAY_VER(dev_priv) < 4 ||
1216 		(plane->has_fbc &&
1217 		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
1218 }
1219 
1220 static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer * fb,const struct i915_ggtt_view * view,bool uses_fence,unsigned long * out_flags,struct i915_address_space * vm)1221 intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
1222 		     const struct i915_ggtt_view *view,
1223 		     bool uses_fence,
1224 		     unsigned long *out_flags,
1225 		     struct i915_address_space *vm)
1226 {
1227 	struct drm_device *dev = fb->dev;
1228 	struct drm_i915_private *dev_priv = to_i915(dev);
1229 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1230 	struct i915_vma *vma;
1231 	u32 alignment;
1232 	int ret;
1233 
1234 	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
1235 		return ERR_PTR(-EINVAL);
1236 
1237 	alignment = 4096 * 512;
1238 
1239 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1240 
1241 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
1242 	if (ret) {
1243 		vma = ERR_PTR(ret);
1244 		goto err;
1245 	}
1246 
1247 	vma = i915_vma_instance(obj, vm, view);
1248 	if (IS_ERR(vma))
1249 		goto err;
1250 
1251 	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
1252 		ret = i915_vma_unbind(vma);
1253 		if (ret) {
1254 			vma = ERR_PTR(ret);
1255 			goto err;
1256 		}
1257 	}
1258 
1259 	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
1260 	if (ret) {
1261 		vma = ERR_PTR(ret);
1262 		goto err;
1263 	}
1264 
1265 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
1266 
1267 	i915_gem_object_flush_if_display(obj);
1268 
1269 	i915_vma_get(vma);
1270 err:
1271 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1272 
1273 	return vma;
1274 }
1275 
1276 struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer * fb,bool phys_cursor,const struct i915_ggtt_view * view,bool uses_fence,unsigned long * out_flags)1277 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1278 			   bool phys_cursor,
1279 			   const struct i915_ggtt_view *view,
1280 			   bool uses_fence,
1281 			   unsigned long *out_flags)
1282 {
1283 	struct drm_device *dev = fb->dev;
1284 	struct drm_i915_private *dev_priv = to_i915(dev);
1285 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1286 	intel_wakeref_t wakeref;
1287 	struct i915_gem_ww_ctx ww;
1288 	struct i915_vma *vma;
1289 	unsigned int pinctl;
1290 	u32 alignment;
1291 	int ret;
1292 
1293 	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1294 		return ERR_PTR(-EINVAL);
1295 
1296 	if (phys_cursor)
1297 		alignment = intel_cursor_alignment(dev_priv);
1298 	else
1299 		alignment = intel_surf_alignment(fb, 0);
1300 	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1301 		return ERR_PTR(-EINVAL);
1302 
1303 	/* Note that the w/a also requires 64 PTE of padding following the
1304 	 * bo. We currently fill all unused PTE with the shadow page and so
1305 	 * we should always have valid PTE following the scanout preventing
1306 	 * the VT-d warning.
1307 	 */
1308 	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1309 		alignment = 256 * 1024;
1310 
1311 	/*
1312 	 * Global gtt pte registers are special registers which actually forward
1313 	 * writes to a chunk of system memory. Which means that there is no risk
1314 	 * that the register values disappear as soon as we call
1315 	 * intel_runtime_pm_put(), so it is correct to wrap only the
1316 	 * pin/unpin/fence and not more.
1317 	 */
1318 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1319 
1320 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1321 
1322 	/*
1323 	 * Valleyview is definitely limited to scanning out the first
1324 	 * 512MiB. Lets presume this behaviour was inherited from the
1325 	 * g4x display engine and that all earlier gen are similarly
1326 	 * limited. Testing suggests that it is a little more
1327 	 * complicated than this. For example, Cherryview appears quite
1328 	 * happy to scanout from anywhere within its global aperture.
1329 	 */
1330 	pinctl = 0;
1331 	if (HAS_GMCH(dev_priv))
1332 		pinctl |= PIN_MAPPABLE;
1333 
1334 	i915_gem_ww_ctx_init(&ww, true);
1335 retry:
1336 	ret = i915_gem_object_lock(obj, &ww);
1337 	if (!ret && phys_cursor)
1338 		ret = i915_gem_object_attach_phys(obj, alignment);
1339 	else if (!ret && HAS_LMEM(dev_priv))
1340 		ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
1341 	/* TODO: Do we need to sync when migration becomes async? */
1342 	if (!ret)
1343 		ret = i915_gem_object_pin_pages(obj);
1344 	if (ret)
1345 		goto err;
1346 
1347 	if (!ret) {
1348 		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
1349 							   view, pinctl);
1350 		if (IS_ERR(vma)) {
1351 			ret = PTR_ERR(vma);
1352 			goto err_unpin;
1353 		}
1354 	}
1355 
1356 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1357 		/*
1358 		 * Install a fence for tiled scan-out. Pre-i965 always needs a
1359 		 * fence, whereas 965+ only requires a fence if using
1360 		 * framebuffer compression.  For simplicity, we always, when
1361 		 * possible, install a fence as the cost is not that onerous.
1362 		 *
1363 		 * If we fail to fence the tiled scanout, then either the
1364 		 * modeset will reject the change (which is highly unlikely as
1365 		 * the affected systems, all but one, do not have unmappable
1366 		 * space) or we will not be able to enable full powersaving
1367 		 * techniques (also likely not to apply due to various limits
1368 		 * FBC and the like impose on the size of the buffer, which
1369 		 * presumably we violated anyway with this unmappable buffer).
1370 		 * Anyway, it is presumably better to stumble onwards with
1371 		 * something and try to run the system in a "less than optimal"
1372 		 * mode that matches the user configuration.
1373 		 */
1374 		ret = i915_vma_pin_fence(vma);
1375 		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
1376 			i915_vma_unpin(vma);
1377 			goto err_unpin;
1378 		}
1379 		ret = 0;
1380 
1381 		if (vma->fence)
1382 			*out_flags |= PLANE_HAS_FENCE;
1383 	}
1384 
1385 	i915_vma_get(vma);
1386 
1387 err_unpin:
1388 	i915_gem_object_unpin_pages(obj);
1389 err:
1390 	if (ret == -EDEADLK) {
1391 		ret = i915_gem_ww_ctx_backoff(&ww);
1392 		if (!ret)
1393 			goto retry;
1394 	}
1395 	i915_gem_ww_ctx_fini(&ww);
1396 	if (ret)
1397 		vma = ERR_PTR(ret);
1398 
1399 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1400 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1401 	return vma;
1402 }
1403 
intel_unpin_fb_vma(struct i915_vma * vma,unsigned long flags)1404 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1405 {
1406 	if (flags & PLANE_HAS_FENCE)
1407 		i915_vma_unpin_fence(vma);
1408 	i915_vma_unpin(vma);
1409 	i915_vma_put(vma);
1410 }
1411 
1412 /*
1413  * Convert the x/y offsets into a linear offset.
1414  * Only valid with 0/180 degree rotation, which is fine since linear
1415  * offset is only used with linear buffers on pre-hsw and tiled buffers
1416  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1417  */
intel_fb_xy_to_linear(int x,int y,const struct intel_plane_state * state,int color_plane)1418 u32 intel_fb_xy_to_linear(int x, int y,
1419 			  const struct intel_plane_state *state,
1420 			  int color_plane)
1421 {
1422 	const struct drm_framebuffer *fb = state->hw.fb;
1423 	unsigned int cpp = fb->format->cpp[color_plane];
1424 	unsigned int pitch = state->view.color_plane[color_plane].stride;
1425 
1426 	return y * pitch + x * cpp;
1427 }
1428 
1429 /*
1430  * Add the x/y offsets derived from fb->offsets[] to the user
1431  * specified plane src x/y offsets. The resulting x/y offsets
1432  * specify the start of scanout from the beginning of the gtt mapping.
1433  */
intel_add_fb_offsets(int * x,int * y,const struct intel_plane_state * state,int color_plane)1434 void intel_add_fb_offsets(int *x, int *y,
1435 			  const struct intel_plane_state *state,
1436 			  int color_plane)
1437 
1438 {
1439 	*x += state->view.color_plane[color_plane].x;
1440 	*y += state->view.color_plane[color_plane].y;
1441 }
1442 
intel_fb_modifier_to_tiling(u64 fb_modifier)1443 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1444 {
1445 	switch (fb_modifier) {
1446 	case I915_FORMAT_MOD_X_TILED:
1447 		return I915_TILING_X;
1448 	case I915_FORMAT_MOD_Y_TILED:
1449 	case I915_FORMAT_MOD_Y_TILED_CCS:
1450 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1451 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1452 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1453 		return I915_TILING_Y;
1454 	default:
1455 		return I915_TILING_NONE;
1456 	}
1457 }
1458 
1459 /*
1460  * From the Sky Lake PRM:
1461  * "The Color Control Surface (CCS) contains the compression status of
1462  *  the cache-line pairs. The compression state of the cache-line pair
1463  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1464  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1465  *  cache-line-pairs. CCS is always Y tiled."
1466  *
1467  * Since cache line pairs refers to horizontally adjacent cache lines,
1468  * each cache line in the CCS corresponds to an area of 32x16 cache
1469  * lines on the main surface. Since each pixel is 4 bytes, this gives
1470  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1471  * main surface.
1472  */
1473 static const struct drm_format_info skl_ccs_formats[] = {
1474 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1475 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1476 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1477 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1478 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1479 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1480 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1481 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1482 };
1483 
1484 /*
1485  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1486  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1487  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1488  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1489  * the main surface.
1490  */
1491 static const struct drm_format_info gen12_ccs_formats[] = {
1492 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1493 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1494 	  .hsub = 1, .vsub = 1, },
1495 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1496 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1497 	  .hsub = 1, .vsub = 1, },
1498 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1499 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1500 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1501 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1502 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1503 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1504 	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
1505 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1506 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1507 	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
1508 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1509 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1510 	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
1511 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1512 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1513 	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
1514 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1515 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1516 	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
1517 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1518 	  .hsub = 1, .vsub = 1, .is_yuv = true },
1519 	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
1520 	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1521 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1522 	{ .format = DRM_FORMAT_P010, .num_planes = 4,
1523 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1524 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1525 	{ .format = DRM_FORMAT_P012, .num_planes = 4,
1526 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1527 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1528 	{ .format = DRM_FORMAT_P016, .num_planes = 4,
1529 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1530 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1531 };
1532 
1533 /*
1534  * Same as gen12_ccs_formats[] above, but with additional surface used
1535  * to pass Clear Color information in plane 2 with 64 bits of data.
1536  */
1537 static const struct drm_format_info gen12_ccs_cc_formats[] = {
1538 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
1539 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1540 	  .hsub = 1, .vsub = 1, },
1541 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
1542 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1543 	  .hsub = 1, .vsub = 1, },
1544 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
1545 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1546 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1547 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
1548 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1549 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1550 };
1551 
1552 static const struct drm_format_info *
1553 lookup_format_info(const struct drm_format_info formats[],
1554 		   int num_formats, u32 format)
1555 {
1556 	int i;
1557 
1558 	for (i = 0; i < num_formats; i++) {
1559 		if (formats[i].format == format)
1560 			return &formats[i];
1561 	}
1562 
1563 	return NULL;
1564 }
1565 
1566 static const struct drm_format_info *
1567 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1568 {
1569 	switch (cmd->modifier[0]) {
1570 	case I915_FORMAT_MOD_Y_TILED_CCS:
1571 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1572 		return lookup_format_info(skl_ccs_formats,
1573 					  ARRAY_SIZE(skl_ccs_formats),
1574 					  cmd->pixel_format);
1575 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1576 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1577 		return lookup_format_info(gen12_ccs_formats,
1578 					  ARRAY_SIZE(gen12_ccs_formats),
1579 					  cmd->pixel_format);
1580 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1581 		return lookup_format_info(gen12_ccs_cc_formats,
1582 					  ARRAY_SIZE(gen12_ccs_cc_formats),
1583 					  cmd->pixel_format);
1584 	default:
1585 		return NULL;
1586 	}
1587 }
1588 
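/*
 * The aux pitch follows from the layout described above: each 64 byte CCS
 * cache line covers 4 Y-tiles, i.e. 4 * 128 = 512 bytes of main surface
 * pitch. For example, a 16384 byte main pitch (4096 pixels at 4 bytes each)
 * needs DIV_ROUND_UP(16384, 512) * 64 = 2048 bytes of CCS pitch.
 */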
1589 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1590 {
1591 	return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1592 			    512) * 64;
1593 }
1594 
1595 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1596 			      u32 pixel_format, u64 modifier)
1597 {
1598 	struct intel_crtc *crtc;
1599 	struct intel_plane *plane;
1600 
1601 	if (!HAS_DISPLAY(dev_priv))
1602 		return 0;
1603 
1604 	/*
1605 	 * We assume the primary plane for pipe A has
1606 	 * the highest stride limits of them all;
1607 	 * if pipe A is disabled, use the first pipe from pipe_mask instead.
1608 	 */
1609 	crtc = intel_get_first_crtc(dev_priv);
1610 	if (!crtc)
1611 		return 0;
1612 
1613 	plane = to_intel_plane(crtc->base.primary);
1614 
1615 	return plane->max_stride(plane, pixel_format, modifier,
1616 				 DRM_MODE_ROTATE_0);
1617 }
1618 
1619 static
1620 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1621 			u32 pixel_format, u64 modifier)
1622 {
1623 	/*
1624 	 * Arbitrary limit for gen4+ chosen to match the
1625 	 * render engine max stride.
1626 	 *
1627 	 * The new CCS hash mode makes remapping impossible.
1628 	 */
1629 	if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
1630 	    intel_modifier_uses_dpt(dev_priv, modifier))
1631 		return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1632 	else if (DISPLAY_VER(dev_priv) >= 7)
1633 		return 256 * 1024;
1634 	else
1635 		return 128 * 1024;
1636 }
1637 
1638 static u32
1639 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1640 {
1641 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1642 	u32 tile_width;
1643 
1644 	if (is_surface_linear(fb, color_plane)) {
1645 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1646 							   fb->format->format,
1647 							   fb->modifier);
1648 
1649 		/*
1650 		 * To make remapping with linear generally feasible
1651 		 * we need the stride to be page aligned.
1652 		 */
1653 		if (fb->pitches[color_plane] > max_stride &&
1654 		    !is_ccs_modifier(fb->modifier))
1655 			return intel_tile_size(dev_priv);
1656 		else
1657 			return 64;
1658 	}
1659 
1660 	tile_width = intel_tile_width_bytes(fb, color_plane);
1661 	if (is_ccs_modifier(fb->modifier)) {
1662 		/*
1663 		 * Display WA #0531: skl,bxt,kbl,glk
1664 		 *
1665 		 * Render decompression and plane width > 3840
1666 		 * combined with horizontal panning requires the
1667 		 * plane stride to be a multiple of 4. We'll just
1668 		 * require the entire fb to accommodate that to avoid
1669 		 * potential runtime errors at plane configuration time.
1670 		 */
1671 		if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
1672 		    color_plane == 0 && fb->width > 3840)
1673 			tile_width *= 4;
1674 		/*
1675 		 * The main surface pitch must be padded to a multiple of four
1676 		 * tile widths.
1677 		 */
1678 		else if (DISPLAY_VER(dev_priv) >= 12)
1679 			tile_width *= 4;
1680 	}
1681 	return tile_width;
1682 }
1683 
1684 static struct i915_vma *
1685 initial_plane_vma(struct drm_i915_private *i915,
1686 		  struct intel_initial_plane_config *plane_config)
1687 {
1688 	struct drm_i915_gem_object *obj;
1689 	struct i915_vma *vma;
1690 	u32 base, size;
1691 
1692 	if (plane_config->size == 0)
1693 		return NULL;
1694 
1695 	base = round_down(plane_config->base,
1696 			  I915_GTT_MIN_ALIGNMENT);
1697 	size = round_up(plane_config->base + plane_config->size,
1698 			I915_GTT_MIN_ALIGNMENT);
1699 	size -= base;
1700 
1701 	/*
1702 	 * If the FB is too big, just don't use it since fbdev is not very
1703 	 * important and we should probably use that space with FBC or other
1704 	 * features.
1705 	 */
1706 	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
1707 	    size * 2 > i915->stolen_usable_size)
1708 		return NULL;
1709 
1710 	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
1711 	if (IS_ERR(obj))
1712 		return NULL;
1713 
1714 	/*
1715 	 * Mark it WT ahead of time to avoid changing the
1716 	 * cache_level during fbdev initialization. The
1717 	 * unbind there would get stuck waiting for rcu.
1718 	 */
1719 	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
1720 					    I915_CACHE_WT : I915_CACHE_NONE);
1721 
1722 	switch (plane_config->tiling) {
1723 	case I915_TILING_NONE:
1724 		break;
1725 	case I915_TILING_X:
1726 	case I915_TILING_Y:
1727 		obj->tiling_and_stride =
1728 			plane_config->fb->base.pitches[0] |
1729 			plane_config->tiling;
1730 		break;
1731 	default:
1732 		MISSING_CASE(plane_config->tiling);
1733 		goto err_obj;
1734 	}
1735 
1736 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1737 	if (IS_ERR(vma))
1738 		goto err_obj;
1739 
1740 	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
1741 		goto err_obj;
1742 
1743 	if (i915_gem_object_is_tiled(obj) &&
1744 	    !i915_vma_is_map_and_fenceable(vma))
1745 		goto err_obj;
1746 
1747 	return vma;
1748 
1749 err_obj:
1750 	i915_gem_object_put(obj);
1751 	return NULL;
1752 }
1753 
1754 static bool
1755 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
1756 			      struct intel_initial_plane_config *plane_config)
1757 {
1758 	struct drm_device *dev = crtc->base.dev;
1759 	struct drm_i915_private *dev_priv = to_i915(dev);
1760 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1761 	struct drm_framebuffer *fb = &plane_config->fb->base;
1762 	struct i915_vma *vma;
1763 
1764 	switch (fb->modifier) {
1765 	case DRM_FORMAT_MOD_LINEAR:
1766 	case I915_FORMAT_MOD_X_TILED:
1767 	case I915_FORMAT_MOD_Y_TILED:
1768 		break;
1769 	default:
1770 		drm_dbg(&dev_priv->drm,
1771 			"Unsupported modifier for initial FB: 0x%llx\n",
1772 			fb->modifier);
1773 		return false;
1774 	}
1775 
1776 	vma = initial_plane_vma(dev_priv, plane_config);
1777 	if (!vma)
1778 		return false;
1779 
1780 	mode_cmd.pixel_format = fb->format->format;
1781 	mode_cmd.width = fb->width;
1782 	mode_cmd.height = fb->height;
1783 	mode_cmd.pitches[0] = fb->pitches[0];
1784 	mode_cmd.modifier[0] = fb->modifier;
1785 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
1786 
1787 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
1788 				   vma->obj, &mode_cmd)) {
1789 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
1790 		goto err_vma;
1791 	}
1792 
1793 	plane_config->vma = vma;
1794 	return true;
1795 
1796 err_vma:
1797 	i915_vma_put(vma);
1798 	return false;
1799 }
1800 
1801 static void
1802 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1803 			struct intel_plane_state *plane_state,
1804 			bool visible)
1805 {
1806 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1807 
1808 	plane_state->uapi.visible = visible;
1809 
1810 	if (visible)
1811 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1812 	else
1813 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1814 }
1815 
1816 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1817 {
1818 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1819 	struct drm_plane *plane;
1820 
1821 	/*
1822 	 * Active_planes aliases if multiple "primary" or cursor planes
1823 	 * have been used on the same (or wrong) pipe. plane_mask uses
1824 	 * unique ids, hence we can use that to reconstruct active_planes.
1825 	 */
1826 	crtc_state->enabled_planes = 0;
1827 	crtc_state->active_planes = 0;
1828 
1829 	drm_for_each_plane_mask(plane, &dev_priv->drm,
1830 				crtc_state->uapi.plane_mask) {
1831 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1832 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1833 	}
1834 }
1835 
1836 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
1837 					 struct intel_plane *plane)
1838 {
1839 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1840 	struct intel_crtc_state *crtc_state =
1841 		to_intel_crtc_state(crtc->base.state);
1842 	struct intel_plane_state *plane_state =
1843 		to_intel_plane_state(plane->base.state);
1844 
1845 	drm_dbg_kms(&dev_priv->drm,
1846 		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
1847 		    plane->base.base.id, plane->base.name,
1848 		    crtc->base.base.id, crtc->base.name);
1849 
1850 	intel_set_plane_visible(crtc_state, plane_state, false);
1851 	fixup_plane_bitmasks(crtc_state);
1852 	crtc_state->data_rate[plane->id] = 0;
1853 	crtc_state->min_cdclk[plane->id] = 0;
1854 
1855 	if (plane->id == PLANE_PRIMARY)
1856 		hsw_disable_ips(crtc_state);
1857 
1858 	/*
1859 	 * Vblank time updates from the shadow to live plane control register
1860 	 * are blocked if the memory self-refresh mode is active at that
1861 	 * moment. So to make sure the plane gets truly disabled, first
1862 	 * disable the self-refresh mode. The self-refresh enable bit in turn
1863 	 * will be checked/applied by the HW only at the next frame start
1864 	 * event which is after the vblank start event, so we need to have a
1865 	 * wait-for-vblank between disabling the plane and the pipe.
1866 	 */
1867 	if (HAS_GMCH(dev_priv) &&
1868 	    intel_set_memory_cxsr(dev_priv, false))
1869 		intel_wait_for_vblank(dev_priv, crtc->pipe);
1870 
1871 	/*
1872 	 * Gen2 reports pipe underruns whenever all planes are disabled.
1873 	 * So disable underrun reporting before all the planes get disabled.
1874 	 */
1875 	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
1876 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
1877 
1878 	intel_disable_plane(plane, crtc_state);
1879 	intel_wait_for_vblank(dev_priv, crtc->pipe);
1880 }
1881 
1882 static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
1883 {
1884 	struct drm_i915_private *i915 = vm->i915;
1885 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
1886 	intel_wakeref_t wakeref;
1887 	struct i915_vma *vma;
1888 	void __iomem *iomem;
1889 
1890 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1891 	atomic_inc(&i915->gpu_error.pending_fb_pin);
1892 
1893 	vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
1894 				       HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
1895 	if (IS_ERR(vma))
1896 		goto err;
1897 
1898 	iomem = i915_vma_pin_iomap(vma);
1899 	i915_vma_unpin(vma);
1900 	if (IS_ERR(iomem)) {
1901 		vma = ERR_CAST(iomem);
1902 		goto err;
1903 	}
1904 
1905 	dpt->vma = vma;
1906 	dpt->iomem = iomem;
1907 
1908 	i915_vma_get(vma);
1909 
1910 err:
1911 	atomic_dec(&i915->gpu_error.pending_fb_pin);
1912 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1913 
1914 	return vma;
1915 }
1916 
1917 static void intel_dpt_unpin(struct i915_address_space *vm)
1918 {
1919 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
1920 
1921 	i915_vma_unpin_iomap(dpt->vma);
1922 	i915_vma_put(dpt->vma);
1923 }
1924 
1925 static bool
1926 intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
1927 			      const struct intel_initial_plane_config *plane_config,
1928 			      struct drm_framebuffer **fb,
1929 			      struct i915_vma **vma)
1930 {
1931 	struct intel_crtc *crtc;
1932 
1933 	for_each_intel_crtc(&i915->drm, crtc) {
1934 		struct intel_crtc_state *crtc_state =
1935 			to_intel_crtc_state(crtc->base.state);
1936 		struct intel_plane *plane =
1937 			to_intel_plane(crtc->base.primary);
1938 		struct intel_plane_state *plane_state =
1939 			to_intel_plane_state(plane->base.state);
1940 
1941 		if (!crtc_state->uapi.active)
1942 			continue;
1943 
1944 		if (!plane_state->ggtt_vma)
1945 			continue;
1946 
1947 		if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
1948 			*fb = plane_state->hw.fb;
1949 			*vma = plane_state->ggtt_vma;
1950 			return true;
1951 		}
1952 	}
1953 
1954 	return false;
1955 }
1956 
1957 static void
1958 intel_find_initial_plane_obj(struct intel_crtc *crtc,
1959 			     struct intel_initial_plane_config *plane_config)
1960 {
1961 	struct drm_device *dev = crtc->base.dev;
1962 	struct drm_i915_private *dev_priv = to_i915(dev);
1963 	struct intel_crtc_state *crtc_state =
1964 		to_intel_crtc_state(crtc->base.state);
1965 	struct intel_plane *plane =
1966 		to_intel_plane(crtc->base.primary);
1967 	struct intel_plane_state *plane_state =
1968 		to_intel_plane_state(plane->base.state);
1969 	struct drm_framebuffer *fb;
1970 	struct i915_vma *vma;
1971 
1972 	/*
1973 	 * TODO:
1974 	 *   Disable planes if get_initial_plane_config() failed.
1975 	 *   Make sure things work if the surface base is not page aligned.
1976 	 */
1977 	if (!plane_config->fb)
1978 		return;
1979 
1980 	if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
1981 		fb = &plane_config->fb->base;
1982 		vma = plane_config->vma;
1983 		goto valid_fb;
1984 	}
1985 
1986 	/*
1987 	 * Failed to alloc the obj, check to see if we should share
1988 	 * an fb with another CRTC instead
1989 	 */
1990 	if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
1991 		goto valid_fb;
1992 
1993 	/*
1994 	 * We've failed to reconstruct the BIOS FB.  Current display state
1995 	 * indicates that the primary plane is visible, but has a NULL FB,
1996 	 * which will lead to problems later if we don't fix it up.  The
1997 	 * simplest solution is to just disable the primary plane now and
1998 	 * pretend the BIOS never had it enabled.
1999 	 */
2000 	intel_plane_disable_noatomic(crtc, plane);
2001 	if (crtc_state->bigjoiner) {
2002 		struct intel_crtc *slave =
2003 			crtc_state->bigjoiner_linked_crtc;
2004 		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
2005 	}
2006 
2007 	return;
2008 
2009 valid_fb:
2010 	plane_state->uapi.rotation = plane_config->rotation;
2011 	intel_fb_fill_view(to_intel_framebuffer(fb),
2012 			   plane_state->uapi.rotation, &plane_state->view);
2013 
2014 	__i915_vma_pin(vma);
2015 	plane_state->ggtt_vma = i915_vma_get(vma);
2016 	if (intel_plane_uses_fence(plane_state) &&
2017 	    i915_vma_pin_fence(vma) == 0 && vma->fence)
2018 		plane_state->flags |= PLANE_HAS_FENCE;
2019 
2020 	plane_state->uapi.src_x = 0;
2021 	plane_state->uapi.src_y = 0;
2022 	plane_state->uapi.src_w = fb->width << 16;
2023 	plane_state->uapi.src_h = fb->height << 16;
2024 
2025 	plane_state->uapi.crtc_x = 0;
2026 	plane_state->uapi.crtc_y = 0;
2027 	plane_state->uapi.crtc_w = fb->width;
2028 	plane_state->uapi.crtc_h = fb->height;
2029 
2030 	if (plane_config->tiling)
2031 		dev_priv->preserve_bios_swizzle = true;
2032 
2033 	plane_state->uapi.fb = fb;
2034 	drm_framebuffer_get(fb);
2035 
2036 	plane_state->uapi.crtc = &crtc->base;
2037 	intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
2038 
2039 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
2040 
2041 	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
2042 }
2043 
2044 unsigned int
2045 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2046 {
2047 	int x = 0, y = 0;
2048 
2049 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2050 					  plane_state->view.color_plane[0].offset, 0);
2051 
2052 	return y;
2053 }
2054 
2055 static int
2056 __intel_display_resume(struct drm_device *dev,
2057 		       struct drm_atomic_state *state,
2058 		       struct drm_modeset_acquire_ctx *ctx)
2059 {
2060 	struct drm_crtc_state *crtc_state;
2061 	struct drm_crtc *crtc;
2062 	int i, ret;
2063 
2064 	intel_modeset_setup_hw_state(dev, ctx);
2065 	intel_vga_redisable(to_i915(dev));
2066 
2067 	if (!state)
2068 		return 0;
2069 
2070 	/*
2071 	 * We've duplicated the state; pointers to the old state are invalid.
2072 	 *
2073 	 * Don't attempt to use the old state until we commit the duplicated state.
2074 	 */
2075 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2076 		/*
2077 		 * Force recalculation even if we restore
2078 		 * current state. With fast modeset this may not result
2079 		 * in a modeset when the state is compatible.
2080 		 */
2081 		crtc_state->mode_changed = true;
2082 	}
2083 
2084 	/* ignore any reset values/BIOS leftovers in the WM registers */
2085 	if (!HAS_GMCH(to_i915(dev)))
2086 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
2087 
2088 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
2089 
2090 	drm_WARN_ON(dev, ret == -EDEADLK);
2091 	return ret;
2092 }
2093 
2094 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2095 {
2096 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2097 		intel_has_gpu_reset(&dev_priv->gt));
2098 }
2099 
2100 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
2101 {
2102 	struct drm_device *dev = &dev_priv->drm;
2103 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2104 	struct drm_atomic_state *state;
2105 	int ret;
2106 
2107 	if (!HAS_DISPLAY(dev_priv))
2108 		return;
2109 
2110 	/* reset doesn't touch the display */
2111 	if (!dev_priv->params.force_reset_modeset_test &&
2112 	    !gpu_reset_clobbers_display(dev_priv))
2113 		return;
2114 
2115 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
2116 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2117 	smp_mb__after_atomic();
2118 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
2119 
2120 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
2121 		drm_dbg_kms(&dev_priv->drm,
2122 			    "Modeset potentially stuck, unbreaking through wedging\n");
2123 		intel_gt_set_wedged(&dev_priv->gt);
2124 	}
2125 
2126 	/*
2127 	 * Need mode_config.mutex so that we don't
2128 	 * trample ongoing ->detect() and whatnot.
2129 	 */
2130 	mutex_lock(&dev->mode_config.mutex);
2131 	drm_modeset_acquire_init(ctx, 0);
2132 	while (1) {
2133 		ret = drm_modeset_lock_all_ctx(dev, ctx);
2134 		if (ret != -EDEADLK)
2135 			break;
2136 
2137 		drm_modeset_backoff(ctx);
2138 	}
2139 	/*
2140 	 * Disabling the crtcs gracefully seems nicer. Also the
2141 	 * g33 docs say we should at least disable all the planes.
2142 	 */
2143 	state = drm_atomic_helper_duplicate_state(dev, ctx);
2144 	if (IS_ERR(state)) {
2145 		ret = PTR_ERR(state);
2146 		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
2147 			ret);
2148 		return;
2149 	}
2150 
2151 	ret = drm_atomic_helper_disable_all(dev, ctx);
2152 	if (ret) {
2153 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2154 			ret);
2155 		drm_atomic_state_put(state);
2156 		return;
2157 	}
2158 
2159 	dev_priv->modeset_restore_state = state;
2160 	state->acquire_ctx = ctx;
2161 }
2162 
2163 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
2164 {
2165 	struct drm_device *dev = &dev_priv->drm;
2166 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2167 	struct drm_atomic_state *state;
2168 	int ret;
2169 
2170 	if (!HAS_DISPLAY(dev_priv))
2171 		return;
2172 
2173 	/* reset doesn't touch the display */
2174 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
2175 		return;
2176 
2177 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
2178 	if (!state)
2179 		goto unlock;
2180 
2181 	/* reset doesn't touch the display */
2182 	if (!gpu_reset_clobbers_display(dev_priv)) {
2183 		/* for testing only restore the display */
2184 		ret = __intel_display_resume(dev, state, ctx);
2185 		if (ret)
2186 			drm_err(&dev_priv->drm,
2187 				"Restoring old state failed with %i\n", ret);
2188 	} else {
2189 		/*
2190 		 * The display has been reset as well,
2191 		 * so we need a full re-initialization.
2192 		 */
2193 		intel_pps_unlock_regs_wa(dev_priv);
2194 		intel_modeset_init_hw(dev_priv);
2195 		intel_init_clock_gating(dev_priv);
2196 		intel_hpd_init(dev_priv);
2197 
2198 		ret = __intel_display_resume(dev, state, ctx);
2199 		if (ret)
2200 			drm_err(&dev_priv->drm,
2201 				"Restoring old state failed with %i\n", ret);
2202 
2203 		intel_hpd_poll_disable(dev_priv);
2204 	}
2205 
2206 	drm_atomic_state_put(state);
2207 unlock:
2208 	drm_modeset_drop_locks(ctx);
2209 	drm_modeset_acquire_fini(ctx);
2210 	mutex_unlock(&dev->mode_config.mutex);
2211 
2212 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2213 }
2214 
2215 static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
2216 {
2217 	if (crtc_state->pch_pfit.enabled &&
2218 	    (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
2219 	     crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
2220 	     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
2221 		return false;
2222 
2223 	if (crtc_state->dsc.compression_enable)
2224 		return false;
2225 
2226 	if (crtc_state->has_psr2)
2227 		return false;
2228 
2229 	if (crtc_state->splitter.enable)
2230 		return false;
2231 
2232 	return true;
2233 }
2234 
2235 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
2236 {
2237 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2238 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2239 	enum pipe pipe = crtc->pipe;
2240 	u32 tmp;
2241 
2242 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2243 
2244 	/*
2245 	 * Display WA #1153: icl
2246 	 * enable hardware to bypass the alpha math
2247 	 * and rounding for per-pixel values 00 and 0xff
2248 	 */
2249 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2250 	/*
2251 	 * Display WA #1605353570: icl
2252 	 * Set the pixel rounding bit to 1 for allowing
2253 	 * passthrough of Frame buffer pixels unmodified
2254 	 * across pipe
2255 	 */
2256 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2257 
2258 	if (IS_DG2(dev_priv)) {
2259 		/*
2260 		 * Underrun recovery must always be disabled on DG2.  However
2261 		 * the chicken bit meaning is inverted compared to other
2262 		 * platforms.
2263 		 */
2264 		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
2265 	} else if (DISPLAY_VER(dev_priv) >= 13) {
2266 		if (underrun_recovery_supported(crtc_state))
2267 			tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
2268 		else
2269 			tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
2270 	}
2271 
2272 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2273 }
2274 
2275 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2276 {
2277 	struct drm_crtc *crtc;
2278 	bool cleanup_done;
2279 
2280 	drm_for_each_crtc(crtc, &dev_priv->drm) {
2281 		struct drm_crtc_commit *commit;
2282 		spin_lock(&crtc->commit_lock);
2283 		commit = list_first_entry_or_null(&crtc->commit_list,
2284 						  struct drm_crtc_commit, commit_entry);
2285 		cleanup_done = commit ?
2286 			try_wait_for_completion(&commit->cleanup_done) : true;
2287 		spin_unlock(&crtc->commit_lock);
2288 
2289 		if (cleanup_done)
2290 			continue;
2291 
2292 		drm_crtc_wait_one_vblank(crtc);
2293 
2294 		return true;
2295 	}
2296 
2297 	return false;
2298 }
2299 
2300 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
2301 {
2302 	u32 temp;
2303 
2304 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
2305 
2306 	mutex_lock(&dev_priv->sb_lock);
2307 
2308 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2309 	temp |= SBI_SSCCTL_DISABLE;
2310 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2311 
2312 	mutex_unlock(&dev_priv->sb_lock);
2313 }
2314 
2315 /* Program iCLKIP clock to the desired frequency */
2316 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
2317 {
2318 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2319 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2320 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
2321 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2322 	u32 temp;
2323 
2324 	lpt_disable_iclkip(dev_priv);
2325 
2326 	/* The iCLK virtual clock root frequency is in MHz,
2327 	 * but the adjusted_mode->crtc_clock is in kHz. To get the
2328 	 * divisors, it is necessary to divide one by the other, so we
2329 	 * convert the virtual clock to kHz here for higher
2330 	 * precision.
2331 	 */
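	/*
	 * For example, a 148500 kHz (1080p) pixel clock with auxdiv = 0 gives
	 * desired_divisor = DIV_ROUND_CLOSEST(172800000, 148500) = 1164, so
	 * divsel = 1164 / 64 - 2 = 16 and phaseinc = 1164 % 64 = 12. Only
	 * clocks down near 20 MHz overflow the 7-bit divsel and need auxdiv = 1.
	 */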
2332 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
2333 		u32 iclk_virtual_root_freq = 172800 * 1000;
2334 		u32 iclk_pi_range = 64;
2335 		u32 desired_divisor;
2336 
2337 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2338 						    clock << auxdiv);
2339 		divsel = (desired_divisor / iclk_pi_range) - 2;
2340 		phaseinc = desired_divisor % iclk_pi_range;
2341 
2342 		/*
2343 		 * Near 20MHz is a corner case which is
2344 		 * out of range for the 7-bit divisor
2345 		 */
2346 		if (divsel <= 0x7f)
2347 			break;
2348 	}
2349 
2350 	/* This should not happen with any sane values */
2351 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2352 		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2353 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
2354 		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2355 
2356 	drm_dbg_kms(&dev_priv->drm,
2357 		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2358 		    clock, auxdiv, divsel, phasedir, phaseinc);
2359 
2360 	mutex_lock(&dev_priv->sb_lock);
2361 
2362 	/* Program SSCDIVINTPHASE6 */
2363 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2364 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2365 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2366 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2367 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2368 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2369 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2370 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2371 
2372 	/* Program SSCAUXDIV */
2373 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2374 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2375 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2376 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2377 
2378 	/* Enable modulator and associated divider */
2379 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2380 	temp &= ~SBI_SSCCTL_DISABLE;
2381 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2382 
2383 	mutex_unlock(&dev_priv->sb_lock);
2384 
2385 	/* Wait for initialization time */
2386 	udelay(24);
2387 
2388 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2389 }
2390 
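/*
 * Read back the current iCLKIP frequency by inverting the computation in
 * lpt_program_iclkip(). Continuing the example above: divsel = 16,
 * phaseinc = 12 and auxdiv = 0 give desired_divisor = (16 + 2) * 64 + 12
 * = 1164, and DIV_ROUND_CLOSEST(172800000, 1164) = 148454 kHz, i.e. a
 * small quantization error versus the requested 148500 kHz.
 */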
2391 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2392 {
2393 	u32 divsel, phaseinc, auxdiv;
2394 	u32 iclk_virtual_root_freq = 172800 * 1000;
2395 	u32 iclk_pi_range = 64;
2396 	u32 desired_divisor;
2397 	u32 temp;
2398 
2399 	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2400 		return 0;
2401 
2402 	mutex_lock(&dev_priv->sb_lock);
2403 
2404 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2405 	if (temp & SBI_SSCCTL_DISABLE) {
2406 		mutex_unlock(&dev_priv->sb_lock);
2407 		return 0;
2408 	}
2409 
2410 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2411 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2412 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2413 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2414 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2415 
2416 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2417 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2418 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2419 
2420 	mutex_unlock(&dev_priv->sb_lock);
2421 
2422 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2423 
2424 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2425 				 desired_divisor << auxdiv);
2426 }
2427 
2428 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2429 					   enum pipe pch_transcoder)
2430 {
2431 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2432 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2433 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2434 
2435 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2436 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2437 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2438 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2439 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2440 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2441 
2442 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2443 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2444 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2445 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2446 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2447 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2448 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2449 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2450 }
2451 
2452 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2453 {
2454 	u32 temp;
2455 
2456 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2457 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2458 		return;
2459 
2460 	drm_WARN_ON(&dev_priv->drm,
2461 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2462 		    FDI_RX_ENABLE);
2463 	drm_WARN_ON(&dev_priv->drm,
2464 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2465 		    FDI_RX_ENABLE);
2466 
2467 	temp &= ~FDI_BC_BIFURCATION_SELECT;
2468 	if (enable)
2469 		temp |= FDI_BC_BIFURCATION_SELECT;
2470 
2471 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2472 		    enable ? "en" : "dis");
2473 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2474 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2475 }
2476 
2477 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2478 {
2479 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2480 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2481 
2482 	switch (crtc->pipe) {
2483 	case PIPE_A:
2484 		break;
2485 	case PIPE_B:
2486 		if (crtc_state->fdi_lanes > 2)
2487 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
2488 		else
2489 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
2490 
2491 		break;
2492 	case PIPE_C:
2493 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
2494 
2495 		break;
2496 	default:
2497 		BUG();
2498 	}
2499 }
2500 
2501 /*
2502  * Finds the encoder associated with the given CRTC. This can only be
2503  * used when we know that the CRTC isn't feeding multiple encoders!
2504  */
2505 struct intel_encoder *
2506 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2507 			   const struct intel_crtc_state *crtc_state)
2508 {
2509 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2510 	const struct drm_connector_state *connector_state;
2511 	const struct drm_connector *connector;
2512 	struct intel_encoder *encoder = NULL;
2513 	int num_encoders = 0;
2514 	int i;
2515 
2516 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2517 		if (connector_state->crtc != &crtc->base)
2518 			continue;
2519 
2520 		encoder = to_intel_encoder(connector_state->best_encoder);
2521 		num_encoders++;
2522 	}
2523 
2524 	drm_WARN(encoder->base.dev, num_encoders != 1,
2525 		 "%d encoders for pipe %c\n",
2526 		 num_encoders, pipe_name(crtc->pipe));
2527 
2528 	return encoder;
2529 }
2530 
2531 /*
2532  * Enable PCH resources required for PCH ports:
2533  *   - PCH PLLs
2534  *   - FDI training & RX/TX
2535  *   - update transcoder timings
2536  *   - DP transcoding bits
2537  *   - transcoder
2538  */
2539 static void ilk_pch_enable(const struct intel_atomic_state *state,
2540 			   const struct intel_crtc_state *crtc_state)
2541 {
2542 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2543 	struct drm_device *dev = crtc->base.dev;
2544 	struct drm_i915_private *dev_priv = to_i915(dev);
2545 	enum pipe pipe = crtc->pipe;
2546 	u32 temp;
2547 
2548 	assert_pch_transcoder_disabled(dev_priv, pipe);
2549 
2550 	if (IS_IVYBRIDGE(dev_priv))
2551 		ivb_update_fdi_bc_bifurcation(crtc_state);
2552 
2553 	/* Write the TU size bits before fdi link training, so that error
2554 	 * detection works. */
2555 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2556 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2557 
2558 	/* For PCH output, training FDI link */
2559 	dev_priv->display.fdi_link_train(crtc, crtc_state);
2560 
2561 	/* We need to program the right clock selection before writing the pixel
2562 	 * multiplier into the DPLL. */
2563 	if (HAS_PCH_CPT(dev_priv)) {
2564 		u32 sel;
2565 
2566 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2567 		temp |= TRANS_DPLL_ENABLE(pipe);
2568 		sel = TRANS_DPLLB_SEL(pipe);
2569 		if (crtc_state->shared_dpll ==
2570 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2571 			temp |= sel;
2572 		else
2573 			temp &= ~sel;
2574 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2575 	}
2576 
2577 	/* XXX: pch pll's can be enabled any time before we enable the PCH
2578 	 * transcoder, and we actually should do this to not upset any PCH
2579 	 * transcoder that already uses the clock when we share it.
2580 	 *
2581 	 * Note that enable_shared_dpll tries to do the right thing, but
2582 	 * get_shared_dpll unconditionally resets the pll - we need that to have
2583 	 * the right LVDS enable sequence. */
2584 	intel_enable_shared_dpll(crtc_state);
2585 
2586 	/* set transcoder timing, panel must allow it */
2587 	assert_panel_unlocked(dev_priv, pipe);
2588 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
2589 
2590 	intel_fdi_normal_train(crtc);
2591 
2592 	/* For PCH DP, enable TRANS_DP_CTL */
2593 	if (HAS_PCH_CPT(dev_priv) &&
2594 	    intel_crtc_has_dp_encoder(crtc_state)) {
2595 		const struct drm_display_mode *adjusted_mode =
2596 			&crtc_state->hw.adjusted_mode;
2597 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2598 		i915_reg_t reg = TRANS_DP_CTL(pipe);
2599 		enum port port;
2600 
2601 		temp = intel_de_read(dev_priv, reg);
2602 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2603 			  TRANS_DP_SYNC_MASK |
2604 			  TRANS_DP_BPC_MASK);
2605 		temp |= TRANS_DP_OUTPUT_ENABLE;
2606 		temp |= bpc << 9; /* same format but at 11:9 */
2607 
2608 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2609 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2610 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2611 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2612 
2613 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2614 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2615 		temp |= TRANS_DP_PORT_SEL(port);
2616 
2617 		intel_de_write(dev_priv, reg, temp);
2618 	}
2619 
2620 	ilk_enable_pch_transcoder(crtc_state);
2621 }
2622 
2623 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2624 {
2625 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2626 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2627 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2628 
2629 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2630 
2631 	lpt_program_iclkip(crtc_state);
2632 
2633 	/* Set transcoder timing. */
2634 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2635 
2636 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2637 }
2638 
2639 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2640 			       enum pipe pipe)
2641 {
2642 	i915_reg_t dslreg = PIPEDSL(pipe);
2643 	u32 temp;
2644 
2645 	temp = intel_de_read(dev_priv, dslreg);
2646 	udelay(500);
2647 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2648 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2649 			drm_err(&dev_priv->drm,
2650 				"mode set failed: pipe %c stuck\n",
2651 				pipe_name(pipe));
2652 	}
2653 }
2654 
2655 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2656 {
2657 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2658 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2659 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2660 	enum pipe pipe = crtc->pipe;
2661 	int width = drm_rect_width(dst);
2662 	int height = drm_rect_height(dst);
2663 	int x = dst->x1;
2664 	int y = dst->y1;
2665 
2666 	if (!crtc_state->pch_pfit.enabled)
2667 		return;
2668 
2669 	/* Force use of hard-coded filter coefficients
2670 	 * as some pre-programmed values are broken,
2671 	 * e.g. x201.
2672 	 */
2673 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2674 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2675 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2676 	else
2677 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2678 			       PF_FILTER_MED_3x3);
2679 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2680 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2681 }
2682 
2683 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
2684 {
2685 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2686 	struct drm_device *dev = crtc->base.dev;
2687 	struct drm_i915_private *dev_priv = to_i915(dev);
2688 
2689 	if (!crtc_state->ips_enabled)
2690 		return;
2691 
2692 	/*
2693 	 * We can only enable IPS after we enable a plane and wait for a vblank.
2694 	 * This function is called from post_plane_update, which is run after
2695 	 * a vblank wait.
2696 	 */
2697 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
2698 
2699 	if (IS_BROADWELL(dev_priv)) {
2700 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
2701 							 IPS_ENABLE | IPS_PCODE_CONTROL));
2702 		/* Quoting Art Runyan: "it's not safe to expect any particular
2703 		 * value in IPS_CTL bit 31 after enabling IPS through the
2704 		 * mailbox." Moreover, the mailbox may return a bogus state,
2705 		 * so we need to just enable it and continue on.
2706 		 */
2707 	} else {
2708 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
2709 		/* The bit only becomes 1 in the next vblank, so this wait here
2710 		 * is essentially intel_wait_for_vblank. If we don't have this
2711 		 * and don't wait for vblanks until the end of crtc_enable, then
2712 		 * the HW state readout code will complain that the expected
2713 		 * IPS_CTL value is not the one we read. */
2714 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
2715 			drm_err(&dev_priv->drm,
2716 				"Timed out waiting for IPS enable\n");
2717 	}
2718 }
2719 
2720 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
2721 {
2722 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2723 	struct drm_device *dev = crtc->base.dev;
2724 	struct drm_i915_private *dev_priv = to_i915(dev);
2725 
2726 	if (!crtc_state->ips_enabled)
2727 		return;
2728 
2729 	if (IS_BROADWELL(dev_priv)) {
2730 		drm_WARN_ON(dev,
2731 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
2732 		/*
2733 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
2734 		 * 42ms timeout value leads to occasional timeouts so use 100ms
2735 		 * instead.
2736 		 */
2737 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
2738 			drm_err(&dev_priv->drm,
2739 				"Timed out waiting for IPS disable\n");
2740 	} else {
2741 		intel_de_write(dev_priv, IPS_CTL, 0);
2742 		intel_de_posting_read(dev_priv, IPS_CTL);
2743 	}
2744 
2745 	/* We need to wait for a vblank before we can disable the plane. */
2746 	intel_wait_for_vblank(dev_priv, crtc->pipe);
2747 }
2748 
2749 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
2750 {
2751 	if (crtc->overlay)
2752 		(void) intel_overlay_switch_off(crtc->overlay);
2753 
2754 	/* Let userspace switch the overlay on again. In most cases userspace
2755 	 * has to recompute where to put it anyway.
2756 	 */
2757 }
2758 
2759 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2760 				       const struct intel_crtc_state *new_crtc_state)
2761 {
2762 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2763 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2764 
2765 	if (!old_crtc_state->ips_enabled)
2766 		return false;
2767 
2768 	if (intel_crtc_needs_modeset(new_crtc_state))
2769 		return true;
2770 
2771 	/*
2772 	 * Workaround: Do not read or write the pipe palette/gamma data while
2773 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2774 	 *
2775 	 * Disable IPS before we program the LUT.
2776 	 */
2777 	if (IS_HASWELL(dev_priv) &&
2778 	    (new_crtc_state->uapi.color_mgmt_changed ||
2779 	     new_crtc_state->update_pipe) &&
2780 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2781 		return true;
2782 
2783 	return !new_crtc_state->ips_enabled;
2784 }
2785 
2786 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2787 				       const struct intel_crtc_state *new_crtc_state)
2788 {
2789 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2790 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2791 
2792 	if (!new_crtc_state->ips_enabled)
2793 		return false;
2794 
2795 	if (intel_crtc_needs_modeset(new_crtc_state))
2796 		return true;
2797 
2798 	/*
2799 	 * Workaround: Do not read or write the pipe palette/gamma data while
2800 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2801 	 *
2802 	 * Re-enable IPS after the LUT has been programmed.
2803 	 */
2804 	if (IS_HASWELL(dev_priv) &&
2805 	    (new_crtc_state->uapi.color_mgmt_changed ||
2806 	     new_crtc_state->update_pipe) &&
2807 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2808 		return true;
2809 
2810 	/*
2811 	 * We can't read out IPS on broadwell, assume the worst and
2812 	 * forcibly enable IPS on the first fastset.
2813 	 */
2814 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2815 		return true;
2816 
2817 	return !old_crtc_state->ips_enabled;
2818 }
2819 
2820 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2821 {
2822 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2823 
2824 	if (!crtc_state->nv12_planes)
2825 		return false;
2826 
2827 	/* WA Display #0827: Gen9:all */
2828 	if (DISPLAY_VER(dev_priv) == 9)
2829 		return true;
2830 
2831 	return false;
2832 }
2833 
2834 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2835 {
2836 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2837 
2838 	/* Wa_2006604312:icl,ehl */
2839 	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2840 		return true;
2841 
2842 	return false;
2843 }
2844 
2845 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2846 			    const struct intel_crtc_state *new_crtc_state)
2847 {
2848 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2849 		new_crtc_state->active_planes;
2850 }
2851 
2852 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2853 			     const struct intel_crtc_state *new_crtc_state)
2854 {
2855 	return old_crtc_state->active_planes &&
2856 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2857 }
2858 
2859 static void intel_post_plane_update(struct intel_atomic_state *state,
2860 				    struct intel_crtc *crtc)
2861 {
2862 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2863 	const struct intel_crtc_state *old_crtc_state =
2864 		intel_atomic_get_old_crtc_state(state, crtc);
2865 	const struct intel_crtc_state *new_crtc_state =
2866 		intel_atomic_get_new_crtc_state(state, crtc);
2867 	enum pipe pipe = crtc->pipe;
2868 
2869 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
2870 
2871 	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
2872 		intel_update_watermarks(crtc);
2873 
2874 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
2875 		hsw_enable_ips(new_crtc_state);
2876 
2877 	intel_fbc_post_update(state, crtc);
2878 
2879 	if (needs_nv12_wa(old_crtc_state) &&
2880 	    !needs_nv12_wa(new_crtc_state))
2881 		skl_wa_827(dev_priv, pipe, false);
2882 
2883 	if (needs_scalerclk_wa(old_crtc_state) &&
2884 	    !needs_scalerclk_wa(new_crtc_state))
2885 		icl_wa_scalerclkgating(dev_priv, pipe, false);
2886 }
2887 
2888 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2889 					struct intel_crtc *crtc)
2890 {
2891 	const struct intel_crtc_state *crtc_state =
2892 		intel_atomic_get_new_crtc_state(state, crtc);
2893 	u8 update_planes = crtc_state->update_planes;
2894 	const struct intel_plane_state *plane_state;
2895 	struct intel_plane *plane;
2896 	int i;
2897 
2898 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2899 		if (plane->enable_flip_done &&
2900 		    plane->pipe == crtc->pipe &&
2901 		    update_planes & BIT(plane->id))
2902 			plane->enable_flip_done(plane);
2903 	}
2904 }
2905 
2906 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2907 					 struct intel_crtc *crtc)
2908 {
2909 	const struct intel_crtc_state *crtc_state =
2910 		intel_atomic_get_new_crtc_state(state, crtc);
2911 	u8 update_planes = crtc_state->update_planes;
2912 	const struct intel_plane_state *plane_state;
2913 	struct intel_plane *plane;
2914 	int i;
2915 
2916 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2917 		if (plane->disable_flip_done &&
2918 		    plane->pipe == crtc->pipe &&
2919 		    update_planes & BIT(plane->id))
2920 			plane->disable_flip_done(plane);
2921 	}
2922 }
2923 
2924 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2925 					     struct intel_crtc *crtc)
2926 {
2927 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2928 	const struct intel_crtc_state *old_crtc_state =
2929 		intel_atomic_get_old_crtc_state(state, crtc);
2930 	const struct intel_crtc_state *new_crtc_state =
2931 		intel_atomic_get_new_crtc_state(state, crtc);
2932 	u8 update_planes = new_crtc_state->update_planes;
2933 	const struct intel_plane_state *old_plane_state;
2934 	struct intel_plane *plane;
2935 	bool need_vbl_wait = false;
2936 	int i;
2937 
2938 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2939 		if (plane->need_async_flip_disable_wa &&
2940 		    plane->pipe == crtc->pipe &&
2941 		    update_planes & BIT(plane->id)) {
2942 			/*
2943 			 * Apart from the async flip bit we want to
2944 			 * preserve the old state for the plane.
2945 			 */
2946 			plane->async_flip(plane, old_crtc_state,
2947 					  old_plane_state, false);
2948 			need_vbl_wait = true;
2949 		}
2950 	}
2951 
2952 	if (need_vbl_wait)
2953 		intel_wait_for_vblank(i915, crtc->pipe);
2954 }
2955 
2956 static void intel_pre_plane_update(struct intel_atomic_state *state,
2957 				   struct intel_crtc *crtc)
2958 {
2959 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2960 	const struct intel_crtc_state *old_crtc_state =
2961 		intel_atomic_get_old_crtc_state(state, crtc);
2962 	const struct intel_crtc_state *new_crtc_state =
2963 		intel_atomic_get_new_crtc_state(state, crtc);
2964 	enum pipe pipe = crtc->pipe;
2965 
2966 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
2967 		hsw_disable_ips(old_crtc_state);
2968 
2969 	if (intel_fbc_pre_update(state, crtc))
2970 		intel_wait_for_vblank(dev_priv, pipe);
2971 
2972 	/* Display WA 827 */
2973 	if (!needs_nv12_wa(old_crtc_state) &&
2974 	    needs_nv12_wa(new_crtc_state))
2975 		skl_wa_827(dev_priv, pipe, true);
2976 
2977 	/* Wa_2006604312:icl,ehl */
2978 	if (!needs_scalerclk_wa(old_crtc_state) &&
2979 	    needs_scalerclk_wa(new_crtc_state))
2980 		icl_wa_scalerclkgating(dev_priv, pipe, true);
2981 
2982 	/*
2983 	 * Vblank time updates from the shadow to live plane control register
2984 	 * are blocked if the memory self-refresh mode is active at that
2985 	 * moment. So to make sure the plane gets truly disabled, first
2986 	 * disable the self-refresh mode. The self-refresh enable bit in turn
2987 	 * will be checked/applied by the HW only at the next frame start
2988 	 * event which is after the vblank start event, so we need to have a
2989 	 * wait-for-vblank between disabling the plane and the pipe.
2990 	 */
2991 	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
2992 	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
2993 		intel_wait_for_vblank(dev_priv, pipe);
2994 
2995 	/*
2996 	 * IVB workaround: must disable low power watermarks for at least
2997 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
2998 	 * when scaling is disabled.
2999 	 *
3000 	 * WaCxSRDisabledForSpriteScaling:ivb
3001 	 */
3002 	if (old_crtc_state->hw.active &&
3003 	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
3004 		intel_wait_for_vblank(dev_priv, pipe);
3005 
3006 	/*
3007 	 * If we're doing a modeset we don't need to do any
3008 	 * pre-vblank watermark programming here.
3009 	 */
3010 	if (!intel_crtc_needs_modeset(new_crtc_state)) {
3011 		/*
3012 		 * For platforms that support atomic watermarks, program the
3013 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
3014 		 * will be the intermediate values that are safe for both pre- and
3015 		 * post- vblank; when vblank happens, the 'active' values will be set
3016 		 * to the final 'target' values and we'll do this again to get the
3017 		 * optimal watermarks.  For gen9+ platforms, the values we program here
3018 		 * will be the final target values which will get automatically latched
3019 		 * at vblank time; no further programming will be necessary.
3020 		 *
3021 		 * If a platform hasn't been transitioned to atomic watermarks yet,
3022 		 * we'll continue to update watermarks the old way, if flags tell
3023 		 * us to.
3024 		 */
3025 		if (dev_priv->display.initial_watermarks)
3026 			dev_priv->display.initial_watermarks(state, crtc);
3027 		else if (new_crtc_state->update_wm_pre)
3028 			intel_update_watermarks(crtc);
3029 	}
3030 
3031 	/*
3032 	 * Gen2 reports pipe underruns whenever all planes are disabled.
3033 	 * So disable underrun reporting before all the planes get disabled.
3034 	 *
3035 	 * We do this after .initial_watermarks() so that we have a
3036 	 * chance of catching underruns with the intermediate watermarks
3037 	 * vs. the old plane configuration.
3038 	 */
3039 	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
3040 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3041 
3042 	/*
3043 	 * WA for platforms where async address update enable bit
3044 	 * is double buffered and only latched at start of vblank.
3045 	 */
3046 	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
3047 		intel_crtc_async_flip_disable_wa(state, crtc);
3048 }
3049 
3050 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
3051 				      struct intel_crtc *crtc)
3052 {
3053 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3054 	const struct intel_crtc_state *new_crtc_state =
3055 		intel_atomic_get_new_crtc_state(state, crtc);
3056 	unsigned int update_mask = new_crtc_state->update_planes;
3057 	const struct intel_plane_state *old_plane_state;
3058 	struct intel_plane *plane;
3059 	unsigned fb_bits = 0;
3060 	int i;
3061 
3062 	intel_crtc_dpms_overlay_disable(crtc);
3063 
3064 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3065 		if (crtc->pipe != plane->pipe ||
3066 		    !(update_mask & BIT(plane->id)))
3067 			continue;
3068 
3069 		intel_disable_plane(plane, new_crtc_state);
3070 
3071 		if (old_plane_state->uapi.visible)
3072 			fb_bits |= plane->frontbuffer_bit;
3073 	}
3074 
3075 	intel_frontbuffer_flip(dev_priv, fb_bits);
3076 }

/*
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	encoder = intel_attached_encoder(connector);
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}

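/*
 * The intel_encoders_*() helpers below invoke the corresponding optional
 * encoder hook for each encoder affected by the atomic state: the update
 * prepare/complete variants walk every connector that needs a modeset,
 * while the remaining variants walk the connectors bound to a given CRTC.
 */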
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
	}
}

static void intel_encoders_update_complete(struct intel_atomic_state *state)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_complete)
			continue;

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_complete(state, encoder, crtc);
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_pre_disable(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_disable)
			encoder->pre_disable(state, encoder, old_crtc_state,
					     old_conn_state);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	plane->disable_plane(plane, crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underrun
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}

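/*
 * Toggle the DPF/DPF RAM/DPFR clock gating disable bits; used by the
 * Display WA #1180 sequence in hsw_crtc_enable() to keep the pipe
 * scaler clocks ungated while the workaround is required.
 */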
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
{
	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	if (apply)
		val |= mask;
	else
		val &= ~mask;

	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}

static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	/* Wa_22010947358:adl-p */
	if (IS_ALDERLAKE_P(dev_priv))
		val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
	else
		val = MBUS_DBOX_A_CREDIT(2);

	if (DISPLAY_VER(dev_priv) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
	} else {
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);
	}

	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
}

static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

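/* Program the transcoder's frame start delay via the CHICKEN_TRANS register. */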
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
	u32 val;

	val = intel_de_read(dev_priv, reg);
	val &= ~HSW_FRAME_START_DELAY_MASK;
	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, reg, val);
}

static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(master->base.dev);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		if (master_crtc_state->shared_dpll)
			intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(crtc_state);
}

static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11) {
		const struct intel_dbuf_state *dbuf_state =
				intel_atomic_get_new_dbuf_state(state);

		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
	}

	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}

void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}

static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}

static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}

bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_DG2(dev_priv))
		/*
		 * DG2 outputs labelled as "combo PHY" in the bspec use
		 * SNPS PHYs with completely different programming,
		 * hence we always return false here.
		 */
		return false;
	else if (IS_ALDERLAKE_S(dev_priv))
		return phy <= PHY_E;
	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
		return phy <= PHY_D;
	else if (IS_JSL_EHL(dev_priv))
		return phy <= PHY_C;
	else if (DISPLAY_VER(dev_priv) >= 11)
		return phy <= PHY_B;
	else
		return false;
}

bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (IS_DG2(dev_priv))
		/* DG2's "TC1" output uses a SNPS PHY */
		return false;
	else if (IS_ALDERLAKE_P(dev_priv))
		return phy >= PHY_F && phy <= PHY_I;
	else if (IS_TIGERLAKE(dev_priv))
		return phy >= PHY_D && phy <= PHY_I;
	else if (IS_ICELAKE(dev_priv))
		return phy >= PHY_C && phy <= PHY_F;
	else
		return false;
}

bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
	if (phy == PHY_NONE)
		return false;
	else if (IS_DG2(dev_priv))
		/*
		 * All four "combo" ports and the TC1 port (PHY E) use
		 * Synopsys PHYs.
		 */
		return phy <= PHY_E;

	return false;
}

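/*
 * Map a DDI port to its underlying PHY. The numbering of the TC and
 * XELPD ports relative to the PHYs differs between platforms, hence
 * the platform checks.
 */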
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
		return PHY_D + port - PORT_D_XELPD;
	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
		return PHY_F + port - PORT_TC1;
	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if (IS_JSL_EHL(i915) && port == PORT_D)
		return PHY_A;

	return PHY_A + port - PORT_A;
}

enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
		return TC_PORT_NONE;

	if (DISPLAY_VER(dev_priv) >= 12)
		return TC_PORT_1 + port - PORT_TC1;
	else
		return TC_PORT_1 + port - PORT_C;
}

enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}

/*
 * Converts aux_ch to power_domain without caring about TBT ports;
 * for TBT ports use intel_aux_power_domain() instead.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}

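/*
 * Compute the mask of power domains needed by @crtc_state: pipe,
 * transcoder, panel fitter, every attached encoder, plus the audio,
 * shared DPLL and DSC domains where applicable.
 */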
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));

	return mask;
}

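/*
 * Grab references to the power domains the CRTC newly needs and return
 * the mask of domains it no longer needs, for the caller to release
 * via modeset_put_crtc_power_domains() once the modeset is done.
 */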
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	domains = get_crtc_power_domains(crtc_state);

	new_domains = domains & ~crtc->enabled_power_domains.mask;
	old_domains = crtc->enabled_power_domains.mask & ~domains;

	for_each_power_domain(domain, new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);

	return old_domains;
}

static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					   u64 domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
					    domains);
}

static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}

static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, FP0(crtc->pipe),
		       crtc_state->dpll_hw_state.fp0);
	intel_de_write(dev_priv, FP1(crtc->pipe),
		       crtc_state->dpll_hw_state.fp1);
}

static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);
}

static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}

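/*
 * Disable @crtc outside of the regular atomic commit path, e.g. during
 * initial hw state readout and sanitization, and clear out the derived
 * per-pipe cdclk, dbuf and bandwidth state.
 */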
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!dev_priv->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 * would be a net win.
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}

static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return DISPLAY_VER(dev_priv) < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

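/*
 * Account for panel fitter downscaling: with the pfit enabled the
 * effective pipe pixel rate is the dotclock scaled by the pfit
 * source/destination size ratio.
 */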
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	drm_rect_init(&src, 0, 0,
		      crtc_state->pipe_src_w << 16,
		      crtc_state->pipe_src_h << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}

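/* Fill out a user-visible mode from the hardware CRTC timings. */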
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}

static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (HAS_GMCH(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->hw.pipe_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}

static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}

static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}

static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}

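/*
 * Halve both M and N until they fit within the DATA_LINK_M_N_MASK
 * register field, preserving the ratio as closely as possible.
 */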
4614 static void
intel_reduce_m_n_ratio(u32 * num,u32 * den)4615 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4616 {
4617 	while (*num > DATA_LINK_M_N_MASK ||
4618 	       *den > DATA_LINK_M_N_MASK) {
4619 		*num >>= 1;
4620 		*den >>= 1;
4621 	}
4622 }
4623 
compute_m_n(unsigned int m,unsigned int n,u32 * ret_m,u32 * ret_n,bool constant_n)4624 static void compute_m_n(unsigned int m, unsigned int n,
4625 			u32 *ret_m, u32 *ret_n,
4626 			bool constant_n)
4627 {
4628 	/*
4629 	 * Several DP dongles in particular seem to be fussy about
4630 	 * too large link M/N values. Use N = 0x8000, which such
4631 	 * devices should accept: 0x8000 is the fixed N value the DP
4632 	 * spec defines for asynchronous clock mode, and these devices
4633 	 * expect it in synchronous clock mode as well.
4634 	 */
4635 	if (constant_n)
4636 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
4637 	else
4638 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4639 
4640 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4641 	intel_reduce_m_n_ratio(ret_m, ret_n);
4642 }
4643 
4644 void
4645 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4646 		       int pixel_clock, int link_clock,
4647 		       struct intel_link_m_n *m_n,
4648 		       bool constant_n, bool fec_enable)
4649 {
4650 	u32 data_clock = bits_per_pixel * pixel_clock;
4651 
4652 	if (fec_enable)
4653 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
4654 
4655 	m_n->tu = 64;
4656 	compute_m_n(data_clock,
4657 		    link_clock * nlanes * 8,
4658 		    &m_n->gmch_m, &m_n->gmch_n,
4659 		    constant_n);
4660 
4661 	compute_m_n(pixel_clock, link_clock,
4662 		    &m_n->link_m, &m_n->link_n,
4663 		    constant_n);
4664 }
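
/*
 * A minimal worked example (values chosen for illustration, not
 * taken from any particular platform):
 */
#if 0	/* example only, not driver code */
static void example_compute_link_m_n(void)
{
	struct intel_link_m_n m_n;

	/*
	 * 148500 kHz pixel clock at 24 bpp over 4 lanes of HBR
	 * (270000 kHz symbol clock) with constant N = 0x8000 yields
	 * gmch_m/gmch_n = 13516/32768 (data ratio 0.4125) and
	 * link_m/link_n = 18022/32768 (link ratio 0.55).
	 */
	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n, true, false);
}
#endif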
4665 
4666 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4667 {
4668 	/*
4669 	 * There may be no VBT; and if the BIOS enabled SSC we can
4670 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
4671 	 * BIOS isn't using it, don't assume it will work even if the VBT
4672 	 * indicates as much.
4673 	 */
4674 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4675 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4676 						       PCH_DREF_CONTROL) &
4677 			DREF_SSC1_ENABLE;
4678 
4679 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4680 			drm_dbg_kms(&dev_priv->drm,
4681 				    "SSC %s by BIOS, overriding VBT which says %s\n",
4682 				    enableddisabled(bios_lvds_use_ssc),
4683 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
4684 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4685 		}
4686 	}
4687 }
4688 
4689 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4690 					 const struct intel_link_m_n *m_n)
4691 {
4692 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4693 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4694 	enum pipe pipe = crtc->pipe;
4695 
4696 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4697 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
4698 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4699 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4700 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4701 }
4702 
4703 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4704 				 enum transcoder transcoder)
4705 {
4706 	if (IS_HASWELL(dev_priv))
4707 		return transcoder == TRANSCODER_EDP;
4708 
4709 	/*
4710 	 * Strictly speaking, some registers are available before
4711 	 * gen7, but we only support DRRS on gen7+.
4712 	 */
4713 	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4714 }
4715 
4716 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4717 					 const struct intel_link_m_n *m_n,
4718 					 const struct intel_link_m_n *m2_n2)
4719 {
4720 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4721 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4722 	enum pipe pipe = crtc->pipe;
4723 	enum transcoder transcoder = crtc_state->cpu_transcoder;
4724 
4725 	if (DISPLAY_VER(dev_priv) >= 5) {
4726 		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
4727 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4728 		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
4729 			       m_n->gmch_n);
4730 		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
4731 			       m_n->link_m);
4732 		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
4733 			       m_n->link_n);
4734 		/*
4735 		 * M2_N2 registers are set only if DRRS is supported
4736 		 * (to make sure the registers are not unnecessarily accessed).
4737 		 */
4738 		if (m2_n2 && crtc_state->has_drrs &&
4739 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
4740 			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
4741 				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
4742 			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
4743 				       m2_n2->gmch_n);
4744 			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
4745 				       m2_n2->link_m);
4746 			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
4747 				       m2_n2->link_n);
4748 		}
4749 	} else {
4750 		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
4751 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4752 		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4753 		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
4754 		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
4755 	}
4756 }
4757 
4758 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
4759 {
4760 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
4761 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4762 
4763 	if (m_n == M1_N1) {
4764 		dp_m_n = &crtc_state->dp_m_n;
4765 		dp_m2_n2 = &crtc_state->dp_m2_n2;
4766 	} else if (m_n == M2_N2) {
4768 		/*
4769 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
4770 		 * needs to be programmed into M1_N1.
4771 		 */
4772 		dp_m_n = &crtc_state->dp_m2_n2;
4773 	} else {
4774 		drm_err(&i915->drm, "Unsupported divider value\n");
4775 		return;
4776 	}
4777 
4778 	if (crtc_state->has_pch_encoder)
4779 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
4780 	else
4781 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
4782 }
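
/*
 * Usage sketch (hypothetical caller, for illustration only): the
 * primary dividers are programmed at modeset time, and the M2_N2
 * dividers take over when DRRS drops to the low refresh rate.
 */
#if 0	/* example only, not driver code */
	intel_dp_set_m_n(crtc_state, M1_N1);	/* modeset: high refresh rate */
	intel_dp_set_m_n(crtc_state, M2_N2);	/* DRRS: low refresh rate */
#endif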
4783 
4784 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
4785 {
4786 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4787 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4788 	enum pipe pipe = crtc->pipe;
4789 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4790 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4791 	u32 crtc_vtotal, crtc_vblank_end;
4792 	int vsyncshift = 0;
4793 
4794 	/* We need to be careful not to change the adjusted mode, for otherwise
4795 	 * the hw state checker will get angry at the mismatch. */
4796 	crtc_vtotal = adjusted_mode->crtc_vtotal;
4797 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4798 
4799 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4800 		/* the chip adds 2 halflines automatically */
4801 		crtc_vtotal -= 1;
4802 		crtc_vblank_end -= 1;
4803 
4804 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4805 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
4806 		else
4807 			vsyncshift = adjusted_mode->crtc_hsync_start -
4808 				adjusted_mode->crtc_htotal / 2;
4809 		if (vsyncshift < 0)
4810 			vsyncshift += adjusted_mode->crtc_htotal;
4811 	}
4812 
4813 	if (DISPLAY_VER(dev_priv) > 3)
4814 		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
4815 		               vsyncshift);
4816 
4817 	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
4818 		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
4819 	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
4820 		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
4821 	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
4822 		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
4823 
4824 	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
4825 		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
4826 	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
4827 		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
4828 	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
4829 		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
4830 
4831 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4832 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4833 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4834 	 * bits. */
4835 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
4836 	    (pipe == PIPE_B || pipe == PIPE_C))
4837 		intel_de_write(dev_priv, VTOTAL(pipe),
4838 		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
4839 
4840 }
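
/*
 * Register layout, with a worked example (illustrative mode): each
 * timing register packs the earlier value minus one in the low 16
 * bits and the later value minus one in the high 16 bits. For 1920
 * active pixels and crtc_htotal = 2200, HTOTAL is written as
 * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f.
 */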
4841 
4842 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4843 {
4844 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4845 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4846 	enum pipe pipe = crtc->pipe;
4847 
4848 	/* pipesrc controls the size that is scaled from, which should
4849 	 * always be the user's requested size.
4850 	 */
4851 	intel_de_write(dev_priv, PIPESRC(pipe),
4852 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4853 }
4854 
4855 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4856 {
4857 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4858 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4859 
4860 	if (DISPLAY_VER(dev_priv) == 2)
4861 		return false;
4862 
4863 	if (DISPLAY_VER(dev_priv) >= 9 ||
4864 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4865 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4866 	else
4867 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4868 }
4869 
4870 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
4871 					 struct intel_crtc_state *pipe_config)
4872 {
4873 	struct drm_device *dev = crtc->base.dev;
4874 	struct drm_i915_private *dev_priv = to_i915(dev);
4875 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4876 	u32 tmp;
4877 
4878 	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
4879 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4880 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4881 
4882 	if (!transcoder_is_dsi(cpu_transcoder)) {
4883 		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
4884 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
4885 							(tmp & 0xffff) + 1;
4886 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
4887 						((tmp >> 16) & 0xffff) + 1;
4888 	}
4889 	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
4890 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4891 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4892 
4893 	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
4894 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4895 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4896 
4897 	if (!transcoder_is_dsi(cpu_transcoder)) {
4898 		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
4899 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
4900 							(tmp & 0xffff) + 1;
4901 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
4902 						((tmp >> 16) & 0xffff) + 1;
4903 	}
4904 	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
4905 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4906 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4907 
4908 	if (intel_pipe_is_interlaced(pipe_config)) {
4909 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4910 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
4911 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
4912 	}
4913 }
4914 
4915 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4916 				    struct intel_crtc_state *pipe_config)
4917 {
4918 	struct drm_device *dev = crtc->base.dev;
4919 	struct drm_i915_private *dev_priv = to_i915(dev);
4920 	u32 tmp;
4921 
4922 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4923 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4924 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4925 }
4926 
4927 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
4928 {
4929 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4930 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4931 	u32 pipeconf;
4932 
4933 	pipeconf = 0;
4934 
4935 	/* we keep both pipes enabled on 830 */
4936 	if (IS_I830(dev_priv))
4937 		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
4938 
4939 	if (crtc_state->double_wide)
4940 		pipeconf |= PIPECONF_DOUBLE_WIDE;
4941 
4942 	/* only g4x and later have fancy bpc/dither controls */
4943 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4944 	    IS_CHERRYVIEW(dev_priv)) {
4945 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
4946 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
4947 			pipeconf |= PIPECONF_DITHER_EN |
4948 				    PIPECONF_DITHER_TYPE_SP;
4949 
4950 		switch (crtc_state->pipe_bpp) {
4951 		case 18:
4952 			pipeconf |= PIPECONF_6BPC;
4953 			break;
4954 		case 24:
4955 			pipeconf |= PIPECONF_8BPC;
4956 			break;
4957 		case 30:
4958 			pipeconf |= PIPECONF_10BPC;
4959 			break;
4960 		default:
4961 			/* Case prevented by intel_choose_pipe_bpp_dither. */
4962 			BUG();
4963 		}
4964 	}
4965 
4966 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
4967 		if (DISPLAY_VER(dev_priv) < 4 ||
4968 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4969 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4970 		else
4971 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
4972 	} else {
4973 		pipeconf |= PIPECONF_PROGRESSIVE;
4974 	}
4975 
4976 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4977 	     crtc_state->limited_color_range)
4978 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4979 
4980 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
4981 
4982 	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4983 
4984 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
4985 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
4986 }
4987 
4988 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4989 {
4990 	if (IS_I830(dev_priv))
4991 		return false;
4992 
4993 	return DISPLAY_VER(dev_priv) >= 4 ||
4994 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4995 }
4996 
4997 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4998 {
4999 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5000 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5001 	u32 tmp;
5002 
5003 	if (!i9xx_has_pfit(dev_priv))
5004 		return;
5005 
5006 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
5007 	if (!(tmp & PFIT_ENABLE))
5008 		return;
5009 
5010 	/* Check whether the pfit is attached to our pipe. */
5011 	if (DISPLAY_VER(dev_priv) < 4) {
5012 		if (crtc->pipe != PIPE_B)
5013 			return;
5014 	} else {
5015 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5016 			return;
5017 	}
5018 
5019 	crtc_state->gmch_pfit.control = tmp;
5020 	crtc_state->gmch_pfit.pgm_ratios =
5021 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
5022 }
5023 
5024 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5025 			       struct intel_crtc_state *pipe_config)
5026 {
5027 	struct drm_device *dev = crtc->base.dev;
5028 	struct drm_i915_private *dev_priv = to_i915(dev);
5029 	enum pipe pipe = crtc->pipe;
5030 	struct dpll clock;
5031 	u32 mdiv;
5032 	int refclk = 100000;
5033 
5034 	/* In case of DSI, DPLL will not be used */
5035 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5036 		return;
5037 
5038 	vlv_dpio_get(dev_priv);
5039 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
5040 	vlv_dpio_put(dev_priv);
5041 
5042 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5043 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
5044 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5045 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5046 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5047 
5048 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
5049 }
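
/*
 * Example decode (hypothetical divider values): m1 = 2, m2 = 27,
 * n = 1, p1 = 4, p2 = 5 with the 100 MHz refclk gives
 * vco = 100000 * (2 * 27) / 1 = 5400000 kHz and a port clock of
 * 5400000 / (4 * 5) = 270000 kHz, i.e. a 2.7 GHz DP link.
 */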
5050 
5051 static void chv_crtc_clock_get(struct intel_crtc *crtc,
5052 			       struct intel_crtc_state *pipe_config)
5053 {
5054 	struct drm_device *dev = crtc->base.dev;
5055 	struct drm_i915_private *dev_priv = to_i915(dev);
5056 	enum pipe pipe = crtc->pipe;
5057 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
5058 	struct dpll clock;
5059 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
5060 	int refclk = 100000;
5061 
5062 	/* In case of DSI, DPLL will not be used */
5063 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5064 		return;
5065 
5066 	vlv_dpio_get(dev_priv);
5067 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
5068 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
5069 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
5070 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
5071 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
5072 	vlv_dpio_put(dev_priv);
5073 
5074 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
5075 	clock.m2 = (pll_dw0 & 0xff) << 22;
5076 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
5077 		clock.m2 |= pll_dw2 & 0x3fffff;
5078 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
5079 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
5080 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
5081 
5082 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
5083 }
5084 
5085 static enum intel_output_format
5086 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
5087 {
5088 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5089 	u32 tmp;
5090 
5091 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5092 
5093 	if (tmp & PIPEMISC_YUV420_ENABLE) {
5094 		/* We support 4:2:0 in full blend mode only */
5095 		drm_WARN_ON(&dev_priv->drm,
5096 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
5097 
5098 		return INTEL_OUTPUT_FORMAT_YCBCR420;
5099 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
5100 		return INTEL_OUTPUT_FORMAT_YCBCR444;
5101 	} else {
5102 		return INTEL_OUTPUT_FORMAT_RGB;
5103 	}
5104 }
5105 
5106 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
5107 {
5108 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5109 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5110 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5111 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
5112 	u32 tmp;
5113 
5114 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
5115 
5116 	if (tmp & DISPPLANE_GAMMA_ENABLE)
5117 		crtc_state->gamma_enable = true;
5118 
5119 	if (!HAS_GMCH(dev_priv) &&
5120 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
5121 		crtc_state->csc_enable = true;
5122 }
5123 
5124 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5125 				 struct intel_crtc_state *pipe_config)
5126 {
5127 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5128 	enum intel_display_power_domain power_domain;
5129 	intel_wakeref_t wakeref;
5130 	u32 tmp;
5131 	bool ret;
5132 
5133 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5134 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5135 	if (!wakeref)
5136 		return false;
5137 
5138 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5139 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5140 	pipe_config->shared_dpll = NULL;
5141 
5142 	ret = false;
5143 
5144 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5145 	if (!(tmp & PIPECONF_ENABLE))
5146 		goto out;
5147 
5148 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5149 	    IS_CHERRYVIEW(dev_priv)) {
5150 		switch (tmp & PIPECONF_BPC_MASK) {
5151 		case PIPECONF_6BPC:
5152 			pipe_config->pipe_bpp = 18;
5153 			break;
5154 		case PIPECONF_8BPC:
5155 			pipe_config->pipe_bpp = 24;
5156 			break;
5157 		case PIPECONF_10BPC:
5158 			pipe_config->pipe_bpp = 30;
5159 			break;
5160 		default:
5161 			break;
5162 		}
5163 	}
5164 
5165 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5166 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
5167 		pipe_config->limited_color_range = true;
5168 
5169 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
5170 		PIPECONF_GAMMA_MODE_SHIFT;
5171 
5172 	if (IS_CHERRYVIEW(dev_priv))
5173 		pipe_config->cgm_mode = intel_de_read(dev_priv,
5174 						      CGM_PIPE_MODE(crtc->pipe));
5175 
5176 	i9xx_get_pipe_color_config(pipe_config);
5177 	intel_color_get_config(pipe_config);
5178 
5179 	if (DISPLAY_VER(dev_priv) < 4)
5180 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5181 
5182 	intel_get_transcoder_timings(crtc, pipe_config);
5183 	intel_get_pipe_src_size(crtc, pipe_config);
5184 
5185 	i9xx_get_pfit_config(pipe_config);
5186 
5187 	if (DISPLAY_VER(dev_priv) >= 4) {
5188 		/* No way to read it out on pipes B and C */
5189 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
5190 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
5191 		else
5192 			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
5193 		pipe_config->pixel_multiplier =
5194 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5195 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5196 		pipe_config->dpll_hw_state.dpll_md = tmp;
5197 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5198 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
5199 		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
5200 		pipe_config->pixel_multiplier =
5201 			((tmp & SDVO_MULTIPLIER_MASK)
5202 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5203 	} else {
5204 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
5205 		 * port and will be fixed up in the encoder->get_config
5206 		 * function. */
5207 		pipe_config->pixel_multiplier = 1;
5208 	}
5209 	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
5210 							DPLL(crtc->pipe));
5211 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
5212 		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
5213 							       FP0(crtc->pipe));
5214 		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
5215 							       FP1(crtc->pipe));
5216 	} else {
5217 		/* Mask out read-only status bits. */
5218 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5219 						     DPLL_PORTC_READY_MASK |
5220 						     DPLL_PORTB_READY_MASK);
5221 	}
5222 
5223 	if (IS_CHERRYVIEW(dev_priv))
5224 		chv_crtc_clock_get(crtc, pipe_config);
5225 	else if (IS_VALLEYVIEW(dev_priv))
5226 		vlv_crtc_clock_get(crtc, pipe_config);
5227 	else
5228 		i9xx_crtc_clock_get(crtc, pipe_config);
5229 
5230 	/*
5231 	 * Normally the dotclock is filled in by the encoder .get_config()
5232 	 * but in case the pipe is enabled w/o any ports we need a sane
5233 	 * default.
5234 	 */
5235 	pipe_config->hw.adjusted_mode.crtc_clock =
5236 		pipe_config->port_clock / pipe_config->pixel_multiplier;
5237 
5238 	ret = true;
5239 
5240 out:
5241 	intel_display_power_put(dev_priv, power_domain, wakeref);
5242 
5243 	return ret;
5244 }
5245 
5246 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
5247 {
5248 	struct intel_encoder *encoder;
5249 	int i;
5250 	u32 val, final;
5251 	bool has_lvds = false;
5252 	bool has_cpu_edp = false;
5253 	bool has_panel = false;
5254 	bool has_ck505 = false;
5255 	bool can_ssc = false;
5256 	bool using_ssc_source = false;
5257 
5258 	/* We need to take the global config into account */
5259 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5260 		switch (encoder->type) {
5261 		case INTEL_OUTPUT_LVDS:
5262 			has_panel = true;
5263 			has_lvds = true;
5264 			break;
5265 		case INTEL_OUTPUT_EDP:
5266 			has_panel = true;
5267 			if (encoder->port == PORT_A)
5268 				has_cpu_edp = true;
5269 			break;
5270 		default:
5271 			break;
5272 		}
5273 	}
5274 
5275 	if (HAS_PCH_IBX(dev_priv)) {
5276 		has_ck505 = dev_priv->vbt.display_clock_mode;
5277 		can_ssc = has_ck505;
5278 	} else {
5279 		has_ck505 = false;
5280 		can_ssc = true;
5281 	}
5282 
5283 	/* Check if any DPLLs are using the SSC source */
5284 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
5285 		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
5286 
5287 		if (!(temp & DPLL_VCO_ENABLE))
5288 			continue;
5289 
5290 		if ((temp & PLL_REF_INPUT_MASK) ==
5291 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5292 			using_ssc_source = true;
5293 			break;
5294 		}
5295 	}
5296 
5297 	drm_dbg_kms(&dev_priv->drm,
5298 		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
5299 		    has_panel, has_lvds, has_ck505, using_ssc_source);
5300 
5301 	/* Ironlake: try to set up the display ref clock before enabling
5302 	 * the DPLLs. This is only under the driver's control after the
5303 	 * PCH B stepping; earlier chipset steppings ignore this
5304 	 * setting.
5305 	 */
5306 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
5307 
5308 	/* As we must carefully and slowly disable/enable each source in turn,
5309 	 * compute the final state we want first and check if we need to
5310 	 * make any changes at all.
5311 	 */
5312 	final = val;
5313 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
5314 	if (has_ck505)
5315 		final |= DREF_NONSPREAD_CK505_ENABLE;
5316 	else
5317 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
5318 
5319 	final &= ~DREF_SSC_SOURCE_MASK;
5320 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5321 	final &= ~DREF_SSC1_ENABLE;
5322 
5323 	if (has_panel) {
5324 		final |= DREF_SSC_SOURCE_ENABLE;
5325 
5326 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
5327 			final |= DREF_SSC1_ENABLE;
5328 
5329 		if (has_cpu_edp) {
5330 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
5331 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5332 			else
5333 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5334 		} else
5335 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5336 	} else if (using_ssc_source) {
5337 		final |= DREF_SSC_SOURCE_ENABLE;
5338 		final |= DREF_SSC1_ENABLE;
5339 	}
5340 
5341 	if (final == val)
5342 		return;
5343 
5344 	/* Always enable nonspread source */
5345 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
5346 
5347 	if (has_ck505)
5348 		val |= DREF_NONSPREAD_CK505_ENABLE;
5349 	else
5350 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
5351 
5352 	if (has_panel) {
5353 		val &= ~DREF_SSC_SOURCE_MASK;
5354 		val |= DREF_SSC_SOURCE_ENABLE;
5355 
5356 		/* SSC must be turned on before enabling the CPU output  */
5357 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5358 			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
5359 			val |= DREF_SSC1_ENABLE;
5360 		} else
5361 			val &= ~DREF_SSC1_ENABLE;
5362 
5363 		/* Get SSC going before enabling the outputs */
5364 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5365 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5366 		udelay(200);
5367 
5368 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5369 
5370 		/* Enable CPU source on CPU attached eDP */
5371 		if (has_cpu_edp) {
5372 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5373 				drm_dbg_kms(&dev_priv->drm,
5374 					    "Using SSC on eDP\n");
5375 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5376 			} else
5377 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5378 		} else
5379 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5380 
5381 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5382 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5383 		udelay(200);
5384 	} else {
5385 		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
5386 
5387 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5388 
5389 		/* Turn off CPU output */
5390 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5391 
5392 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5393 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5394 		udelay(200);
5395 
5396 		if (!using_ssc_source) {
5397 			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
5398 
5399 			/* Turn off the SSC source */
5400 			val &= ~DREF_SSC_SOURCE_MASK;
5401 			val |= DREF_SSC_SOURCE_DISABLE;
5402 
5403 			/* Turn off SSC1 */
5404 			val &= ~DREF_SSC1_ENABLE;
5405 
5406 			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5407 			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5408 			udelay(200);
5409 		}
5410 	}
5411 
5412 	BUG_ON(val != final);
5413 }
5414 
5415 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5416 {
5417 	u32 tmp;
5418 
5419 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5420 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5421 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5422 
5423 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5424 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5425 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5426 
5427 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5428 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5429 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5430 
5431 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5432 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5433 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5434 }
5435 
5436 /* WaMPhyProgramming:hsw */
5437 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5438 {
5439 	u32 tmp;
5440 
5441 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5442 	tmp &= ~(0xFF << 24);
5443 	tmp |= (0x12 << 24);
5444 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5445 
5446 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5447 	tmp |= (1 << 11);
5448 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5449 
5450 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5451 	tmp |= (1 << 11);
5452 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5453 
5454 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5455 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5456 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5457 
5458 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5459 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5460 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5461 
5462 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5463 	tmp &= ~(7 << 13);
5464 	tmp |= (5 << 13);
5465 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5466 
5467 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5468 	tmp &= ~(7 << 13);
5469 	tmp |= (5 << 13);
5470 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5471 
5472 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5473 	tmp &= ~0xFF;
5474 	tmp |= 0x1C;
5475 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5476 
5477 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5478 	tmp &= ~0xFF;
5479 	tmp |= 0x1C;
5480 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5481 
5482 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5483 	tmp &= ~(0xFF << 16);
5484 	tmp |= (0x1C << 16);
5485 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5486 
5487 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5488 	tmp &= ~(0xFF << 16);
5489 	tmp |= (0x1C << 16);
5490 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5491 
5492 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5493 	tmp |= (1 << 27);
5494 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5495 
5496 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5497 	tmp |= (1 << 27);
5498 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5499 
5500 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5501 	tmp &= ~(0xF << 28);
5502 	tmp |= (4 << 28);
5503 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5504 
5505 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5506 	tmp &= ~(0xF << 28);
5507 	tmp |= (4 << 28);
5508 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5509 }
5510 
5511 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5512  * Programming" based on the parameters passed:
5513  * - Sequence to enable CLKOUT_DP
5514  * - Sequence to enable CLKOUT_DP without spread
5515  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5516  */
5517 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5518 				 bool with_spread, bool with_fdi)
5519 {
5520 	u32 reg, tmp;
5521 
5522 	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5523 		     "FDI requires downspread\n"))
5524 		with_spread = true;
5525 	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5526 		     with_fdi, "LP PCH doesn't have FDI\n"))
5527 		with_fdi = false;
5528 
5529 	mutex_lock(&dev_priv->sb_lock);
5530 
5531 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5532 	tmp &= ~SBI_SSCCTL_DISABLE;
5533 	tmp |= SBI_SSCCTL_PATHALT;
5534 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5535 
5536 	udelay(24);
5537 
5538 	if (with_spread) {
5539 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5540 		tmp &= ~SBI_SSCCTL_PATHALT;
5541 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5542 
5543 		if (with_fdi) {
5544 			lpt_reset_fdi_mphy(dev_priv);
5545 			lpt_program_fdi_mphy(dev_priv);
5546 		}
5547 	}
5548 
5549 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5550 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5551 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5552 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5553 
5554 	mutex_unlock(&dev_priv->sb_lock);
5555 }
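
/*
 * The three BSpec sequences map onto the parameters as follows
 * (illustrative calls):
 *
 *	lpt_enable_clkout_dp(dev_priv, true, true);	enable for FDI use
 *	lpt_enable_clkout_dp(dev_priv, true, false);	enable with spread
 *	lpt_enable_clkout_dp(dev_priv, false, false);	enable without spread
 */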
5556 
5557 /* Sequence to disable CLKOUT_DP */
5558 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5559 {
5560 	u32 reg, tmp;
5561 
5562 	mutex_lock(&dev_priv->sb_lock);
5563 
5564 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5565 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5566 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5567 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5568 
5569 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5570 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5571 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5572 			tmp |= SBI_SSCCTL_PATHALT;
5573 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5574 			udelay(32);
5575 		}
5576 		tmp |= SBI_SSCCTL_DISABLE;
5577 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5578 	}
5579 
5580 	mutex_unlock(&dev_priv->sb_lock);
5581 }
5582 
5583 #define BEND_IDX(steps) ((50 + (steps)) / 5)
5584 
5585 static const u16 sscdivintphase[] = {
5586 	[BEND_IDX( 50)] = 0x3B23,
5587 	[BEND_IDX( 45)] = 0x3B23,
5588 	[BEND_IDX( 40)] = 0x3C23,
5589 	[BEND_IDX( 35)] = 0x3C23,
5590 	[BEND_IDX( 30)] = 0x3D23,
5591 	[BEND_IDX( 25)] = 0x3D23,
5592 	[BEND_IDX( 20)] = 0x3E23,
5593 	[BEND_IDX( 15)] = 0x3E23,
5594 	[BEND_IDX( 10)] = 0x3F23,
5595 	[BEND_IDX(  5)] = 0x3F23,
5596 	[BEND_IDX(  0)] = 0x0025,
5597 	[BEND_IDX( -5)] = 0x0025,
5598 	[BEND_IDX(-10)] = 0x0125,
5599 	[BEND_IDX(-15)] = 0x0125,
5600 	[BEND_IDX(-20)] = 0x0225,
5601 	[BEND_IDX(-25)] = 0x0225,
5602 	[BEND_IDX(-30)] = 0x0325,
5603 	[BEND_IDX(-35)] = 0x0325,
5604 	[BEND_IDX(-40)] = 0x0425,
5605 	[BEND_IDX(-45)] = 0x0425,
5606 	[BEND_IDX(-50)] = 0x0525,
5607 };
5608 
5609 /*
5610  * Bend CLKOUT_DP
5611  * steps -50 to 50 inclusive, in steps of 5
5612  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5613  * change in clock period = -(steps / 10) * 5.787 ps
5614  */
5615 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5616 {
5617 	u32 tmp;
5618 	int idx = BEND_IDX(steps);
5619 
5620 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5621 		return;
5622 
5623 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5624 		return;
5625 
5626 	mutex_lock(&dev_priv->sb_lock);
5627 
5628 	if (steps % 10 != 0)
5629 		tmp = 0xAAAAAAAB;
5630 	else
5631 		tmp = 0x00000000;
5632 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5633 
5634 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5635 	tmp &= 0xffff0000;
5636 	tmp |= sscdivintphase[idx];
5637 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5638 
5639 	mutex_unlock(&dev_priv->sb_lock);
5640 }
5641 
5642 #undef BEND_IDX
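
/*
 * Worked example: steps = -20 maps to index (50 - 20) / 5 = 6, i.e.
 * sscdivintphase[6] = 0x0225, and since -20 % 10 == 0 the dither
 * phase is left at 0. Per the comment above, the clock period grows
 * by -(-20 / 10) * 5.787 ps = +11.574 ps, slowing the clock.
 */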
5643 
5644 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5645 {
5646 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5647 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5648 
5649 	if ((ctl & SPLL_PLL_ENABLE) == 0)
5650 		return false;
5651 
5652 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5653 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5654 		return true;
5655 
5656 	if (IS_BROADWELL(dev_priv) &&
5657 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5658 		return true;
5659 
5660 	return false;
5661 }
5662 
5663 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5664 			       enum intel_dpll_id id)
5665 {
5666 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5667 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5668 
5669 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
5670 		return false;
5671 
5672 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5673 		return true;
5674 
5675 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5676 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5677 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5678 		return true;
5679 
5680 	return false;
5681 }
5682 
5683 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5684 {
5685 	struct intel_encoder *encoder;
5686 	bool has_fdi = false;
5687 
5688 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5689 		switch (encoder->type) {
5690 		case INTEL_OUTPUT_ANALOG:
5691 			has_fdi = true;
5692 			break;
5693 		default:
5694 			break;
5695 		}
5696 	}
5697 
5698 	/*
5699 	 * The BIOS may have decided to use the PCH SSC
5700 	 * reference so we must not disable it until the
5701 	 * relevant PLLs have stopped relying on it. We'll
5702 	 * just leave the PCH SSC reference enabled in case
5703 	 * any active PLL is using it. It will get disabled
5704 	 * after runtime suspend if we don't have FDI.
5705 	 *
5706 	 * TODO: Move the whole reference clock handling
5707 	 * to the modeset sequence proper so that we can
5708 	 * actually enable/disable/reconfigure these things
5709 	 * safely. To do that we need to introduce a real
5710 	 * clock hierarchy. That would also allow us to do
5711 	 * clock bending finally.
5712 	 */
5713 	dev_priv->pch_ssc_use = 0;
5714 
5715 	if (spll_uses_pch_ssc(dev_priv)) {
5716 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5717 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5718 	}
5719 
5720 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5721 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5722 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5723 	}
5724 
5725 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5726 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5727 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5728 	}
5729 
5730 	if (dev_priv->pch_ssc_use)
5731 		return;
5732 
5733 	if (has_fdi) {
5734 		lpt_bend_clkout_dp(dev_priv, 0);
5735 		lpt_enable_clkout_dp(dev_priv, true, true);
5736 	} else {
5737 		lpt_disable_clkout_dp(dev_priv);
5738 	}
5739 }
5740 
5741 /*
5742  * Initialize reference clocks when the driver loads
5743  */
5744 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5745 {
5746 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5747 		ilk_init_pch_refclk(dev_priv);
5748 	else if (HAS_PCH_LPT(dev_priv))
5749 		lpt_init_pch_refclk(dev_priv);
5750 }
5751 
5752 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5753 {
5754 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5755 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5756 	enum pipe pipe = crtc->pipe;
5757 	u32 val;
5758 
5759 	val = 0;
5760 
5761 	switch (crtc_state->pipe_bpp) {
5762 	case 18:
5763 		val |= PIPECONF_6BPC;
5764 		break;
5765 	case 24:
5766 		val |= PIPECONF_8BPC;
5767 		break;
5768 	case 30:
5769 		val |= PIPECONF_10BPC;
5770 		break;
5771 	case 36:
5772 		val |= PIPECONF_12BPC;
5773 		break;
5774 	default:
5775 		/* Case prevented by intel_choose_pipe_bpp_dither. */
5776 		BUG();
5777 	}
5778 
5779 	if (crtc_state->dither)
5780 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5781 
5782 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5783 		val |= PIPECONF_INTERLACED_ILK;
5784 	else
5785 		val |= PIPECONF_PROGRESSIVE;
5786 
5787 	/*
5788 	 * This would end up with an odd purple hue over
5789 	 * the entire display. Make sure we don't do it.
5790 	 */
5791 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5792 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5793 
5794 	if (crtc_state->limited_color_range &&
5795 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5796 		val |= PIPECONF_COLOR_RANGE_SELECT;
5797 
5798 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5799 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5800 
5801 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5802 
5803 	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5804 
5805 	intel_de_write(dev_priv, PIPECONF(pipe), val);
5806 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
5807 }
5808 
5809 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5810 {
5811 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5812 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5813 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5814 	u32 val = 0;
5815 
5816 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
5817 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5818 
5819 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5820 		val |= PIPECONF_INTERLACED_ILK;
5821 	else
5822 		val |= PIPECONF_PROGRESSIVE;
5823 
5824 	if (IS_HASWELL(dev_priv) &&
5825 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5826 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5827 
5828 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5829 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5830 }
5831 
5832 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
5833 {
5834 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5835 	const struct intel_crtc_scaler_state *scaler_state =
5836 		&crtc_state->scaler_state;
5837 
5838 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5839 	u32 val = 0;
5840 	int i;
5841 
5842 	switch (crtc_state->pipe_bpp) {
5843 	case 18:
5844 		val |= PIPEMISC_6_BPC;
5845 		break;
5846 	case 24:
5847 		val |= PIPEMISC_8_BPC;
5848 		break;
5849 	case 30:
5850 		val |= PIPEMISC_10_BPC;
5851 		break;
5852 	case 36:
5853 		/* Port output 12BPC defined for ADLP+ */
5854 		if (DISPLAY_VER(dev_priv) > 12)
5855 			val |= PIPEMISC_12_BPC_ADLP;
5856 		break;
5857 	default:
5858 		MISSING_CASE(crtc_state->pipe_bpp);
5859 		break;
5860 	}
5861 
5862 	if (crtc_state->dither)
5863 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5864 
5865 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
5866 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
5867 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
5868 
5869 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5870 		val |= PIPEMISC_YUV420_ENABLE |
5871 			PIPEMISC_YUV420_MODE_FULL_BLEND;
5872 
5873 	if (DISPLAY_VER(dev_priv) >= 11 &&
5874 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
5875 					   BIT(PLANE_CURSOR))) == 0)
5876 		val |= PIPEMISC_HDR_MODE_PRECISION;
5877 
5878 	if (DISPLAY_VER(dev_priv) >= 12)
5879 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
5880 
5881 	if (IS_ALDERLAKE_P(dev_priv)) {
5882 		bool scaler_in_use = false;
5883 
5884 		for (i = 0; i < crtc->num_scalers; i++) {
5885 			if (!scaler_state->scalers[i].in_use)
5886 				continue;
5887 
5888 			scaler_in_use = true;
5889 			break;
5890 		}
5891 
5892 		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
5893 			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
5894 			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
5895 			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
5896 	}
5897 
5898 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
5899 }
5900 
5901 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5902 {
5903 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5904 	u32 tmp;
5905 
5906 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5907 
5908 	switch (tmp & PIPEMISC_BPC_MASK) {
5909 	case PIPEMISC_6_BPC:
5910 		return 18;
5911 	case PIPEMISC_8_BPC:
5912 		return 24;
5913 	case PIPEMISC_10_BPC:
5914 		return 30;
5915 	/*
5916 	 * PORT OUTPUT 12 BPC defined for ADLP+.
5917 	 *
5918 	 * TODO:
5919 	 * For previous platforms with DSI interface, bits 5:7
5920 	 * are used for storing pipe_bpp irrespective of dithering.
5921 	 * Since the value of 12 BPC is not defined for these bits
5922 	 * on older platforms, need to find a workaround for 12 BPC
5923 	 * MIPI DSI HW readout.
5924 	 */
5925 	case PIPEMISC_12_BPC_ADLP:
5926 		if (DISPLAY_VER(dev_priv) > 12)
5927 			return 36;
5928 		fallthrough;
5929 	default:
5930 		MISSING_CASE(tmp);
5931 		return 0;
5932 	}
5933 }
5934 
5935 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5936 {
5937 	/*
5938 	 * Account for spread spectrum to avoid
5939 	 * oversubscribing the link. Max center spread
5940 	 * is 2.5%; use 5% for safety's sake.
5941 	 */
5942 	u32 bps = target_clock * bpp * 21 / 20;
5943 	return DIV_ROUND_UP(bps, link_bw * 8);
5944 }
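
/*
 * Worked example (illustrative numbers): a 148500 kHz, 24 bpp stream
 * on a 270000 kHz FDI link needs
 *	bps = 148500 * 24 * 21 / 20 = 3742200
 *	lanes = DIV_ROUND_UP(3742200, 270000 * 8) = 2
 * so two lanes suffice even with the 5% spread margin applied.
 */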
5945 
5946 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5947 					 struct intel_link_m_n *m_n)
5948 {
5949 	struct drm_device *dev = crtc->base.dev;
5950 	struct drm_i915_private *dev_priv = to_i915(dev);
5951 	enum pipe pipe = crtc->pipe;
5952 
5953 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5954 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5955 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5956 		& ~TU_SIZE_MASK;
5957 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
5958 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5959 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5960 }
5961 
5962 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5963 					 enum transcoder transcoder,
5964 					 struct intel_link_m_n *m_n,
5965 					 struct intel_link_m_n *m2_n2)
5966 {
5967 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5968 	enum pipe pipe = crtc->pipe;
5969 
5970 	if (DISPLAY_VER(dev_priv) >= 5) {
5971 		m_n->link_m = intel_de_read(dev_priv,
5972 					    PIPE_LINK_M1(transcoder));
5973 		m_n->link_n = intel_de_read(dev_priv,
5974 					    PIPE_LINK_N1(transcoder));
5975 		m_n->gmch_m = intel_de_read(dev_priv,
5976 					    PIPE_DATA_M1(transcoder))
5977 			& ~TU_SIZE_MASK;
5978 		m_n->gmch_n = intel_de_read(dev_priv,
5979 					    PIPE_DATA_N1(transcoder));
5980 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5981 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5982 
5983 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
5984 			m2_n2->link_m = intel_de_read(dev_priv,
5985 						      PIPE_LINK_M2(transcoder));
5986 			m2_n2->link_n =	intel_de_read(dev_priv,
5987 							     PIPE_LINK_N2(transcoder));
5988 			m2_n2->gmch_m =	intel_de_read(dev_priv,
5989 							     PIPE_DATA_M2(transcoder))
5990 					& ~TU_SIZE_MASK;
5991 			m2_n2->gmch_n =	intel_de_read(dev_priv,
5992 							     PIPE_DATA_N2(transcoder));
5993 			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
5994 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5995 		}
5996 	} else {
5997 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5998 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5999 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
6000 			& ~TU_SIZE_MASK;
6001 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
6002 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
6003 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6004 	}
6005 }
6006 
6007 void intel_dp_get_m_n(struct intel_crtc *crtc,
6008 		      struct intel_crtc_state *pipe_config)
6009 {
6010 	if (pipe_config->has_pch_encoder)
6011 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6012 	else
6013 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6014 					     &pipe_config->dp_m_n,
6015 					     &pipe_config->dp_m2_n2);
6016 }
6017 
6018 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
6019 				   struct intel_crtc_state *pipe_config)
6020 {
6021 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6022 				     &pipe_config->fdi_m_n, NULL);
6023 }
6024 
6025 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
6026 				  u32 pos, u32 size)
6027 {
6028 	drm_rect_init(&crtc_state->pch_pfit.dst,
6029 		      pos >> 16, pos & 0xffff,
6030 		      size >> 16, size & 0xffff);
6031 }
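
/*
 * Example decode (hypothetical register values): pos = 0x00100020
 * places the panel fitter window at x = 16, y = 32, and
 * size = 0x07800438 makes it 1920 wide by 1080 high, i.e.
 * drm_rect_init(&dst, 16, 32, 1920, 1080).
 */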
6032 
6033 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
6034 {
6035 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6036 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6037 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
6038 	int id = -1;
6039 	int i;
6040 
6041 	/* find scaler attached to this pipe */
6042 	for (i = 0; i < crtc->num_scalers; i++) {
6043 		u32 ctl, pos, size;
6044 
6045 		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
6046 		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
6047 			continue;
6048 
6049 		id = i;
6050 		crtc_state->pch_pfit.enabled = true;
6051 
6052 		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
6053 		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
6054 
6055 		ilk_get_pfit_pos_size(crtc_state, pos, size);
6056 
6057 		scaler_state->scalers[i].in_use = true;
6058 		break;
6059 	}
6060 
6061 	scaler_state->scaler_id = id;
6062 	if (id >= 0)
6063 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
6064 	else
6065 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
6066 }
6067 
6068 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
6069 {
6070 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6071 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6072 	u32 ctl, pos, size;
6073 
6074 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
6075 	if ((ctl & PF_ENABLE) == 0)
6076 		return;
6077 
6078 	crtc_state->pch_pfit.enabled = true;
6079 
6080 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
6081 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
6082 
6083 	ilk_get_pfit_pos_size(crtc_state, pos, size);
6084 
6085 	/*
6086 	 * We currently do not free assignments of panel fitters on
6087 	 * ivb/hsw (since we don't use the higher upscaling modes which
6088 	 * differentiates them) so just WARN about this case for now.
6089 	 */
6090 	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
6091 		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
6092 }
6093 
6094 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
6095 				struct intel_crtc_state *pipe_config)
6096 {
6097 	struct drm_device *dev = crtc->base.dev;
6098 	struct drm_i915_private *dev_priv = to_i915(dev);
6099 	enum intel_display_power_domain power_domain;
6100 	intel_wakeref_t wakeref;
6101 	u32 tmp;
6102 	bool ret;
6103 
6104 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
6105 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
6106 	if (!wakeref)
6107 		return false;
6108 
6109 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6110 	pipe_config->shared_dpll = NULL;
6111 
6112 	ret = false;
6113 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
6114 	if (!(tmp & PIPECONF_ENABLE))
6115 		goto out;
6116 
6117 	switch (tmp & PIPECONF_BPC_MASK) {
6118 	case PIPECONF_6BPC:
6119 		pipe_config->pipe_bpp = 18;
6120 		break;
6121 	case PIPECONF_8BPC:
6122 		pipe_config->pipe_bpp = 24;
6123 		break;
6124 	case PIPECONF_10BPC:
6125 		pipe_config->pipe_bpp = 30;
6126 		break;
6127 	case PIPECONF_12BPC:
6128 		pipe_config->pipe_bpp = 36;
6129 		break;
6130 	default:
6131 		break;
6132 	}
6133 
6134 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
6135 		pipe_config->limited_color_range = true;
6136 
6137 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
6138 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
6139 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
6140 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6141 		break;
6142 	default:
6143 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6144 		break;
6145 	}
6146 
6147 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
6148 		PIPECONF_GAMMA_MODE_SHIFT;
6149 
6150 	pipe_config->csc_mode = intel_de_read(dev_priv,
6151 					      PIPE_CSC_MODE(crtc->pipe));
6152 
6153 	i9xx_get_pipe_color_config(pipe_config);
6154 	intel_color_get_config(pipe_config);
6155 
6156 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6157 		struct intel_shared_dpll *pll;
6158 		enum intel_dpll_id pll_id;
6159 		bool pll_active;
6160 
6161 		pipe_config->has_pch_encoder = true;
6162 
6163 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
6164 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6165 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6166 
6167 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6168 
6169 		if (HAS_PCH_IBX(dev_priv)) {
6170 			/*
6171 			 * The pipe->pch transcoder and pch transcoder->pll
6172 			 * mapping is fixed.
6173 			 */
6174 			pll_id = (enum intel_dpll_id) crtc->pipe;
6175 		} else {
6176 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
6177 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6178 				pll_id = DPLL_ID_PCH_PLL_B;
6179 			else
6180 				pll_id = DPLL_ID_PCH_PLL_A;
6181 		}
6182 
6183 		pipe_config->shared_dpll =
6184 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
6185 		pll = pipe_config->shared_dpll;
6186 
6187 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6188 						     &pipe_config->dpll_hw_state);
6189 		drm_WARN_ON(dev, !pll_active);
6190 
6191 		tmp = pipe_config->dpll_hw_state.dpll;
6192 		pipe_config->pixel_multiplier =
6193 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6194 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6195 
6196 		ilk_pch_clock_get(crtc, pipe_config);
6197 	} else {
6198 		pipe_config->pixel_multiplier = 1;
6199 	}
6200 
6201 	intel_get_transcoder_timings(crtc, pipe_config);
6202 	intel_get_pipe_src_size(crtc, pipe_config);
6203 
6204 	ilk_get_pfit_config(pipe_config);
6205 
6206 	ret = true;
6207 
6208 out:
6209 	intel_display_power_put(dev_priv, power_domain, wakeref);
6210 
6211 	return ret;
6212 }
6213 
6214 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
6215 				     struct intel_crtc_state *pipe_config,
6216 				     struct intel_display_power_domain_set *power_domain_set)
6217 {
6218 	struct drm_device *dev = crtc->base.dev;
6219 	struct drm_i915_private *dev_priv = to_i915(dev);
6220 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
6221 	unsigned long enabled_panel_transcoders = 0;
6222 	enum transcoder panel_transcoder;
6223 	u32 tmp;
6224 
6225 	if (DISPLAY_VER(dev_priv) >= 11)
6226 		panel_transcoder_mask |=
6227 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
6228 
6229 	/*
6230 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
6231 	 * and DSI transcoders handled below.
6232 	 */
6233 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6234 
6235 	/*
6236 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
6237 	 * consistency and less surprising code; it's in an always-on power well).
6238 	 */
6239 	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
6240 				       panel_transcoder_mask) {
6241 		bool force_thru = false;
6242 		enum pipe trans_pipe;
6243 
6244 		tmp = intel_de_read(dev_priv,
6245 				    TRANS_DDI_FUNC_CTL(panel_transcoder));
6246 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6247 			continue;
6248 
6249 		/*
6250 		 * Log all enabled ones, only use the first one.
6251 		 *
6252 		 * FIXME: This won't work for two separate DSI displays.
6253 		 */
6254 		enabled_panel_transcoders |= BIT(panel_transcoder);
6255 		if (enabled_panel_transcoders != BIT(panel_transcoder))
6256 			continue;
6257 
6258 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6259 		default:
6260 			drm_WARN(dev, 1,
6261 				 "unknown pipe linked to transcoder %s\n",
6262 				 transcoder_name(panel_transcoder));
6263 			fallthrough;
6264 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6265 			force_thru = true;
6266 			fallthrough;
6267 		case TRANS_DDI_EDP_INPUT_A_ON:
6268 			trans_pipe = PIPE_A;
6269 			break;
6270 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6271 			trans_pipe = PIPE_B;
6272 			break;
6273 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
6274 			trans_pipe = PIPE_C;
6275 			break;
6276 		case TRANS_DDI_EDP_INPUT_D_ONOFF:
6277 			trans_pipe = PIPE_D;
6278 			break;
6279 		}
6280 
6281 		if (trans_pipe == crtc->pipe) {
6282 			pipe_config->cpu_transcoder = panel_transcoder;
6283 			pipe_config->pch_pfit.force_thru = force_thru;
6284 		}
6285 	}
6286 
6287 	/*
6288 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
6289 	 */
6290 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
6291 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
6292 
6293 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6294 						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6295 		return false;
6296 
6297 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
6298 
6299 	return tmp & PIPECONF_ENABLE;
6300 }
6301 
6302 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
6303 					 struct intel_crtc_state *pipe_config,
6304 					 struct intel_display_power_domain_set *power_domain_set)
6305 {
6306 	struct drm_device *dev = crtc->base.dev;
6307 	struct drm_i915_private *dev_priv = to_i915(dev);
6308 	enum transcoder cpu_transcoder;
6309 	enum port port;
6310 	u32 tmp;
6311 
6312 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
6313 		if (port == PORT_A)
6314 			cpu_transcoder = TRANSCODER_DSI_A;
6315 		else
6316 			cpu_transcoder = TRANSCODER_DSI_C;
6317 
6318 		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6319 							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
6320 			continue;
6321 
6322 		/*
6323 		 * The PLL needs to be enabled with a valid divider
6324 		 * configuration, otherwise accessing DSI registers will hang
6325 		 * the machine. See BSpec North Display Engine
6326 		 * registers/MIPI[BXT]. We can break out here early, since we
6327 		 * need the same DSI PLL to be enabled for both DSI ports.
6328 		 */
6329 		if (!bxt_dsi_pll_is_enabled(dev_priv))
6330 			break;
6331 
6332 		/* XXX: this works for video mode only */
6333 		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
6334 		if (!(tmp & DPI_ENABLE))
6335 			continue;
6336 
6337 		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
6338 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
6339 			continue;
6340 
6341 		pipe_config->cpu_transcoder = cpu_transcoder;
6342 		break;
6343 	}
6344 
6345 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
6346 }
6347 
6348 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
6349 				   struct intel_crtc_state *pipe_config)
6350 {
6351 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6352 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6353 	enum port port;
6354 	u32 tmp;
6355 
6356 	if (transcoder_is_dsi(cpu_transcoder)) {
6357 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
6358 						PORT_A : PORT_B;
6359 	} else {
6360 		tmp = intel_de_read(dev_priv,
6361 				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
6362 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6363 			return;
6364 		if (DISPLAY_VER(dev_priv) >= 12)
6365 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6366 		else
6367 			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6368 	}
6369 
6370 	/*
6371 	 * Haswell has only FDI/PCH transcoder A, which is connected to
6372 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
6373 	 * the PCH transcoder is on.
6374 	 */
6375 	if (DISPLAY_VER(dev_priv) < 9 &&
6376 	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
6377 		pipe_config->has_pch_encoder = true;
6378 
6379 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
6380 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6381 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6382 
6383 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6384 	}
6385 }
6386 
6387 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
6388 				struct intel_crtc_state *pipe_config)
6389 {
6390 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6391 	struct intel_display_power_domain_set power_domain_set = { };
6392 	bool active;
6393 	u32 tmp;
6394 
6395 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6396 						       POWER_DOMAIN_PIPE(crtc->pipe)))
6397 		return false;
6398 
6399 	pipe_config->shared_dpll = NULL;
6400 
6401 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
6402 
6403 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6404 	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
6405 		drm_WARN_ON(&dev_priv->drm, active);
6406 		active = true;
6407 	}
6408 
6409 	intel_dsc_get_config(pipe_config);
6410 	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
6411 		intel_uncompressed_joiner_get_config(pipe_config);
6412 
6413 	if (!active) {
6414 		/* bigjoiner slave doesn't enable transcoder */
6415 		if (!pipe_config->bigjoiner_slave)
6416 			goto out;
6417 
6418 		active = true;
6419 		pipe_config->pixel_multiplier = 1;
6420 
6421 		/* we cannot read out most of the state, so don't bother */
6422 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
6423 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
6424 	    DISPLAY_VER(dev_priv) >= 11) {
6425 		hsw_get_ddi_port_state(crtc, pipe_config);
6426 		intel_get_transcoder_timings(crtc, pipe_config);
6427 	}
6428 
6429 	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
6430 		intel_vrr_get_config(crtc, pipe_config);
6431 
6432 	intel_get_pipe_src_size(crtc, pipe_config);
6433 
6434 	if (IS_HASWELL(dev_priv)) {
6435 		u32 tmp = intel_de_read(dev_priv,
6436 					PIPECONF(pipe_config->cpu_transcoder));
6437 
6438 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
6439 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6440 		else
6441 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6442 	} else {
6443 		pipe_config->output_format =
6444 			bdw_get_pipemisc_output_format(crtc);
6445 	}
6446 
6447 	pipe_config->gamma_mode = intel_de_read(dev_priv,
6448 						GAMMA_MODE(crtc->pipe));
6449 
6450 	pipe_config->csc_mode = intel_de_read(dev_priv,
6451 					      PIPE_CSC_MODE(crtc->pipe));
6452 
6453 	if (DISPLAY_VER(dev_priv) >= 9) {
6454 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6455 
6456 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6457 			pipe_config->gamma_enable = true;
6458 
6459 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6460 			pipe_config->csc_enable = true;
6461 	} else {
6462 		i9xx_get_pipe_color_config(pipe_config);
6463 	}
6464 
6465 	intel_color_get_config(pipe_config);
6466 
6467 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6468 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6469 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6470 		pipe_config->ips_linetime =
6471 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
6472 
6473 	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6474 						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6475 		if (DISPLAY_VER(dev_priv) >= 9)
6476 			skl_get_pfit_config(pipe_config);
6477 		else
6478 			ilk_get_pfit_config(pipe_config);
6479 	}
6480 
6481 	if (hsw_crtc_supports_ips(crtc)) {
6482 		if (IS_HASWELL(dev_priv)) {
6483 			pipe_config->ips_enabled = intel_de_read(dev_priv,
6484 								 IPS_CTL) & IPS_ENABLE;
6485 		} else {
6486 			/*
6487 			 * We cannot read out the IPS state on Broadwell, so
6488 			 * set it to true so we can force a defined state on
6489 			 * the first commit.
6490 			 */
6491 			pipe_config->ips_enabled = true;
6492 		}
6493 	}
6494 
6495 	if (pipe_config->bigjoiner_slave) {
6496 		/* Cannot be read out as a slave, set to 0. */
6497 		pipe_config->pixel_multiplier = 0;
6498 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
6499 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6500 		pipe_config->pixel_multiplier =
6501 			intel_de_read(dev_priv,
6502 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6503 	} else {
6504 		pipe_config->pixel_multiplier = 1;
6505 	}
6506 
6507 out:
6508 	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
6509 
6510 	return active;
6511 }
6512 
6513 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6514 {
6515 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6516 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6517 
6518 	if (!i915->display.get_pipe_config(crtc, crtc_state))
6519 		return false;
6520 
6521 	crtc_state->hw.active = true;
6522 
6523 	intel_crtc_readout_derived_state(crtc_state);
6524 
6525 	return true;
6526 }
6527 
6528 /* VESA 640x480x72Hz mode to set on the pipe */
6529 static const struct drm_display_mode load_detect_mode = {
6530 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6531 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6532 };
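
/*
 * For reference, the DRM_MODE() arguments above are, in order: name, type,
 * pixel clock in kHz (31500), hdisplay/hsync_start/hsync_end/htotal
 * (640/664/704/832), hskew (0), vdisplay/vsync_start/vsync_end/vtotal
 * (480/489/491/520), vscan (0) and flags. Sanity check of the refresh:
 * 31500000 / (832 * 520) is roughly 72.8 Hz.
 */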
6533 
6534 struct drm_framebuffer *
6535 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6536 			 struct drm_mode_fb_cmd2 *mode_cmd)
6537 {
6538 	struct intel_framebuffer *intel_fb;
6539 	int ret;
6540 
6541 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6542 	if (!intel_fb)
6543 		return ERR_PTR(-ENOMEM);
6544 
6545 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6546 	if (ret)
6547 		goto err;
6548 
6549 	return &intel_fb->base;
6550 
6551 err:
6552 	kfree(intel_fb);
6553 	return ERR_PTR(ret);
6554 }
6555 
6556 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6557 					struct drm_crtc *crtc)
6558 {
6559 	struct drm_plane *plane;
6560 	struct drm_plane_state *plane_state;
6561 	int ret, i;
6562 
6563 	ret = drm_atomic_add_affected_planes(state, crtc);
6564 	if (ret)
6565 		return ret;
6566 
6567 	for_each_new_plane_in_state(state, plane, plane_state, i) {
6568 		if (plane_state->crtc != crtc)
6569 			continue;
6570 
6571 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6572 		if (ret)
6573 			return ret;
6574 
6575 		drm_atomic_set_fb_for_plane(plane_state, NULL);
6576 	}
6577 
6578 	return 0;
6579 }
6580 
6581 int intel_get_load_detect_pipe(struct drm_connector *connector,
6582 			       struct intel_load_detect_pipe *old,
6583 			       struct drm_modeset_acquire_ctx *ctx)
6584 {
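	/*
	 * Note on the return value: this returns true on success, false if
	 * no usable pipe was found or the commit failed, and -EDEADLK when
	 * the caller needs to back off and retry the locking.
	 */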
6585 	struct intel_encoder *encoder =
6586 		intel_attached_encoder(to_intel_connector(connector));
6587 	struct intel_crtc *possible_crtc;
6588 	struct intel_crtc *crtc = NULL;
6589 	struct drm_device *dev = encoder->base.dev;
6590 	struct drm_i915_private *dev_priv = to_i915(dev);
6591 	struct drm_mode_config *config = &dev->mode_config;
6592 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
6593 	struct drm_connector_state *connector_state;
6594 	struct intel_crtc_state *crtc_state;
6595 	int ret;
6596 
6597 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6598 		    connector->base.id, connector->name,
6599 		    encoder->base.base.id, encoder->base.name);
6600 
6601 	old->restore_state = NULL;
6602 
6603 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6604 
6605 	/*
6606 	 * Algorithm gets a little messy:
6607 	 *
6608 	 *   - if the connector already has an assigned crtc, use it (but make
6609 	 *     sure it's on first)
6610 	 *
6611 	 *   - try to find the first unused crtc that can drive this connector,
6612 	 *     and use that if we find one
6613 	 */
6614 
6615 	/* See if we already have a CRTC for this connector */
6616 	if (connector->state->crtc) {
6617 		crtc = to_intel_crtc(connector->state->crtc);
6618 
6619 		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
6620 		if (ret)
6621 			goto fail;
6622 
6623 		/* Make sure the crtc and connector are running */
6624 		goto found;
6625 	}
6626 
6627 	/* Find an unused one (if possible) */
6628 	for_each_intel_crtc(dev, possible_crtc) {
6629 		if (!(encoder->base.possible_crtcs &
6630 		      drm_crtc_mask(&possible_crtc->base)))
6631 			continue;
6632 
6633 		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
6634 		if (ret)
6635 			goto fail;
6636 
6637 		if (possible_crtc->base.state->enable) {
6638 			drm_modeset_unlock(&possible_crtc->base.mutex);
6639 			continue;
6640 		}
6641 
6642 		crtc = possible_crtc;
6643 		break;
6644 	}
6645 
6646 	/*
6647 	 * If we didn't find an unused CRTC, don't use any.
6648 	 */
6649 	if (!crtc) {
6650 		drm_dbg_kms(&dev_priv->drm,
6651 			    "no pipe available for load-detect\n");
6652 		ret = -ENODEV;
6653 		goto fail;
6654 	}
6655 
6656 found:
6657 	state = drm_atomic_state_alloc(dev);
6658 	restore_state = drm_atomic_state_alloc(dev);
6659 	if (!state || !restore_state) {
6660 		ret = -ENOMEM;
6661 		goto fail;
6662 	}
6663 
6664 	state->acquire_ctx = ctx;
6665 	restore_state->acquire_ctx = ctx;
6666 
6667 	connector_state = drm_atomic_get_connector_state(state, connector);
6668 	if (IS_ERR(connector_state)) {
6669 		ret = PTR_ERR(connector_state);
6670 		goto fail;
6671 	}
6672 
6673 	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
6674 	if (ret)
6675 		goto fail;
6676 
6677 	crtc_state = intel_atomic_get_crtc_state(state, crtc);
6678 	if (IS_ERR(crtc_state)) {
6679 		ret = PTR_ERR(crtc_state);
6680 		goto fail;
6681 	}
6682 
6683 	crtc_state->uapi.active = true;
6684 
6685 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
6686 					   &load_detect_mode);
6687 	if (ret)
6688 		goto fail;
6689 
6690 	ret = intel_modeset_disable_planes(state, &crtc->base);
6691 	if (ret)
6692 		goto fail;
6693 
6694 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
6695 	if (!ret)
6696 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
6697 	if (!ret)
6698 		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
6699 	if (ret) {
6700 		drm_dbg_kms(&dev_priv->drm,
6701 			    "Failed to create a copy of old state to restore: %i\n",
6702 			    ret);
6703 		goto fail;
6704 	}
6705 
6706 	ret = drm_atomic_commit(state);
6707 	if (ret) {
6708 		drm_dbg_kms(&dev_priv->drm,
6709 			    "failed to set mode on load-detect pipe\n");
6710 		goto fail;
6711 	}
6712 
6713 	old->restore_state = restore_state;
6714 	drm_atomic_state_put(state);
6715 
6716 	/* let the connector get through one full cycle before testing */
6717 	intel_wait_for_vblank(dev_priv, crtc->pipe);
6718 	return true;
6719 
6720 fail:
6721 	if (state) {
6722 		drm_atomic_state_put(state);
6723 		state = NULL;
6724 	}
6725 	if (restore_state) {
6726 		drm_atomic_state_put(restore_state);
6727 		restore_state = NULL;
6728 	}
6729 
6730 	if (ret == -EDEADLK)
6731 		return ret;
6732 
6733 	return false;
6734 }
6735 
6736 void intel_release_load_detect_pipe(struct drm_connector *connector,
6737 				    struct intel_load_detect_pipe *old,
6738 				    struct drm_modeset_acquire_ctx *ctx)
6739 {
6740 	struct intel_encoder *intel_encoder =
6741 		intel_attached_encoder(to_intel_connector(connector));
6742 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6743 	struct drm_encoder *encoder = &intel_encoder->base;
6744 	struct drm_atomic_state *state = old->restore_state;
6745 	int ret;
6746 
6747 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6748 		    connector->base.id, connector->name,
6749 		    encoder->base.id, encoder->name);
6750 
6751 	if (!state)
6752 		return;
6753 
6754 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6755 	if (ret)
6756 		drm_dbg_kms(&i915->drm,
6757 			    "Couldn't release load detect pipe: %i\n", ret);
6758 	drm_atomic_state_put(state);
6759 }
6760 
6761 static int i9xx_pll_refclk(struct drm_device *dev,
6762 			   const struct intel_crtc_state *pipe_config)
6763 {
6764 	struct drm_i915_private *dev_priv = to_i915(dev);
6765 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6766 
6767 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6768 		return dev_priv->vbt.lvds_ssc_freq;
6769 	else if (HAS_PCH_SPLIT(dev_priv))
6770 		return 120000;
6771 	else if (DISPLAY_VER(dev_priv) != 2)
6772 		return 96000;
6773 	else
6774 		return 48000;
6775 }
6776 
6777 /* Returns the clock of the currently programmed mode of the given pipe. */
6778 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6779 				struct intel_crtc_state *pipe_config)
6780 {
6781 	struct drm_device *dev = crtc->base.dev;
6782 	struct drm_i915_private *dev_priv = to_i915(dev);
6783 	enum pipe pipe = crtc->pipe;
6784 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6785 	u32 fp;
6786 	struct dpll clock;
6787 	int port_clock;
6788 	int refclk = i9xx_pll_refclk(dev, pipe_config);
6789 
6790 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6791 		fp = pipe_config->dpll_hw_state.fp0;
6792 	else
6793 		fp = pipe_config->dpll_hw_state.fp1;
6794 
6795 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6796 	if (IS_PINEVIEW(dev_priv)) {
6797 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6798 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6799 	} else {
6800 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6801 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6802 	}
6803 
6804 	if (DISPLAY_VER(dev_priv) != 2) {
6805 		if (IS_PINEVIEW(dev_priv))
6806 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6807 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6808 		else
6809 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6810 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6811 
6812 		switch (dpll & DPLL_MODE_MASK) {
6813 		case DPLLB_MODE_DAC_SERIAL:
6814 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6815 				5 : 10;
6816 			break;
6817 		case DPLLB_MODE_LVDS:
6818 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6819 				7 : 14;
6820 			break;
6821 		default:
6822 			drm_dbg_kms(&dev_priv->drm,
6823 				    "Unknown DPLL mode %08x in programmed "
6824 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
6825 			return;
6826 		}
6827 
6828 		if (IS_PINEVIEW(dev_priv))
6829 			port_clock = pnv_calc_dpll_params(refclk, &clock);
6830 		else
6831 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
6832 	} else {
6833 		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
6834 								 LVDS);
6835 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
6836 
6837 		if (is_lvds) {
6838 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6839 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6840 
6841 			if (lvds & LVDS_CLKB_POWER_UP)
6842 				clock.p2 = 7;
6843 			else
6844 				clock.p2 = 14;
6845 		} else {
6846 			if (dpll & PLL_P1_DIVIDE_BY_TWO) {
6847 				clock.p1 = 2;
6848 			} else {
6849 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6850 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6851 			}
6852 			if (dpll & PLL_P2_DIVIDE_BY_4)
6853 				clock.p2 = 4;
6854 			else
6855 				clock.p2 = 2;
6856 		}
6857 
6858 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
6859 	}
6860 
6861 	/*
6862 	 * This value includes pixel_multiplier. We will use
6863 	 * port_clock to compute adjusted_mode.crtc_clock in the
6864 	 * encoder's get_config() function.
6865 	 */
6866 	pipe_config->port_clock = port_clock;
6867 }
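
/*
 * A rough sketch of how the decoded dividers combine (the actual math
 * lives in i9xx_calc_dpll_params()/pnv_calc_dpll_params() elsewhere):
 * on i9xx, m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2) and
 * dot clock = vco / (p1 * p2).
 */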
6868 
6869 int intel_dotclock_calculate(int link_freq,
6870 			     const struct intel_link_m_n *m_n)
6871 {
6872 	/*
6873 	 * The calculation for the data clock is:
6874 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6875 	 * But we want to avoid losing precision if possible, so:
6876 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6877 	 *
6878 	 * and using the link m/n pair it is simpler:
6879 	 * pixel_clock = (link_m * link_clock) / link_n
6880 	 */
6881 
6882 	if (!m_n->link_n)
6883 		return 0;
6884 
6885 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6886 }
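
/*
 * A hypothetical sanity check of the above: with a 270000 kHz (HBR) link
 * and a link m/n pair programmed as link_m = 148500, link_n = 270000,
 * the m/n ratio of 0.55 recovers a 148500 kHz dotclock (1080p@60).
 */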
6887 
6888 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6889 			      struct intel_crtc_state *pipe_config)
6890 {
6891 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6892 
6893 	/* read out port_clock from the DPLL */
6894 	i9xx_crtc_clock_get(crtc, pipe_config);
6895 
6896 	/*
6897 	 * In case there is an active pipe without active ports,
6898 	 * we still need some idea of the dotclock anyway.
6899 	 * Calculate one based on the FDI configuration.
6900 	 */
6901 	pipe_config->hw.adjusted_mode.crtc_clock =
6902 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6903 					 &pipe_config->fdi_m_n);
6904 }
6905 
6906 /* Returns the currently programmed mode of the given encoder. */
6907 struct drm_display_mode *
6908 intel_encoder_current_mode(struct intel_encoder *encoder)
6909 {
6910 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6911 	struct intel_crtc_state *crtc_state;
6912 	struct drm_display_mode *mode;
6913 	struct intel_crtc *crtc;
6914 	enum pipe pipe;
6915 
6916 	if (!encoder->get_hw_state(encoder, &pipe))
6917 		return NULL;
6918 
6919 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6920 
6921 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6922 	if (!mode)
6923 		return NULL;
6924 
6925 	crtc_state = intel_crtc_state_alloc(crtc);
6926 	if (!crtc_state) {
6927 		kfree(mode);
6928 		return NULL;
6929 	}
6930 
6931 	if (!intel_crtc_get_pipe_config(crtc_state)) {
6932 		kfree(crtc_state);
6933 		kfree(mode);
6934 		return NULL;
6935 	}
6936 
6937 	intel_encoder_get_config(encoder, crtc_state);
6938 
6939 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6940 
6941 	kfree(crtc_state);
6942 
6943 	return mode;
6944 }
6945 
6946 /**
6947  * intel_wm_need_update - Check whether watermarks need updating
6948  * @cur: current plane state
6949  * @new: new plane state
6950  *
6951  * Check current plane state versus the new one to determine whether
6952  * watermarks need to be recalculated.
6953  *
6954  * Returns true if the watermarks need to be recalculated, false otherwise.
6955  */
6956 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6957 				 struct intel_plane_state *new)
6958 {
6959 	/* Update watermarks on tiling or size changes. */
6960 	if (new->uapi.visible != cur->uapi.visible)
6961 		return true;
6962 
6963 	if (!cur->hw.fb || !new->hw.fb)
6964 		return false;
6965 
6966 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6967 	    cur->hw.rotation != new->hw.rotation ||
6968 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6969 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6970 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6971 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6972 		return true;
6973 
6974 	return false;
6975 }
6976 
6977 static bool needs_scaling(const struct intel_plane_state *state)
6978 {
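	/* The uapi src rectangle is in 16.16 fixed point, hence the >> 16. */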
6979 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
6980 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
6981 	int dst_w = drm_rect_width(&state->uapi.dst);
6982 	int dst_h = drm_rect_height(&state->uapi.dst);
6983 
6984 	return (src_w != dst_w || src_h != dst_h);
6985 }
6986 
6987 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
6988 				    struct intel_crtc_state *crtc_state,
6989 				    const struct intel_plane_state *old_plane_state,
6990 				    struct intel_plane_state *plane_state)
6991 {
6992 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6993 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
6994 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6995 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6996 	bool was_crtc_enabled = old_crtc_state->hw.active;
6997 	bool is_crtc_enabled = crtc_state->hw.active;
6998 	bool turn_off, turn_on, visible, was_visible;
6999 	int ret;
7000 
7001 	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
7002 		ret = skl_update_scaler_plane(crtc_state, plane_state);
7003 		if (ret)
7004 			return ret;
7005 	}
7006 
7007 	was_visible = old_plane_state->uapi.visible;
7008 	visible = plane_state->uapi.visible;
7009 
7010 	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
7011 		was_visible = false;
7012 
7013 	/*
7014 	 * Visibility is calculated as if the crtc was on, but
7015 	 * after scaler setup everything depends on it being off
7016 	 * when the crtc isn't active.
7017 	 *
7018 	 * FIXME this is wrong for watermarks. Watermarks should also
7019 	 * be computed as if the pipe would be active. Perhaps move
7020 	 * per-plane wm computation to the .check_plane() hook, and
7021 	 * only combine the results from all planes in the current place?
7022 	 */
7023 	if (!is_crtc_enabled) {
7024 		intel_plane_set_invisible(crtc_state, plane_state);
7025 		visible = false;
7026 	}
7027 
7028 	if (!was_visible && !visible)
7029 		return 0;
7030 
7031 	turn_off = was_visible && (!visible || mode_changed);
7032 	turn_on = visible && (!was_visible || mode_changed);
7033 
7034 	drm_dbg_atomic(&dev_priv->drm,
7035 		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
7036 		       crtc->base.base.id, crtc->base.name,
7037 		       plane->base.base.id, plane->base.name,
7038 		       was_visible, visible,
7039 		       turn_off, turn_on, mode_changed);
7040 
7041 	if (turn_on) {
7042 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
7043 			crtc_state->update_wm_pre = true;
7044 
7045 		/* must disable cxsr around plane enable/disable */
7046 		if (plane->id != PLANE_CURSOR)
7047 			crtc_state->disable_cxsr = true;
7048 	} else if (turn_off) {
7049 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
7050 			crtc_state->update_wm_post = true;
7051 
7052 		/* must disable cxsr around plane enable/disable */
7053 		if (plane->id != PLANE_CURSOR)
7054 			crtc_state->disable_cxsr = true;
7055 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
7056 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
7057 			/* FIXME bollocks */
7058 			crtc_state->update_wm_pre = true;
7059 			crtc_state->update_wm_post = true;
7060 		}
7061 	}
7062 
7063 	if (visible || was_visible)
7064 		crtc_state->fb_bits |= plane->frontbuffer_bit;
7065 
7066 	/*
7067 	 * ILK/SNB DVSACNTR/Sprite Enable
7068 	 * IVB SPR_CTL/Sprite Enable
7069 	 * "When in Self Refresh Big FIFO mode, a write to enable the
7070 	 *  plane will be internally buffered and delayed while Big FIFO
7071 	 *  mode is exiting."
7072 	 *
7073 	 * Which means that enabling the sprite can take an extra frame
7074 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
7075 	 * down to LP0 and wait for vblank in order to make sure the
7076 	 * sprite gets enabled on the next vblank after the register write.
7077 	 * Doing otherwise would risk enabling the sprite one frame after
7078 	 * we've already signalled flip completion. We can resume LP1+
7079 	 * once the sprite has been enabled.
7080 	 *
7081 	 *
7082 	 * WaCxSRDisabledForSpriteScaling:ivb
7083 	 * IVB SPR_SCALE/Scaling Enable
7084 	 * "Low Power watermarks must be disabled for at least one
7085 	 *  frame before enabling sprite scaling, and kept disabled
7086 	 *  until sprite scaling is disabled."
7087 	 *
7088 	 * ILK/SNB DVSASCALE/Scaling Enable
7089 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
7090 	 *  masked off while Big FIFO mode is exiting."
7091 	 *
7092 	 * Despite the w/a only being listed for IVB we assume that
7093 	 * the ILK/SNB note has similar ramifications, hence we apply
7094 	 * the w/a on all three platforms.
7095 	 *
7096 	 * Experimental results suggest this is needed for the primary
7097 	 * plane as well, not only the sprite plane.
7098 	 */
7099 	if (plane->id != PLANE_CURSOR &&
7100 	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
7101 	     IS_IVYBRIDGE(dev_priv)) &&
7102 	    (turn_on || (!needs_scaling(old_plane_state) &&
7103 			 needs_scaling(plane_state))))
7104 		crtc_state->disable_lp_wm = true;
7105 
7106 	return 0;
7107 }
7108 
7109 static bool encoders_cloneable(const struct intel_encoder *a,
7110 			       const struct intel_encoder *b)
7111 {
7112 	/* masks could be asymmetric, so check both ways */
7113 	return a == b || (a->cloneable & (1 << b->type) &&
7114 			  b->cloneable & (1 << a->type));
7115 }
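
/*
 * Hypothetical example of the check above: encoders A and B may share a
 * crtc if A->cloneable has BIT(B->type) set and B->cloneable has
 * BIT(A->type) set; clearing either bit makes the pair non-cloneable.
 */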
7116 
7117 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
7118 					 struct intel_crtc *crtc,
7119 					 struct intel_encoder *encoder)
7120 {
7121 	struct intel_encoder *source_encoder;
7122 	struct drm_connector *connector;
7123 	struct drm_connector_state *connector_state;
7124 	int i;
7125 
7126 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7127 		if (connector_state->crtc != &crtc->base)
7128 			continue;
7129 
7130 		source_encoder =
7131 			to_intel_encoder(connector_state->best_encoder);
7132 		if (!encoders_cloneable(encoder, source_encoder))
7133 			return false;
7134 	}
7135 
7136 	return true;
7137 }
7138 
7139 static int icl_add_linked_planes(struct intel_atomic_state *state)
7140 {
7141 	struct intel_plane *plane, *linked;
7142 	struct intel_plane_state *plane_state, *linked_plane_state;
7143 	int i;
7144 
7145 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7146 		linked = plane_state->planar_linked_plane;
7147 
7148 		if (!linked)
7149 			continue;
7150 
7151 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
7152 		if (IS_ERR(linked_plane_state))
7153 			return PTR_ERR(linked_plane_state);
7154 
7155 		drm_WARN_ON(state->base.dev,
7156 			    linked_plane_state->planar_linked_plane != plane);
7157 		drm_WARN_ON(state->base.dev,
7158 			    linked_plane_state->planar_slave == plane_state->planar_slave);
7159 	}
7160 
7161 	return 0;
7162 }
7163 
7164 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
7165 {
7166 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7167 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7168 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
7169 	struct intel_plane *plane, *linked;
7170 	struct intel_plane_state *plane_state;
7171 	int i;
7172 
7173 	if (DISPLAY_VER(dev_priv) < 11)
7174 		return 0;
7175 
7176 	/*
7177 	 * Destroy all old plane links and make the slave plane invisible
7178 	 * in the crtc_state->active_planes mask.
7179 	 */
7180 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7181 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
7182 			continue;
7183 
7184 		plane_state->planar_linked_plane = NULL;
7185 		if (plane_state->planar_slave && !plane_state->uapi.visible) {
7186 			crtc_state->enabled_planes &= ~BIT(plane->id);
7187 			crtc_state->active_planes &= ~BIT(plane->id);
7188 			crtc_state->update_planes |= BIT(plane->id);
7189 		}
7190 
7191 		plane_state->planar_slave = false;
7192 	}
7193 
7194 	if (!crtc_state->nv12_planes)
7195 		return 0;
7196 
7197 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7198 		struct intel_plane_state *linked_state = NULL;
7199 
7200 		if (plane->pipe != crtc->pipe ||
7201 		    !(crtc_state->nv12_planes & BIT(plane->id)))
7202 			continue;
7203 
7204 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
7205 			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
7206 				continue;
7207 
7208 			if (crtc_state->active_planes & BIT(linked->id))
7209 				continue;
7210 
7211 			linked_state = intel_atomic_get_plane_state(state, linked);
7212 			if (IS_ERR(linked_state))
7213 				return PTR_ERR(linked_state);
7214 
7215 			break;
7216 		}
7217 
7218 		if (!linked_state) {
7219 			drm_dbg_kms(&dev_priv->drm,
7220 				    "Need %d free Y planes for planar YUV\n",
7221 				    hweight8(crtc_state->nv12_planes));
7222 
7223 			return -EINVAL;
7224 		}
7225 
7226 		plane_state->planar_linked_plane = linked;
7227 
7228 		linked_state->planar_slave = true;
7229 		linked_state->planar_linked_plane = plane;
7230 		crtc_state->enabled_planes |= BIT(linked->id);
7231 		crtc_state->active_planes |= BIT(linked->id);
7232 		crtc_state->update_planes |= BIT(linked->id);
7233 		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
7234 			    linked->base.name, plane->base.name);
7235 
7236 		/* Copy parameters to slave plane */
7237 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
7238 		linked_state->color_ctl = plane_state->color_ctl;
7239 		linked_state->view = plane_state->view;
7240 
7241 		intel_plane_copy_hw_state(linked_state, plane_state);
7242 		linked_state->uapi.src = plane_state->uapi.src;
7243 		linked_state->uapi.dst = plane_state->uapi.dst;
7244 
7245 		if (icl_is_hdr_plane(dev_priv, plane->id)) {
7246 			if (linked->id == PLANE_SPRITE5)
7247 				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
7248 			else if (linked->id == PLANE_SPRITE4)
7249 				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
7250 			else if (linked->id == PLANE_SPRITE3)
7251 				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
7252 			else if (linked->id == PLANE_SPRITE2)
7253 				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
7254 			else
7255 				MISSING_CASE(linked->id);
7256 		}
7257 	}
7258 
7259 	return 0;
7260 }
7261 
7262 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7263 {
7264 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7265 	struct intel_atomic_state *state =
7266 		to_intel_atomic_state(new_crtc_state->uapi.state);
7267 	const struct intel_crtc_state *old_crtc_state =
7268 		intel_atomic_get_old_crtc_state(state, crtc);
7269 
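	/*
	 * The double negation makes this a logical XOR: report a change
	 * only when C8 plane usage toggles between none and some.
	 */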
7270 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7271 }
7272 
7273 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7274 {
7275 	const struct drm_display_mode *pipe_mode =
7276 		&crtc_state->hw.pipe_mode;
7277 	int linetime_wm;
7278 
7279 	if (!crtc_state->hw.enable)
7280 		return 0;
7281 
7282 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7283 					pipe_mode->crtc_clock);
7284 
7285 	return min(linetime_wm, 0x1ff);
7286 }
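
/*
 * The linetime watermark above is the scanline duration in 0.125 us
 * units. E.g. for 1920x1080@60 with crtc_htotal = 2200 and crtc_clock =
 * 148500 kHz: 2200 * 1000 * 8 / 148500 rounds to 119, i.e. ~14.9 us per
 * line, well under the 0x1ff clamp.
 */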
7287 
7288 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7289 			       const struct intel_cdclk_state *cdclk_state)
7290 {
7291 	const struct drm_display_mode *pipe_mode =
7292 		&crtc_state->hw.pipe_mode;
7293 	int linetime_wm;
7294 
7295 	if (!crtc_state->hw.enable)
7296 		return 0;
7297 
7298 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7299 					cdclk_state->logical.cdclk);
7300 
7301 	return min(linetime_wm, 0x1ff);
7302 }
7303 
7304 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7305 {
7306 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7307 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7308 	const struct drm_display_mode *pipe_mode =
7309 		&crtc_state->hw.pipe_mode;
7310 	int linetime_wm;
7311 
7312 	if (!crtc_state->hw.enable)
7313 		return 0;
7314 
7315 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7316 				   crtc_state->pixel_rate);
7317 
7318 	/* Display WA #1135: BXT:ALL GLK:ALL */
7319 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
7320 	    dev_priv->ipc_enabled)
7321 		linetime_wm /= 2;
7322 
7323 	return min(linetime_wm, 0x1ff);
7324 }
7325 
7326 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7327 				   struct intel_crtc *crtc)
7328 {
7329 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7330 	struct intel_crtc_state *crtc_state =
7331 		intel_atomic_get_new_crtc_state(state, crtc);
7332 	const struct intel_cdclk_state *cdclk_state;
7333 
7334 	if (DISPLAY_VER(dev_priv) >= 9)
7335 		crtc_state->linetime = skl_linetime_wm(crtc_state);
7336 	else
7337 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
7338 
7339 	if (!hsw_crtc_supports_ips(crtc))
7340 		return 0;
7341 
7342 	cdclk_state = intel_atomic_get_cdclk_state(state);
7343 	if (IS_ERR(cdclk_state))
7344 		return PTR_ERR(cdclk_state);
7345 
7346 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
7347 						       cdclk_state);
7348 
7349 	return 0;
7350 }
7351 
7352 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
7353 				   struct intel_crtc *crtc)
7354 {
7355 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7356 	struct intel_crtc_state *crtc_state =
7357 		intel_atomic_get_new_crtc_state(state, crtc);
7358 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7359 	int ret;
7360 
7361 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
7362 	    mode_changed && !crtc_state->hw.active)
7363 		crtc_state->update_wm_post = true;
7364 
7365 	if (mode_changed && crtc_state->hw.enable &&
7366 	    dev_priv->display.crtc_compute_clock &&
7367 	    !crtc_state->bigjoiner_slave &&
7368 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
7369 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
7370 		if (ret)
7371 			return ret;
7372 	}
7373 
7374 	/*
7375 	 * May need to update pipe gamma enable bits
7376 	 * when C8 planes are getting enabled/disabled.
7377 	 */
7378 	if (c8_planes_changed(crtc_state))
7379 		crtc_state->uapi.color_mgmt_changed = true;
7380 
7381 	if (mode_changed || crtc_state->update_pipe ||
7382 	    crtc_state->uapi.color_mgmt_changed) {
7383 		ret = intel_color_check(crtc_state);
7384 		if (ret)
7385 			return ret;
7386 	}
7387 
7388 	if (dev_priv->display.compute_pipe_wm) {
7389 		ret = dev_priv->display.compute_pipe_wm(state, crtc);
7390 		if (ret) {
7391 			drm_dbg_kms(&dev_priv->drm,
7392 				    "Target pipe watermarks are invalid\n");
7393 			return ret;
7394 		}
7395 
7396 	}
7397 
7398 	if (dev_priv->display.compute_intermediate_wm) {
7399 		if (drm_WARN_ON(&dev_priv->drm,
7400 				!dev_priv->display.compute_pipe_wm))
7401 			return 0;
7402 
7403 		/*
7404 		 * Calculate 'intermediate' watermarks that satisfy both the
7405 		 * old state and the new state.  We can program these
7406 		 * immediately.
7407 		 */
7408 		ret = dev_priv->display.compute_intermediate_wm(state, crtc);
7409 		if (ret) {
7410 			drm_dbg_kms(&dev_priv->drm,
7411 				    "No valid intermediate pipe watermarks are possible\n");
7412 			return ret;
7413 		}
7414 	}
7415 
7416 	if (DISPLAY_VER(dev_priv) >= 9) {
7417 		if (mode_changed || crtc_state->update_pipe) {
7418 			ret = skl_update_scaler_crtc(crtc_state);
7419 			if (ret)
7420 				return ret;
7421 		}
7422 
7423 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
7424 		if (ret)
7425 			return ret;
7426 	}
7427 
7428 	if (HAS_IPS(dev_priv)) {
7429 		ret = hsw_compute_ips_config(crtc_state);
7430 		if (ret)
7431 			return ret;
7432 	}
7433 
7434 	if (DISPLAY_VER(dev_priv) >= 9 ||
7435 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
7436 		ret = hsw_compute_linetime_wm(state, crtc);
7437 		if (ret)
7438 			return ret;
7439 
7440 	}
7441 
7442 	if (!mode_changed) {
7443 		ret = intel_psr2_sel_fetch_update(state, crtc);
7444 		if (ret)
7445 			return ret;
7446 	}
7447 
7448 	return 0;
7449 }
7450 
7451 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7452 {
7453 	struct intel_connector *connector;
7454 	struct drm_connector_list_iter conn_iter;
7455 
7456 	drm_connector_list_iter_begin(dev, &conn_iter);
7457 	for_each_intel_connector_iter(connector, &conn_iter) {
7458 		struct drm_connector_state *conn_state = connector->base.state;
7459 		struct intel_encoder *encoder =
7460 			to_intel_encoder(connector->base.encoder);
7461 
7462 		if (conn_state->crtc)
7463 			drm_connector_put(&connector->base);
7464 
7465 		if (encoder) {
7466 			struct intel_crtc *crtc =
7467 				to_intel_crtc(encoder->base.crtc);
7468 			const struct intel_crtc_state *crtc_state =
7469 				to_intel_crtc_state(crtc->base.state);
7470 
7471 			conn_state->best_encoder = &encoder->base;
7472 			conn_state->crtc = &crtc->base;
7473 			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7474 
7475 			drm_connector_get(&connector->base);
7476 		} else {
7477 			conn_state->best_encoder = NULL;
7478 			conn_state->crtc = NULL;
7479 		}
7480 	}
7481 	drm_connector_list_iter_end(&conn_iter);
7482 }
7483 
7484 static int
7485 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7486 		      struct intel_crtc_state *pipe_config)
7487 {
7488 	struct drm_connector *connector = conn_state->connector;
7489 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7490 	const struct drm_display_info *info = &connector->display_info;
7491 	int bpp;
7492 
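	/*
	 * max_bpc is per color component while pipe_bpp counts all three,
	 * so e.g. a max_bpc of 10 maps to 10 * 3 = 30 pipe bpp below.
	 */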
7493 	switch (conn_state->max_bpc) {
7494 	case 6 ... 7:
7495 		bpp = 6 * 3;
7496 		break;
7497 	case 8 ... 9:
7498 		bpp = 8 * 3;
7499 		break;
7500 	case 10 ... 11:
7501 		bpp = 10 * 3;
7502 		break;
7503 	case 12 ... 16:
7504 		bpp = 12 * 3;
7505 		break;
7506 	default:
7507 		MISSING_CASE(conn_state->max_bpc);
7508 		return -EINVAL;
7509 	}
7510 
7511 	if (bpp < pipe_config->pipe_bpp) {
7512 		drm_dbg_kms(&i915->drm,
7513 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7514 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7515 			    connector->base.id, connector->name,
7516 			    bpp, 3 * info->bpc,
7517 			    3 * conn_state->max_requested_bpc,
7518 			    pipe_config->pipe_bpp);
7519 
7520 		pipe_config->pipe_bpp = bpp;
7521 	}
7522 
7523 	return 0;
7524 }
7525 
7526 static int
7527 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7528 			  struct intel_crtc_state *pipe_config)
7529 {
7530 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7531 	struct drm_atomic_state *state = pipe_config->uapi.state;
7532 	struct drm_connector *connector;
7533 	struct drm_connector_state *connector_state;
7534 	int bpp, i;
7535 
7536 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7537 	    IS_CHERRYVIEW(dev_priv))
7538 		bpp = 10 * 3;
7539 	else if (DISPLAY_VER(dev_priv) >= 5)
7540 		bpp = 12 * 3;
7541 	else
7542 		bpp = 8 * 3;
7543 
7544 	pipe_config->pipe_bpp = bpp;
7545 
7546 	/* Clamp display bpp to connector max bpp */
7547 	for_each_new_connector_in_state(state, connector, connector_state, i) {
7548 		int ret;
7549 
7550 		if (connector_state->crtc != &crtc->base)
7551 			continue;
7552 
7553 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7554 		if (ret)
7555 			return ret;
7556 	}
7557 
7558 	return 0;
7559 }
7560 
7561 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7562 				    const struct drm_display_mode *mode)
7563 {
7564 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7565 		    "type: 0x%x flags: 0x%x\n",
7566 		    mode->crtc_clock,
7567 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
7568 		    mode->crtc_hsync_end, mode->crtc_htotal,
7569 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
7570 		    mode->crtc_vsync_end, mode->crtc_vtotal,
7571 		    mode->type, mode->flags);
7572 }
7573 
7574 static void
7575 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7576 		      const char *id, unsigned int lane_count,
7577 		      const struct intel_link_m_n *m_n)
7578 {
7579 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7580 
7581 	drm_dbg_kms(&i915->drm,
7582 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7583 		    id, lane_count,
7584 		    m_n->gmch_m, m_n->gmch_n,
7585 		    m_n->link_m, m_n->link_n, m_n->tu);
7586 }
7587 
7588 static void
7589 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7590 		     const union hdmi_infoframe *frame)
7591 {
7592 	if (!drm_debug_enabled(DRM_UT_KMS))
7593 		return;
7594 
7595 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7596 }
7597 
7598 static void
7599 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7600 		      const struct drm_dp_vsc_sdp *vsc)
7601 {
7602 	if (!drm_debug_enabled(DRM_UT_KMS))
7603 		return;
7604 
7605 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7606 }
7607 
7608 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7609 
7610 static const char * const output_type_str[] = {
7611 	OUTPUT_TYPE(UNUSED),
7612 	OUTPUT_TYPE(ANALOG),
7613 	OUTPUT_TYPE(DVO),
7614 	OUTPUT_TYPE(SDVO),
7615 	OUTPUT_TYPE(LVDS),
7616 	OUTPUT_TYPE(TVOUT),
7617 	OUTPUT_TYPE(HDMI),
7618 	OUTPUT_TYPE(DP),
7619 	OUTPUT_TYPE(EDP),
7620 	OUTPUT_TYPE(DSI),
7621 	OUTPUT_TYPE(DDI),
7622 	OUTPUT_TYPE(DP_MST),
7623 };
7624 
7625 #undef OUTPUT_TYPE
7626 
7627 static void snprintf_output_types(char *buf, size_t len,
7628 				  unsigned int output_types)
7629 {
7630 	char *str = buf;
7631 	int i;
7632 
7633 	str[0] = '\0';
7634 
7635 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7636 		int r;
7637 
7638 		if ((output_types & BIT(i)) == 0)
7639 			continue;
7640 
7641 		r = snprintf(str, len, "%s%s",
7642 			     str != buf ? "," : "", output_type_str[i]);
7643 		if (r >= len)
7644 			break;
7645 		str += r;
7646 		len -= r;
7647 
7648 		output_types &= ~BIT(i);
7649 	}
7650 
7651 	WARN_ON_ONCE(output_types != 0);
7652 }
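
/*
 * For example (hypothetical input), output_types with BIT(INTEL_OUTPUT_HDMI)
 * and BIT(INTEL_OUTPUT_DP) set would format as "HDMI,DP", following the
 * order of output_type_str[] above.
 */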
7653 
7654 static const char * const output_format_str[] = {
7655 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
7656 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
7657 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
7658 };
7659 
7660 static const char *output_formats(enum intel_output_format format)
7661 {
7662 	if (format >= ARRAY_SIZE(output_format_str))
7663 		return "invalid";
7664 	return output_format_str[format];
7665 }
7666 
7667 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
7668 {
7669 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7670 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
7671 	const struct drm_framebuffer *fb = plane_state->hw.fb;
7672 
7673 	if (!fb) {
7674 		drm_dbg_kms(&i915->drm,
7675 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
7676 			    plane->base.base.id, plane->base.name,
7677 			    yesno(plane_state->uapi.visible));
7678 		return;
7679 	}
7680 
7681 	drm_dbg_kms(&i915->drm,
7682 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
7683 		    plane->base.base.id, plane->base.name,
7684 		    fb->base.id, fb->width, fb->height, &fb->format->format,
7685 		    fb->modifier, yesno(plane_state->uapi.visible));
7686 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
7687 		    plane_state->hw.rotation, plane_state->scaler_id);
7688 	if (plane_state->uapi.visible)
7689 		drm_dbg_kms(&i915->drm,
7690 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
7691 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
7692 			    DRM_RECT_ARG(&plane_state->uapi.dst));
7693 }
7694 
7695 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
7696 				   struct intel_atomic_state *state,
7697 				   const char *context)
7698 {
7699 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7700 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7701 	const struct intel_plane_state *plane_state;
7702 	struct intel_plane *plane;
7703 	char buf[64];
7704 	int i;
7705 
7706 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
7707 		    crtc->base.base.id, crtc->base.name,
7708 		    yesno(pipe_config->hw.enable), context);
7709 
7710 	if (!pipe_config->hw.enable)
7711 		goto dump_planes;
7712 
7713 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
7714 	drm_dbg_kms(&dev_priv->drm,
7715 		    "active: %s, output_types: %s (0x%x), output format: %s\n",
7716 		    yesno(pipe_config->hw.active),
7717 		    buf, pipe_config->output_types,
7718 		    output_formats(pipe_config->output_format));
7719 
7720 	drm_dbg_kms(&dev_priv->drm,
7721 		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
7722 		    transcoder_name(pipe_config->cpu_transcoder),
7723 		    pipe_config->pipe_bpp, pipe_config->dither);
7724 
7725 	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
7726 		    transcoder_name(pipe_config->mst_master_transcoder));
7727 
7728 	drm_dbg_kms(&dev_priv->drm,
7729 		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
7730 		    transcoder_name(pipe_config->master_transcoder),
7731 		    pipe_config->sync_mode_slaves_mask);
7732 
7733 	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
7734 		    pipe_config->bigjoiner_slave ? "slave" :
7735 		    pipe_config->bigjoiner ? "master" : "no");
7736 
7737 	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
7738 		    enableddisabled(pipe_config->splitter.enable),
7739 		    pipe_config->splitter.link_count,
7740 		    pipe_config->splitter.pixel_overlap);
7741 
7742 	if (pipe_config->has_pch_encoder)
7743 		intel_dump_m_n_config(pipe_config, "fdi",
7744 				      pipe_config->fdi_lanes,
7745 				      &pipe_config->fdi_m_n);
7746 
7747 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7748 		intel_dump_m_n_config(pipe_config, "dp m_n",
7749 				pipe_config->lane_count, &pipe_config->dp_m_n);
7750 		if (pipe_config->has_drrs)
7751 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
7752 					      pipe_config->lane_count,
7753 					      &pipe_config->dp_m2_n2);
7754 	}
7755 
7756 	drm_dbg_kms(&dev_priv->drm,
7757 		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
7758 		    pipe_config->has_audio, pipe_config->has_infoframe,
7759 		    pipe_config->infoframes.enable);
7760 
7761 	if (pipe_config->infoframes.enable &
7762 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
7763 		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
7764 			    pipe_config->infoframes.gcp);
7765 	if (pipe_config->infoframes.enable &
7766 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
7767 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
7768 	if (pipe_config->infoframes.enable &
7769 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
7770 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
7771 	if (pipe_config->infoframes.enable &
7772 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
7773 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
7774 	if (pipe_config->infoframes.enable &
7775 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
7776 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7777 	if (pipe_config->infoframes.enable &
7778 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
7779 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7780 	if (pipe_config->infoframes.enable &
7781 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
7782 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
7783 
7784 	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
7785 		    yesno(pipe_config->vrr.enable),
7786 		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
7787 		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
7788 		    pipe_config->vrr.flipline,
7789 		    intel_vrr_vmin_vblank_start(pipe_config),
7790 		    intel_vrr_vmax_vblank_start(pipe_config));
7791 
7792 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
7793 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
7794 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
7795 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
7796 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
7797 	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
7798 	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
7799 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
7800 	drm_dbg_kms(&dev_priv->drm,
7801 		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
7802 		    pipe_config->port_clock,
7803 		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
7804 		    pipe_config->pixel_rate);
7805 
7806 	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
7807 		    pipe_config->linetime, pipe_config->ips_linetime);
7808 
7809 	if (DISPLAY_VER(dev_priv) >= 9)
7810 		drm_dbg_kms(&dev_priv->drm,
7811 			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
7812 			    crtc->num_scalers,
7813 			    pipe_config->scaler_state.scaler_users,
7814 			    pipe_config->scaler_state.scaler_id);
7815 
7816 	if (HAS_GMCH(dev_priv))
7817 		drm_dbg_kms(&dev_priv->drm,
7818 			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7819 			    pipe_config->gmch_pfit.control,
7820 			    pipe_config->gmch_pfit.pgm_ratios,
7821 			    pipe_config->gmch_pfit.lvds_border_bits);
7822 	else
7823 		drm_dbg_kms(&dev_priv->drm,
7824 			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
7825 			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
7826 			    enableddisabled(pipe_config->pch_pfit.enabled),
7827 			    yesno(pipe_config->pch_pfit.force_thru));
7828 
7829 	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
7830 		    pipe_config->ips_enabled, pipe_config->double_wide);
7831 
7832 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
7833 
7834 	if (IS_CHERRYVIEW(dev_priv))
7835 		drm_dbg_kms(&dev_priv->drm,
7836 			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7837 			    pipe_config->cgm_mode, pipe_config->gamma_mode,
7838 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7839 	else
7840 		drm_dbg_kms(&dev_priv->drm,
7841 			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7842 			    pipe_config->csc_mode, pipe_config->gamma_mode,
7843 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7844 
7845 	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
7846 		    pipe_config->hw.degamma_lut ?
7847 		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
7848 		    pipe_config->hw.gamma_lut ?
7849 		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
7850 
7851 dump_planes:
7852 	if (!state)
7853 		return;
7854 
7855 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7856 		if (plane->pipe == crtc->pipe)
7857 			intel_dump_plane_state(plane_state);
7858 	}
7859 }
7860 
7861 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7862 {
7863 	struct drm_device *dev = state->base.dev;
7864 	struct drm_connector *connector;
7865 	struct drm_connector_list_iter conn_iter;
7866 	unsigned int used_ports = 0;
7867 	unsigned int used_mst_ports = 0;
7868 	bool ret = true;
7869 
7870 	/*
7871 	 * We're going to peek into connector->state,
7872 	 * hence connection_mutex must be held.
7873 	 */
7874 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7875 
7876 	/*
7877 	 * Walk the connector list instead of the encoder
7878 	 * list to detect the problem on ddi platforms
7879 	 * where there's just one encoder per digital port.
7880 	 */
7881 	drm_connector_list_iter_begin(dev, &conn_iter);
7882 	drm_for_each_connector_iter(connector, &conn_iter) {
7883 		struct drm_connector_state *connector_state;
7884 		struct intel_encoder *encoder;
7885 
7886 		connector_state =
7887 			drm_atomic_get_new_connector_state(&state->base,
7888 							   connector);
7889 		if (!connector_state)
7890 			connector_state = connector->state;
7891 
7892 		if (!connector_state->best_encoder)
7893 			continue;
7894 
7895 		encoder = to_intel_encoder(connector_state->best_encoder);
7896 
7897 		drm_WARN_ON(dev, !connector_state->crtc);
7898 
7899 		switch (encoder->type) {
7900 		case INTEL_OUTPUT_DDI:
7901 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7902 				break;
7903 			fallthrough;
7904 		case INTEL_OUTPUT_DP:
7905 		case INTEL_OUTPUT_HDMI:
7906 		case INTEL_OUTPUT_EDP:
7907 			/* the same port mustn't appear more than once */
7908 			if (used_ports & BIT(encoder->port))
7909 				ret = false;
7910 
7911 			used_ports |= BIT(encoder->port);
7912 			break;
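		/*
		 * MST can carry several streams on one port, so the same
		 * port may legitimately appear on multiple connectors;
		 * just record it and check for an SST/HDMI clash below.
		 */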
7913 		case INTEL_OUTPUT_DP_MST:
7914 			used_mst_ports |=
7915 				1 << encoder->port;
7916 			break;
7917 		default:
7918 			break;
7919 		}
7920 	}
7921 	drm_connector_list_iter_end(&conn_iter);
7922 
7923 	/* can't mix MST and SST/HDMI on the same port */
7924 	if (used_ports & used_mst_ports)
7925 		return false;
7926 
7927 	return ret;
7928 }
7929 
7930 static void
7931 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7932 					   struct intel_crtc_state *crtc_state)
7933 {
7934 	const struct intel_crtc_state *from_crtc_state = crtc_state;
7935 
7936 	if (crtc_state->bigjoiner_slave) {
7937 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
7938 								  crtc_state->bigjoiner_linked_crtc);
7939 
7940 		/* No need to copy state if the master state is unchanged */
7941 		if (!from_crtc_state)
7942 			return;
7943 	}
7944 
7945 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7946 }
7947 
7948 static void
7949 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
7950 				 struct intel_crtc_state *crtc_state)
7951 {
7952 	crtc_state->hw.enable = crtc_state->uapi.enable;
7953 	crtc_state->hw.active = crtc_state->uapi.active;
7954 	crtc_state->hw.mode = crtc_state->uapi.mode;
7955 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
7956 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
7957 
7958 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
7959 }
7960 
7961 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
7962 {
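	/*
	 * The hw state of a bigjoiner slave is inherited from its
	 * master, so don't reflect it back into the slave's uapi state.
	 */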
7963 	if (crtc_state->bigjoiner_slave)
7964 		return;
7965 
7966 	crtc_state->uapi.enable = crtc_state->hw.enable;
7967 	crtc_state->uapi.active = crtc_state->hw.active;
7968 	drm_WARN_ON(crtc_state->uapi.crtc->dev,
7969 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
7970 
7971 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
7972 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
7973 
7974 	/* copy color blobs to uapi */
7975 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
7976 				  crtc_state->hw.degamma_lut);
7977 	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
7978 				  crtc_state->hw.gamma_lut);
7979 	drm_property_replace_blob(&crtc_state->uapi.ctm,
7980 				  crtc_state->hw.ctm);
7981 }
7982 
7983 static int
7984 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
7985 			  const struct intel_crtc_state *from_crtc_state)
7986 {
7987 	struct intel_crtc_state *saved_state;
7988 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7989 
7990 	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
7991 	if (!saved_state)
7992 		return -ENOMEM;
7993 
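	/*
	 * Start from a verbatim copy of the master's state, then restore
	 * the fields below that must remain per-crtc before overwriting
	 * crtc_state with the merged result.
	 */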
7994 	saved_state->uapi = crtc_state->uapi;
7995 	saved_state->scaler_state = crtc_state->scaler_state;
7996 	saved_state->shared_dpll = crtc_state->shared_dpll;
7997 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7998 	saved_state->crc_enabled = crtc_state->crc_enabled;
7999 
8000 	intel_crtc_free_hw_state(crtc_state);
8001 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
8002 	kfree(saved_state);
8003 
8004 	/* Re-init hw state */
8005 	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
8006 	crtc_state->hw.enable = from_crtc_state->hw.enable;
8007 	crtc_state->hw.active = from_crtc_state->hw.active;
8008 	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
8009 	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
8010 
8011 	/* Some fixups */
8012 	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
8013 	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
8014 	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
8015 	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
8016 	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
8017 	crtc_state->bigjoiner_slave = true;
8018 	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
8019 	crtc_state->has_audio = false;
8020 
8021 	return 0;
8022 }
8023 
8024 static int
8025 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
8026 				 struct intel_crtc_state *crtc_state)
8027 {
8028 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8029 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8030 	struct intel_crtc_state *saved_state;
8031 
8032 	saved_state = intel_crtc_state_alloc(crtc);
8033 	if (!saved_state)
8034 		return -ENOMEM;
8035 
8036 	/* free the old crtc_state->hw members */
8037 	intel_crtc_free_hw_state(crtc_state);
8038 
8039 	/* FIXME: before the switch to atomic started, a new pipe_config was
8040 	 * kzalloc'd. Code that depends on any field being zero should be
8041 	 * fixed, so that the crtc_state can be safely duplicated. For now,
8042 	 * only fields that are known to not cause problems are preserved. */
8043 
8044 	saved_state->uapi = crtc_state->uapi;
8045 	saved_state->scaler_state = crtc_state->scaler_state;
8046 	saved_state->shared_dpll = crtc_state->shared_dpll;
8047 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
8048 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
8049 	       sizeof(saved_state->icl_port_dplls));
8050 	saved_state->crc_enabled = crtc_state->crc_enabled;
8051 	if (IS_G4X(dev_priv) ||
8052 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8053 		saved_state->wm = crtc_state->wm;
8054 
8055 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
8056 	kfree(saved_state);
8057 
8058 	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
8059 
8060 	return 0;
8061 }
8062 
8063 static int
8064 intel_modeset_pipe_config(struct intel_atomic_state *state,
8065 			  struct intel_crtc_state *pipe_config)
8066 {
8067 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
8068 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
8069 	struct drm_connector *connector;
8070 	struct drm_connector_state *connector_state;
8071 	int base_bpp, ret, i;
8072 	bool retry = true;
8073 
8074 	pipe_config->cpu_transcoder =
8075 		(enum transcoder) to_intel_crtc(crtc)->pipe;
8076 
8077 	/*
8078 	 * Sanitize sync polarity flags based on requested ones. If neither
8079 	 * positive nor negative polarity is requested, treat this as meaning
8080 	 * negative polarity.
8081 	 */
8082 	if (!(pipe_config->hw.adjusted_mode.flags &
8083 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8084 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8085 
8086 	if (!(pipe_config->hw.adjusted_mode.flags &
8087 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8088 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8089 
8090 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8091 					pipe_config);
8092 	if (ret)
8093 		return ret;
8094 
8095 	base_bpp = pipe_config->pipe_bpp;
8096 
8097 	/*
8098 	 * Determine the real pipe dimensions. Note that stereo modes can
8099 	 * increase the actual pipe size due to the frame doubling and
8100 	 * insertion of additional space for blanks between the frames. This
8101 	 * is stored in the crtc timings. We use the requested mode to do this
8102 	 * computation to clearly distinguish it from the adjusted mode, which
8103 	 * can be changed by the connectors in the below retry loop.
8104 	 */
8105 	drm_mode_get_hv_timing(&pipe_config->hw.mode,
8106 			       &pipe_config->pipe_src_w,
8107 			       &pipe_config->pipe_src_h);
8108 
8109 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8110 		struct intel_encoder *encoder =
8111 			to_intel_encoder(connector_state->best_encoder);
8112 
8113 		if (connector_state->crtc != crtc)
8114 			continue;
8115 
8116 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
8117 			drm_dbg_kms(&i915->drm,
8118 				    "rejecting invalid cloning configuration\n");
8119 			return -EINVAL;
8120 		}
8121 
8122 		/*
8123 		 * Determine output_types before calling the .compute_config()
8124 		 * hooks so that the hooks can use this information safely.
8125 		 */
8126 		if (encoder->compute_output_type)
8127 			pipe_config->output_types |=
8128 				BIT(encoder->compute_output_type(encoder, pipe_config,
8129 								 connector_state));
8130 		else
8131 			pipe_config->output_types |= BIT(encoder->type);
8132 	}
8133 
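	/*
	 * The computation below may be rerun once: intel_crtc_compute_config()
	 * returns I915_DISPLAY_CONFIG_RETRY when the config is bandwidth
	 * constrained (e.g. on FDI), after which the encoders get a second
	 * chance with the reduced requirements.
	 */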
8134 encoder_retry:
8135 	/* Ensure the port clock defaults are reset when retrying. */
8136 	pipe_config->port_clock = 0;
8137 	pipe_config->pixel_multiplier = 1;
8138 
8139 	/* Fill in default crtc timings, allow encoders to overwrite them. */
8140 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
8141 			      CRTC_STEREO_DOUBLE);
8142 
8143 	/* Pass our mode to the connectors and the CRTC to give them a chance to
8144 	 * adjust it according to limitations or connector properties, and also
8145 	 * a chance to reject the mode entirely.
8146 	 */
8147 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8148 		struct intel_encoder *encoder =
8149 			to_intel_encoder(connector_state->best_encoder);
8150 
8151 		if (connector_state->crtc != crtc)
8152 			continue;
8153 
8154 		ret = encoder->compute_config(encoder, pipe_config,
8155 					      connector_state);
8156 		if (ret < 0) {
8157 			if (ret != -EDEADLK)
8158 				drm_dbg_kms(&i915->drm,
8159 					    "Encoder config failure: %d\n",
8160 					    ret);
8161 			return ret;
8162 		}
8163 	}
8164 
8165 	/* Set default port clock if not overwritten by the encoder. Needs to be
8166 	 * done afterwards in case the encoder adjusts the mode. */
8167 	if (!pipe_config->port_clock)
8168 		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
8169 			* pipe_config->pixel_multiplier;
8170 
8171 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8172 	if (ret == -EDEADLK)
8173 		return ret;
8174 	if (ret < 0) {
8175 		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
8176 		return ret;
8177 	}
8178 
8179 	if (ret == I915_DISPLAY_CONFIG_RETRY) {
8180 		if (drm_WARN(&i915->drm, !retry,
8181 			     "loop in pipe configuration computation\n"))
8182 			return -EINVAL;
8183 
8184 		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
8185 		retry = false;
8186 		goto encoder_retry;
8187 	}
8188 
8189 	/* Dithering seems not to pass bits through correctly when it should, so
8190 	 * only enable it on 6bpc panels and when it's not a compliance
8191 	 * test requesting a 6bpc video pattern.
8192 	 */
8193 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
8194 		!pipe_config->dither_force_disable;
8195 	drm_dbg_kms(&i915->drm,
8196 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
8197 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
8198 
8199 	return 0;
8200 }
8201 
8202 static int
8203 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
8204 {
8205 	struct intel_atomic_state *state =
8206 		to_intel_atomic_state(crtc_state->uapi.state);
8207 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8208 	struct drm_connector_state *conn_state;
8209 	struct drm_connector *connector;
8210 	int i;
8211 
8212 	for_each_new_connector_in_state(&state->base, connector,
8213 					conn_state, i) {
8214 		struct intel_encoder *encoder =
8215 			to_intel_encoder(conn_state->best_encoder);
8216 		int ret;
8217 
8218 		if (conn_state->crtc != &crtc->base ||
8219 		    !encoder->compute_config_late)
8220 			continue;
8221 
8222 		ret = encoder->compute_config_late(encoder, crtc_state,
8223 						   conn_state);
8224 		if (ret)
8225 			return ret;
8226 	}
8227 
8228 	return 0;
8229 }
8230 
8231 bool intel_fuzzy_clock_check(int clock1, int clock2)
8232 {
8233 	int diff;
8234 
8235 	if (clock1 == clock2)
8236 		return true;
8237 
8238 	if (!clock1 || !clock2)
8239 		return false;
8240 
8241 	diff = abs(clock1 - clock2);
8242 
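	/*
	 * Treat the clocks as equal if they differ by less than 5% of
	 * their sum: (diff + c1 + c2) * 100 / (c1 + c2) < 105 is just
	 * diff * 100 / (c1 + c2) < 5 folded into one integer compare.
	 */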
8243 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
8244 		return true;
8245 
8246 	return false;
8247 }
8248 
8249 static bool
8250 intel_compare_m_n(unsigned int m, unsigned int n,
8251 		  unsigned int m2, unsigned int n2,
8252 		  bool exact)
8253 {
8254 	if (m == m2 && n == n2)
8255 		return true;
8256 
8257 	if (exact || !m || !n || !m2 || !n2)
8258 		return false;
8259 
8260 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8261 
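	/*
	 * Scale whichever ratio has the smaller N up by powers of two;
	 * if the Ns can be made equal, m/n and m2/n2 describe the same
	 * link ratio iff the scaled Ms fuzzily match.
	 */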
8262 	if (n > n2) {
8263 		while (n > n2) {
8264 			m2 <<= 1;
8265 			n2 <<= 1;
8266 		}
8267 	} else if (n < n2) {
8268 		while (n < n2) {
8269 			m <<= 1;
8270 			n <<= 1;
8271 		}
8272 	}
8273 
8274 	if (n != n2)
8275 		return false;
8276 
8277 	return intel_fuzzy_clock_check(m, m2);
8278 }
8279 
8280 static bool
8281 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8282 		       const struct intel_link_m_n *m2_n2,
8283 		       bool exact)
8284 {
8285 	return m_n->tu == m2_n2->tu &&
8286 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8287 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8288 		intel_compare_m_n(m_n->link_m, m_n->link_n,
8289 				  m2_n2->link_m, m2_n2->link_n, exact);
8290 }
8291 
8292 static bool
8293 intel_compare_infoframe(const union hdmi_infoframe *a,
8294 			const union hdmi_infoframe *b)
8295 {
8296 	return memcmp(a, b, sizeof(*a)) == 0;
8297 }
8298 
8299 static bool
8300 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8301 			 const struct drm_dp_vsc_sdp *b)
8302 {
8303 	return memcmp(a, b, sizeof(*a)) == 0;
8304 }
8305 
8306 static void
8307 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8308 			       bool fastset, const char *name,
8309 			       const union hdmi_infoframe *a,
8310 			       const union hdmi_infoframe *b)
8311 {
8312 	if (fastset) {
8313 		if (!drm_debug_enabled(DRM_UT_KMS))
8314 			return;
8315 
8316 		drm_dbg_kms(&dev_priv->drm,
8317 			    "fastset mismatch in %s infoframe\n", name);
8318 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8319 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8320 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8321 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8322 	} else {
8323 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8324 		drm_err(&dev_priv->drm, "expected:\n");
8325 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8326 		drm_err(&dev_priv->drm, "found:\n");
8327 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8328 	}
8329 }
8330 
8331 static void
8332 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8333 				bool fastset, const char *name,
8334 				const struct drm_dp_vsc_sdp *a,
8335 				const struct drm_dp_vsc_sdp *b)
8336 {
8337 	if (fastset) {
8338 		if (!drm_debug_enabled(DRM_UT_KMS))
8339 			return;
8340 
8341 		drm_dbg_kms(&dev_priv->drm,
8342 			    "fastset mismatch in %s dp sdp\n", name);
8343 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8344 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8345 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8346 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8347 	} else {
8348 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8349 		drm_err(&dev_priv->drm, "expected:\n");
8350 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8351 		drm_err(&dev_priv->drm, "found:\n");
8352 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8353 	}
8354 }
8355 
8356 static void __printf(4, 5)
8357 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8358 		     const char *name, const char *format, ...)
8359 {
8360 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8361 	struct va_format vaf;
8362 	va_list args;
8363 
8364 	va_start(args, format);
8365 	vaf.fmt = format;
8366 	vaf.va = &args;
8367 
8368 	if (fastset)
8369 		drm_dbg_kms(&i915->drm,
8370 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8371 			    crtc->base.base.id, crtc->base.name, name, &vaf);
8372 	else
8373 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8374 			crtc->base.base.id, crtc->base.name, name, &vaf);
8375 
8376 	va_end(args);
8377 }
8378 
8379 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8380 {
8381 	if (dev_priv->params.fastboot != -1)
8382 		return dev_priv->params.fastboot;
8383 
8384 	/* Enable fastboot by default on Skylake and newer */
8385 	if (DISPLAY_VER(dev_priv) >= 9)
8386 		return true;
8387 
8388 	/* Enable fastboot by default on VLV and CHV */
8389 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8390 		return true;
8391 
8392 	/* Disabled by default on all others */
8393 	return false;
8394 }
8395 
8396 static bool
8397 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8398 			  const struct intel_crtc_state *pipe_config,
8399 			  bool fastset)
8400 {
8401 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8402 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8403 	bool ret = true;
8404 	u32 bp_gamma = 0;
8405 	bool fixup_inherited = fastset &&
8406 		current_config->inherited && !pipe_config->inherited;
8407 
8408 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8409 		drm_dbg_kms(&dev_priv->drm,
8410 			    "initial modeset and fastboot not set\n");
8411 		ret = false;
8412 	}
8413 
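/*
 * Each PIPE_CONF_CHECK_* macro below compares one field of
 * current_config (the sw state) against pipe_config (the hw readout),
 * logs any mismatch via pipe_config_mismatch() and fails the whole
 * compare by clearing ret. E.g. PIPE_CONF_CHECK_I(lane_count) expands
 * to an integer compare of current_config->lane_count against
 * pipe_config->lane_count.
 */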
8414 #define PIPE_CONF_CHECK_X(name) do { \
8415 	if (current_config->name != pipe_config->name) { \
8416 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8417 				     "(expected 0x%08x, found 0x%08x)", \
8418 				     current_config->name, \
8419 				     pipe_config->name); \
8420 		ret = false; \
8421 	} \
8422 } while (0)
8423 
8424 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
8425 	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
8426 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8427 				     "(expected 0x%08x, found 0x%08x)", \
8428 				     current_config->name & (mask), \
8429 				     pipe_config->name & (mask)); \
8430 		ret = false; \
8431 	} \
8432 } while (0)
8433 
8434 #define PIPE_CONF_CHECK_I(name) do { \
8435 	if (current_config->name != pipe_config->name) { \
8436 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8437 				     "(expected %i, found %i)", \
8438 				     current_config->name, \
8439 				     pipe_config->name); \
8440 		ret = false; \
8441 	} \
8442 } while (0)
8443 
8444 #define PIPE_CONF_CHECK_BOOL(name) do { \
8445 	if (current_config->name != pipe_config->name) { \
8446 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
8447 				     "(expected %s, found %s)", \
8448 				     yesno(current_config->name), \
8449 				     yesno(pipe_config->name)); \
8450 		ret = false; \
8451 	} \
8452 } while (0)
8453 
8454 /*
8455  * Checks state where we only read out whether a feature is enabled, but
8456  * not the entire state itself (like full infoframes or the audio ELD).
8457  * These states require a full modeset on bootup to fix up.
8458  */
8459 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8460 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8461 		PIPE_CONF_CHECK_BOOL(name); \
8462 	} else { \
8463 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8464 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8465 				     yesno(current_config->name), \
8466 				     yesno(pipe_config->name)); \
8467 		ret = false; \
8468 	} \
8469 } while (0)
8470 
8471 #define PIPE_CONF_CHECK_P(name) do { \
8472 	if (current_config->name != pipe_config->name) { \
8473 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8474 				     "(expected %p, found %p)", \
8475 				     current_config->name, \
8476 				     pipe_config->name); \
8477 		ret = false; \
8478 	} \
8479 } while (0)
8480 
8481 #define PIPE_CONF_CHECK_M_N(name) do { \
8482 	if (!intel_compare_link_m_n(&current_config->name, \
8483 				    &pipe_config->name,\
8484 				    !fastset)) { \
8485 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8486 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8487 				     "found tu %i gmch %i/%i link %i/%i)", \
8488 				     current_config->name.tu, \
8489 				     current_config->name.gmch_m, \
8490 				     current_config->name.gmch_n, \
8491 				     current_config->name.link_m, \
8492 				     current_config->name.link_n, \
8493 				     pipe_config->name.tu, \
8494 				     pipe_config->name.gmch_m, \
8495 				     pipe_config->name.gmch_n, \
8496 				     pipe_config->name.link_m, \
8497 				     pipe_config->name.link_n); \
8498 		ret = false; \
8499 	} \
8500 } while (0)
8501 
8502 /* This is required for BDW+ where there is only one set of registers for
8503  * switching between high and low RR.
8504  * This macro can be used whenever a comparison has to be made between one
8505  * hw state and multiple sw state variables.
8506  */
8507 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8508 	if (!intel_compare_link_m_n(&current_config->name, \
8509 				    &pipe_config->name, !fastset) && \
8510 	    !intel_compare_link_m_n(&current_config->alt_name, \
8511 				    &pipe_config->name, !fastset)) { \
8512 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8513 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8514 				     "or tu %i gmch %i/%i link %i/%i, " \
8515 				     "found tu %i gmch %i/%i link %i/%i)", \
8516 				     current_config->name.tu, \
8517 				     current_config->name.gmch_m, \
8518 				     current_config->name.gmch_n, \
8519 				     current_config->name.link_m, \
8520 				     current_config->name.link_n, \
8521 				     current_config->alt_name.tu, \
8522 				     current_config->alt_name.gmch_m, \
8523 				     current_config->alt_name.gmch_n, \
8524 				     current_config->alt_name.link_m, \
8525 				     current_config->alt_name.link_n, \
8526 				     pipe_config->name.tu, \
8527 				     pipe_config->name.gmch_m, \
8528 				     pipe_config->name.gmch_n, \
8529 				     pipe_config->name.link_m, \
8530 				     pipe_config->name.link_n); \
8531 		ret = false; \
8532 	} \
8533 } while (0)
8534 
8535 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8536 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8537 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8538 				     "(%x) (expected %i, found %i)", \
8539 				     (mask), \
8540 				     current_config->name & (mask), \
8541 				     pipe_config->name & (mask)); \
8542 		ret = false; \
8543 	} \
8544 } while (0)
8545 
8546 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8547 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8548 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8549 				     "(expected %i, found %i)", \
8550 				     current_config->name, \
8551 				     pipe_config->name); \
8552 		ret = false; \
8553 	} \
8554 } while (0)
8555 
8556 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8557 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
8558 				     &pipe_config->infoframes.name)) { \
8559 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8560 					       &current_config->infoframes.name, \
8561 					       &pipe_config->infoframes.name); \
8562 		ret = false; \
8563 	} \
8564 } while (0)
8565 
8566 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8567 	if (!current_config->has_psr && !pipe_config->has_psr && \
8568 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8569 				      &pipe_config->infoframes.name)) { \
8570 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8571 						&current_config->infoframes.name, \
8572 						&pipe_config->infoframes.name); \
8573 		ret = false; \
8574 	} \
8575 } while (0)
8576 
8577 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8578 	if (current_config->name1 != pipe_config->name1) { \
8579 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8580 				"(expected %i, found %i, won't compare lut values)", \
8581 				current_config->name1, \
8582 				pipe_config->name1); \
8583 		ret = false;\
8584 	} else { \
8585 		if (!intel_color_lut_equal(current_config->name2, \
8586 					pipe_config->name2, pipe_config->name1, \
8587 					bit_precision)) { \
8588 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8589 					"hw_state doesn't match sw_state"); \
8590 			ret = false; \
8591 		} \
8592 	} \
8593 } while (0)
8594 
8595 #define PIPE_CONF_QUIRK(quirk) \
8596 	((current_config->quirks | pipe_config->quirks) & (quirk))
8597 
8598 	PIPE_CONF_CHECK_I(cpu_transcoder);
8599 
8600 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8601 	PIPE_CONF_CHECK_I(fdi_lanes);
8602 	PIPE_CONF_CHECK_M_N(fdi_m_n);
8603 
8604 	PIPE_CONF_CHECK_I(lane_count);
8605 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8606 
8607 	if (DISPLAY_VER(dev_priv) < 8) {
8608 		PIPE_CONF_CHECK_M_N(dp_m_n);
8609 
8610 		if (current_config->has_drrs)
8611 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
8612 	} else
8613 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8614 
8615 	PIPE_CONF_CHECK_X(output_types);
8616 
8617 	/* FIXME do the readout properly and get rid of this quirk */
8618 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8619 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8620 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8621 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8622 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8623 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8624 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8625 
8626 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8627 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8628 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8629 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8630 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8631 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8632 
8633 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8634 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8635 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8636 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8637 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8638 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8639 
8640 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8641 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8642 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8643 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8644 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8645 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8646 
8647 		PIPE_CONF_CHECK_I(pixel_multiplier);
8648 
8649 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8650 				      DRM_MODE_FLAG_INTERLACE);
8651 
8652 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8653 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8654 					      DRM_MODE_FLAG_PHSYNC);
8655 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8656 					      DRM_MODE_FLAG_NHSYNC);
8657 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8658 					      DRM_MODE_FLAG_PVSYNC);
8659 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8660 					      DRM_MODE_FLAG_NVSYNC);
8661 		}
8662 	}
8663 
8664 	PIPE_CONF_CHECK_I(output_format);
8665 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8666 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8667 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8668 		PIPE_CONF_CHECK_BOOL(limited_color_range);
8669 
8670 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8671 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8672 	PIPE_CONF_CHECK_BOOL(has_infoframe);
8673 	/* FIXME do the readout properly and get rid of this quirk */
8674 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8675 		PIPE_CONF_CHECK_BOOL(fec_enable);
8676 
8677 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8678 
8679 	PIPE_CONF_CHECK_X(gmch_pfit.control);
8680 	/* pfit ratios are autocomputed by the hw on gen4+ */
8681 	if (DISPLAY_VER(dev_priv) < 4)
8682 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8683 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8684 
8685 	/*
8686 	 * Changing the EDP transcoder input mux
8687 	 * (A_ONOFF vs. A_ON) requires a full modeset.
8688 	 */
8689 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8690 
8691 	if (!fastset) {
8692 		PIPE_CONF_CHECK_I(pipe_src_w);
8693 		PIPE_CONF_CHECK_I(pipe_src_h);
8694 
8695 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8696 		if (current_config->pch_pfit.enabled) {
8697 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8698 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8699 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8700 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8701 		}
8702 
8703 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8704 		/* FIXME do the readout properly and get rid of this quirk */
8705 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8706 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8707 
8708 		PIPE_CONF_CHECK_X(gamma_mode);
8709 		if (IS_CHERRYVIEW(dev_priv))
8710 			PIPE_CONF_CHECK_X(cgm_mode);
8711 		else
8712 			PIPE_CONF_CHECK_X(csc_mode);
8713 		PIPE_CONF_CHECK_BOOL(gamma_enable);
8714 		PIPE_CONF_CHECK_BOOL(csc_enable);
8715 
8716 		PIPE_CONF_CHECK_I(linetime);
8717 		PIPE_CONF_CHECK_I(ips_linetime);
8718 
8719 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8720 		if (bp_gamma)
8721 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8722 
8723 		PIPE_CONF_CHECK_BOOL(has_psr);
8724 		PIPE_CONF_CHECK_BOOL(has_psr2);
8725 		PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8726 		PIPE_CONF_CHECK_I(dc3co_exitline);
8727 	}
8728 
8729 	PIPE_CONF_CHECK_BOOL(double_wide);
8730 
8731 	if (dev_priv->dpll.mgr)
8732 		PIPE_CONF_CHECK_P(shared_dpll);
8733 
8734 	/* FIXME do the readout properly and get rid of this quirk */
8735 	if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8736 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8737 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8738 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8739 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8740 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8741 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8742 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8743 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8744 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8745 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8746 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8747 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8748 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8749 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8750 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8751 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8752 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8753 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8754 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8755 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8756 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8757 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8758 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8759 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8760 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8761 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8762 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8763 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8764 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8765 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8766 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8767 	}
8768 
8769 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8770 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8771 		PIPE_CONF_CHECK_X(dsi_pll.div);
8772 
8773 		if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8774 			PIPE_CONF_CHECK_I(pipe_bpp);
8775 
8776 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8777 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8778 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8779 
8780 		PIPE_CONF_CHECK_I(min_voltage_level);
8781 	}
8782 
8783 	if (fastset && (current_config->has_psr || pipe_config->has_psr))
8784 		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
8785 					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
8786 	else
8787 		PIPE_CONF_CHECK_X(infoframes.enable);
8788 
8789 	PIPE_CONF_CHECK_X(infoframes.gcp);
8790 	PIPE_CONF_CHECK_INFOFRAME(avi);
8791 	PIPE_CONF_CHECK_INFOFRAME(spd);
8792 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
8793 	PIPE_CONF_CHECK_INFOFRAME(drm);
8794 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8795 
8796 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8797 	PIPE_CONF_CHECK_I(master_transcoder);
8798 	PIPE_CONF_CHECK_BOOL(bigjoiner);
8799 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8800 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8801 
8802 	PIPE_CONF_CHECK_I(dsc.compression_enable);
8803 	PIPE_CONF_CHECK_I(dsc.dsc_split);
8804 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8805 
8806 	PIPE_CONF_CHECK_BOOL(splitter.enable);
8807 	PIPE_CONF_CHECK_I(splitter.link_count);
8808 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8809 
8810 	PIPE_CONF_CHECK_I(mst_master_transcoder);
8811 
8812 	PIPE_CONF_CHECK_BOOL(vrr.enable);
8813 	PIPE_CONF_CHECK_I(vrr.vmin);
8814 	PIPE_CONF_CHECK_I(vrr.vmax);
8815 	PIPE_CONF_CHECK_I(vrr.flipline);
8816 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
8817 	PIPE_CONF_CHECK_I(vrr.guardband);
8818 
8819 #undef PIPE_CONF_CHECK_X
8820 #undef PIPE_CONF_CHECK_I
8821 #undef PIPE_CONF_CHECK_BOOL
8822 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8823 #undef PIPE_CONF_CHECK_P
8824 #undef PIPE_CONF_CHECK_FLAGS
8825 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8826 #undef PIPE_CONF_CHECK_COLOR_LUT
8827 #undef PIPE_CONF_QUIRK
8828 
8829 	return ret;
8830 }
8831 
8832 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8833 					   const struct intel_crtc_state *pipe_config)
8834 {
8835 	if (pipe_config->has_pch_encoder) {
8836 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8837 							    &pipe_config->fdi_m_n);
8838 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8839 
8840 		/*
8841 		 * FDI already provided one idea for the dotclock.
8842 		 * Yell if the encoder disagrees.
8843 		 */
8844 		drm_WARN(&dev_priv->drm,
8845 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8846 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8847 			 fdi_dotclock, dotclock);
8848 	}
8849 }
8850 
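/*
 * Read the SKL+ watermark and DDB allocation state back from the
 * hardware and compare it, per plane and per wm level, against the
 * values the sw state believes were programmed.
 */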
8851 static void verify_wm_state(struct intel_crtc *crtc,
8852 			    struct intel_crtc_state *new_crtc_state)
8853 {
8854 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8855 	struct skl_hw_state {
8856 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
8857 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
8858 		struct skl_pipe_wm wm;
8859 	} *hw;
8860 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
8861 	int level, max_level = ilk_wm_max_level(dev_priv);
8862 	struct intel_plane *plane;
8863 	u8 hw_enabled_slices;
8864 
8865 	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
8866 		return;
8867 
8868 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
8869 	if (!hw)
8870 		return;
8871 
8872 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
8873 
8874 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
8875 
8876 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
8877 
8878 	if (DISPLAY_VER(dev_priv) >= 11 &&
8879 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
8880 		drm_err(&dev_priv->drm,
8881 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
8882 			dev_priv->dbuf.enabled_slices,
8883 			hw_enabled_slices);
8884 
8885 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8886 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
8887 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
8888 
8889 		/* Watermarks */
8890 		for (level = 0; level <= max_level; level++) {
8891 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
8892 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
8893 
8894 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
8895 				continue;
8896 
8897 			drm_err(&dev_priv->drm,
8898 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8899 				plane->base.base.id, plane->base.name, level,
8900 				sw_wm_level->enable,
8901 				sw_wm_level->blocks,
8902 				sw_wm_level->lines,
8903 				hw_wm_level->enable,
8904 				hw_wm_level->blocks,
8905 				hw_wm_level->lines);
8906 		}
8907 
8908 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
8909 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
8910 
8911 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8912 			drm_err(&dev_priv->drm,
8913 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8914 				plane->base.base.id, plane->base.name,
8915 				sw_wm_level->enable,
8916 				sw_wm_level->blocks,
8917 				sw_wm_level->lines,
8918 				hw_wm_level->enable,
8919 				hw_wm_level->blocks,
8920 				hw_wm_level->lines);
8921 		}
8922 
8923 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
8924 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
8925 
8926 		if (HAS_HW_SAGV_WM(dev_priv) &&
8927 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8928 			drm_err(&dev_priv->drm,
8929 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8930 				plane->base.base.id, plane->base.name,
8931 				sw_wm_level->enable,
8932 				sw_wm_level->blocks,
8933 				sw_wm_level->lines,
8934 				hw_wm_level->enable,
8935 				hw_wm_level->blocks,
8936 				hw_wm_level->lines);
8937 		}
8938 
8939 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
8940 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
8941 
8942 		if (HAS_HW_SAGV_WM(dev_priv) &&
8943 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8944 			drm_err(&dev_priv->drm,
8945 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8946 				plane->base.base.id, plane->base.name,
8947 				sw_wm_level->enable,
8948 				sw_wm_level->blocks,
8949 				sw_wm_level->lines,
8950 				hw_wm_level->enable,
8951 				hw_wm_level->blocks,
8952 				hw_wm_level->lines);
8953 		}
8954 
8955 		/* DDB */
8956 		hw_ddb_entry = &hw->ddb_y[plane->id];
8957 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
8958 
8959 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
8960 			drm_err(&dev_priv->drm,
8961 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
8962 				plane->base.base.id, plane->base.name,
8963 				sw_ddb_entry->start, sw_ddb_entry->end,
8964 				hw_ddb_entry->start, hw_ddb_entry->end);
8965 		}
8966 	}
8967 
8968 	kfree(hw);
8969 }
8970 
8971 static void
8972 verify_connector_state(struct intel_atomic_state *state,
8973 		       struct intel_crtc *crtc)
8974 {
8975 	struct drm_connector *connector;
8976 	struct drm_connector_state *new_conn_state;
8977 	int i;
8978 
8979 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8980 		struct drm_encoder *encoder = connector->encoder;
8981 		struct intel_crtc_state *crtc_state = NULL;
8982 
8983 		if (new_conn_state->crtc != &crtc->base)
8984 			continue;
8985 
8986 		if (crtc)
8987 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8988 
8989 		intel_connector_verify_state(crtc_state, new_conn_state);
8990 
8991 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8992 		     "connector's atomic encoder doesn't match legacy encoder\n");
8993 	}
8994 }
8995 
8996 static void
8997 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8998 {
8999 	struct intel_encoder *encoder;
9000 	struct drm_connector *connector;
9001 	struct drm_connector_state *old_conn_state, *new_conn_state;
9002 	int i;
9003 
9004 	for_each_intel_encoder(&dev_priv->drm, encoder) {
9005 		bool enabled = false, found = false;
9006 		enum pipe pipe;
9007 
9008 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
9009 			    encoder->base.base.id,
9010 			    encoder->base.name);
9011 
9012 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
9013 						   new_conn_state, i) {
9014 			if (old_conn_state->best_encoder == &encoder->base)
9015 				found = true;
9016 
9017 			if (new_conn_state->best_encoder != &encoder->base)
9018 				continue;
9019 			found = enabled = true;
9020 
9021 			I915_STATE_WARN(new_conn_state->crtc !=
9022 					encoder->base.crtc,
9023 			     "connector's crtc doesn't match encoder crtc\n");
9024 		}
9025 
9026 		if (!found)
9027 			continue;
9028 
9029 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
9030 		     "encoder's enabled state mismatch "
9031 		     "(expected %i, found %i)\n",
9032 		     !!encoder->base.crtc, enabled);
9033 
9034 		if (!encoder->base.crtc) {
9035 			bool active;
9036 
9037 			active = encoder->get_hw_state(encoder, &pipe);
9038 			I915_STATE_WARN(active,
9039 			     "encoder detached but still enabled on pipe %c.\n",
9040 			     pipe_name(pipe));
9041 		}
9042 	}
9043 }
9044 
9045 static void
9046 verify_crtc_state(struct intel_crtc *crtc,
9047 		  struct intel_crtc_state *old_crtc_state,
9048 		  struct intel_crtc_state *new_crtc_state)
9049 {
9050 	struct drm_device *dev = crtc->base.dev;
9051 	struct drm_i915_private *dev_priv = to_i915(dev);
9052 	struct intel_encoder *encoder;
9053 	struct intel_crtc_state *pipe_config = old_crtc_state;
9054 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
9055 	struct intel_crtc *master = crtc;
9056 
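	/*
	 * old_crtc_state is reused as scratch space here: it is wiped
	 * and refilled with the config read back from the hardware, to
	 * be compared against new_crtc_state (the sw state) below.
	 */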
9057 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
9058 	intel_crtc_free_hw_state(old_crtc_state);
9059 	intel_crtc_state_reset(old_crtc_state, crtc);
9060 	old_crtc_state->uapi.state = state;
9061 
9062 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
9063 		    crtc->base.name);
9064 
9065 	pipe_config->hw.enable = new_crtc_state->hw.enable;
9066 
9067 	intel_crtc_get_pipe_config(pipe_config);
9068 
9069 	/* we keep both pipes enabled on 830 */
9070 	if (IS_I830(dev_priv) && pipe_config->hw.active)
9071 		pipe_config->hw.active = new_crtc_state->hw.active;
9072 
9073 	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
9074 			"crtc active state doesn't match hw state "
9075 			"(expected %i, found %i)\n",
9076 			new_crtc_state->hw.active, pipe_config->hw.active);
9077 
9078 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
9079 			"transitional active state does not match atomic hw state "
9080 			"(expected %i, found %i)\n",
9081 			new_crtc_state->hw.active, crtc->active);
9082 
9083 	if (new_crtc_state->bigjoiner_slave)
9084 		master = new_crtc_state->bigjoiner_linked_crtc;
9085 
9086 	for_each_encoder_on_crtc(dev, &master->base, encoder) {
9087 		enum pipe pipe;
9088 		bool active;
9089 
9090 		active = encoder->get_hw_state(encoder, &pipe);
9091 		I915_STATE_WARN(active != new_crtc_state->hw.active,
9092 				"[ENCODER:%i] active %i with crtc active %i\n",
9093 				encoder->base.base.id, active,
9094 				new_crtc_state->hw.active);
9095 
9096 		I915_STATE_WARN(active && master->pipe != pipe,
9097 				"Encoder connected to wrong pipe %c\n",
9098 				pipe_name(pipe));
9099 
9100 		if (active)
9101 			intel_encoder_get_config(encoder, pipe_config);
9102 	}
9103 
9104 	if (!new_crtc_state->hw.active)
9105 		return;
9106 
9107 	if (new_crtc_state->bigjoiner_slave)
9108 		/* No PLLs set for slave */
9109 		pipe_config->shared_dpll = NULL;
9110 
9111 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
9112 
9113 	if (!intel_pipe_config_compare(new_crtc_state,
9114 				       pipe_config, false)) {
9115 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
9116 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
9117 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
9118 	}
9119 }
9120 
9121 static void
9122 intel_verify_planes(struct intel_atomic_state *state)
9123 {
9124 	struct intel_plane *plane;
9125 	const struct intel_plane_state *plane_state;
9126 	int i;
9127 
9128 	for_each_new_intel_plane_in_state(state, plane,
9129 					  plane_state, i)
9130 		assert_plane(plane, plane_state->planar_slave ||
9131 			     plane_state->uapi.visible);
9132 }
9133 
9134 static void
9135 verify_single_dpll_state(struct drm_i915_private *dev_priv,
9136 			 struct intel_shared_dpll *pll,
9137 			 struct intel_crtc *crtc,
9138 			 struct intel_crtc_state *new_crtc_state)
9139 {
9140 	struct intel_dpll_hw_state dpll_hw_state;
9141 	u8 pipe_mask;
9142 	bool active;
9143 
9144 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
9145 
9146 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
9147 
9148 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
9149 
9150 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
9151 		I915_STATE_WARN(!pll->on && pll->active_mask,
9152 		     "pll in active use but not on in sw tracking\n");
9153 		I915_STATE_WARN(pll->on && !pll->active_mask,
9154 		     "pll is on but not used by any active pipe\n");
9155 		I915_STATE_WARN(pll->on != active,
9156 		     "pll on state mismatch (expected %i, found %i)\n",
9157 		     pll->on, active);
9158 	}
9159 
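	/*
	 * Without a crtc (the disabled-pll case) there is no pipe to
	 * check against; just verify the refcount bookkeeping and bail.
	 */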
9160 	if (!crtc) {
9161 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
9162 				"more active pll users than references: 0x%x vs 0x%x\n",
9163 				pll->active_mask, pll->state.pipe_mask);
9164 
9165 		return;
9166 	}
9167 
9168 	pipe_mask = BIT(crtc->pipe);
9169 
9170 	if (new_crtc_state->hw.active)
9171 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
9172 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
9173 				pipe_name(crtc->pipe), pll->active_mask);
9174 	else
9175 		I915_STATE_WARN(pll->active_mask & pipe_mask,
9176 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
9177 				pipe_name(crtc->pipe), pll->active_mask);
9178 
9179 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
9180 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
9181 			pipe_mask, pll->state.pipe_mask);
9182 
9183 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
9184 					  &dpll_hw_state,
9185 					  sizeof(dpll_hw_state)),
9186 			"pll hw state mismatch\n");
9187 }
9188 
9189 static void
9190 verify_shared_dpll_state(struct intel_crtc *crtc,
9191 			 struct intel_crtc_state *old_crtc_state,
9192 			 struct intel_crtc_state *new_crtc_state)
9193 {
9194 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9195 
9196 	if (new_crtc_state->shared_dpll)
9197 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
9198 
9199 	if (old_crtc_state->shared_dpll &&
9200 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
9201 		u8 pipe_mask = BIT(crtc->pipe);
9202 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
9203 
9204 		I915_STATE_WARN(pll->active_mask & pipe_mask,
9205 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
9206 				pipe_name(crtc->pipe), pll->active_mask);
9207 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
9208 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
9209 				pipe_name(crtc->pipe), pll->state.pipe_mask);
9210 	}
9211 }
9212 
9213 static void
9214 verify_mpllb_state(struct intel_atomic_state *state,
9215 		   struct intel_crtc_state *new_crtc_state)
9216 {
9217 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9218 	struct intel_mpllb_state mpllb_hw_state = { 0 };
9219 	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
9220 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
9221 	struct intel_encoder *encoder;
9222 
9223 	if (!IS_DG2(i915))
9224 		return;
9225 
9226 	if (!new_crtc_state->hw.active)
9227 		return;
9228 
9229 	if (new_crtc_state->bigjoiner_slave)
9230 		return;
9231 
9232 	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
9233 	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
9234 
9235 #define MPLLB_CHECK(name) do { \
9236 	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
9237 		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
9238 				     "(expected 0x%08x, found 0x%08x)", \
9239 				     mpllb_sw_state->name, \
9240 				     mpllb_hw_state.name); \
9241 	} \
9242 } while (0)
9243 
9244 	MPLLB_CHECK(mpllb_cp);
9245 	MPLLB_CHECK(mpllb_div);
9246 	MPLLB_CHECK(mpllb_div2);
9247 	MPLLB_CHECK(mpllb_fracn1);
9248 	MPLLB_CHECK(mpllb_fracn2);
9249 	MPLLB_CHECK(mpllb_sscen);
9250 	MPLLB_CHECK(mpllb_sscstep);
9251 
9252 	/*
9253 	 * ref_control is handled by the hardware/firmware and never
9254 	 * programmed by the software, but the proper values are supplied
9255 	 * in the bspec for verification purposes.
9256 	 */
9257 	MPLLB_CHECK(ref_control);
9258 
9259 #undef MPLLB_CHECK
9260 }
9261 
9262 static void
9263 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9264 			  struct intel_atomic_state *state,
9265 			  struct intel_crtc_state *old_crtc_state,
9266 			  struct intel_crtc_state *new_crtc_state)
9267 {
9268 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9269 		return;
9270 
9271 	verify_wm_state(crtc, new_crtc_state);
9272 	verify_connector_state(state, crtc);
9273 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9274 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9275 	verify_mpllb_state(state, new_crtc_state);
9276 }
9277 
9278 static void
9279 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9280 {
9281 	int i;
9282 
9283 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9284 		verify_single_dpll_state(dev_priv,
9285 					 &dev_priv->dpll.shared_dplls[i],
9286 					 NULL, NULL);
9287 }
9288 
9289 static void
9290 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
9291 			      struct intel_atomic_state *state)
9292 {
9293 	verify_encoder_state(dev_priv, state);
9294 	verify_connector_state(state, NULL);
9295 	verify_disabled_dpll_state(dev_priv);
9296 }
9297 
9298 int intel_modeset_all_pipes(struct intel_atomic_state *state)
9299 {
9300 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9301 	struct intel_crtc *crtc;
9302 
9303 	/*
9304 	 * Add all pipes to the state, and force
9305 	 * a modeset on all the active ones.
9306 	 */
9307 	for_each_intel_crtc(&dev_priv->drm, crtc) {
9308 		struct intel_crtc_state *crtc_state;
9309 		int ret;
9310 
9311 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9312 		if (IS_ERR(crtc_state))
9313 			return PTR_ERR(crtc_state);
9314 
9315 		if (!crtc_state->hw.active ||
9316 		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
9317 			continue;
9318 
9319 		crtc_state->uapi.mode_changed = true;
9320 
9321 		ret = drm_atomic_add_affected_connectors(&state->base,
9322 							 &crtc->base);
9323 		if (ret)
9324 			return ret;
9325 
9326 		ret = intel_atomic_add_affected_planes(state, crtc);
9327 		if (ret)
9328 			return ret;
9329 
9330 		crtc_state->update_planes |= crtc_state->active_planes;
9331 	}
9332 
9333 	return 0;
9334 }
9335 
9336 static void
9337 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
9338 {
9339 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9340 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9341 	struct drm_display_mode adjusted_mode =
9342 		crtc_state->hw.adjusted_mode;
9343 
9344 	if (crtc_state->vrr.enable) {
9345 		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
9346 		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
9347 		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
9348 		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
9349 	}
9350 
9351 	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
9352 
9353 	crtc->mode_flags = crtc_state->mode_flags;
9354 
9355 	/*
9356 	 * The scanline counter increments at the leading edge of hsync.
9357 	 *
9358 	 * On most platforms it starts counting from vtotal-1 on the
9359 	 * first active line. That means the scanline counter value is
9360 	 * always one less than what we would expect. I.e. just after
9361 	 * start of vblank, which also occurs at start of hsync (on the
9362 	 * last active line), the scanline counter will read vblank_start-1.
9363 	 *
9364 	 * On gen2 the scanline counter starts counting from 1 instead
9365 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
9366 	 * to keep the value positive), instead of adding one.
9367 	 *
9368 	 * On HSW+ the behaviour of the scanline counter depends on the output
9369 	 * type. For DP ports it behaves like most other platforms, but on HDMI
9370 	 * there's an extra 1 line difference. So we need to add two instead of
9371 	 * one to the value.
9372 	 *
9373 	 * On VLV/CHV DSI the scanline counter would appear to increment
9374 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
9375 	 * that means we can't tell whether we're in vblank or not while
9376 	 * we're on that particular line. We must still set scanline_offset
9377 	 * to 1 so that the vblank timestamps come out correct when we query
9378 	 * the scanline counter from within the vblank interrupt handler.
9379 	 * However if queried just before the start of vblank we'll get an
9380 	 * answer that's slightly in the future.
9381 	 */
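	/*
	 * Worked example (illustrative, assuming vtotal = 525): on gen2 a
	 * progressive mode yields scanline_offset = 524, an interlaced one
	 * 525 / 2 - 1 = 261; HSW+ HDMI uses 2, everything else uses 1.
	 */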
9382 	if (DISPLAY_VER(dev_priv) == 2) {
9383 		int vtotal;
9384 
9385 		vtotal = adjusted_mode.crtc_vtotal;
9386 		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9387 			vtotal /= 2;
9388 
9389 		crtc->scanline_offset = vtotal - 1;
9390 	} else if (HAS_DDI(dev_priv) &&
9391 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
9392 		crtc->scanline_offset = 2;
9393 	} else {
9394 		crtc->scanline_offset = 1;
9395 	}
9396 }
9397 
9398 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9399 {
9400 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9401 	struct intel_crtc_state *new_crtc_state;
9402 	struct intel_crtc *crtc;
9403 	int i;
9404 
9405 	if (!dev_priv->display.crtc_compute_clock)
9406 		return;
9407 
9408 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9409 		if (!intel_crtc_needs_modeset(new_crtc_state))
9410 			continue;
9411 
9412 		intel_release_shared_dplls(state, crtc);
9413 	}
9414 }
9415 
9416 /*
9417  * This implements the workaround described in the "notes" section of the mode
9418  * set sequence documentation. When going from no pipes or single pipe to
9419  * multiple pipes, and planes are enabled after the pipe, we need to wait at
9420  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
9421  */
9422 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
9423 {
9424 	struct intel_crtc_state *crtc_state;
9425 	struct intel_crtc *crtc;
9426 	struct intel_crtc_state *first_crtc_state = NULL;
9427 	struct intel_crtc_state *other_crtc_state = NULL;
9428 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
9429 	int i;
9430 
9431 	/* look at all crtc's that are going to be enabled during the modeset */
9432 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9433 		if (!crtc_state->hw.active ||
9434 		    !intel_crtc_needs_modeset(crtc_state))
9435 			continue;
9436 
9437 		if (first_crtc_state) {
9438 			other_crtc_state = crtc_state;
9439 			break;
9440 		} else {
9441 			first_crtc_state = crtc_state;
9442 			first_pipe = crtc->pipe;
9443 		}
9444 	}
9445 
9446 	/* No workaround needed? */
9447 	if (!first_crtc_state)
9448 		return 0;
9449 
9450 	/* w/a possibly needed, check how many crtc's are already enabled. */
9451 	for_each_intel_crtc(state->base.dev, crtc) {
9452 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9453 		if (IS_ERR(crtc_state))
9454 			return PTR_ERR(crtc_state);
9455 
9456 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
9457 
9458 		if (!crtc_state->hw.active ||
9459 		    intel_crtc_needs_modeset(crtc_state))
9460 			continue;
9461 
9462 		/* 2 or more enabled crtcs means no need for w/a */
9463 		if (enabled_pipe != INVALID_PIPE)
9464 			return 0;
9465 
9466 		enabled_pipe = crtc->pipe;
9467 	}
9468 
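	/*
	 * Two cases remain: if exactly one pipe was already enabled, the
	 * newly enabled pipe must wait on it; if we're enabling two pipes
	 * from scratch, the second one must wait on the first.
	 */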
9469 	if (enabled_pipe != INVALID_PIPE)
9470 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
9471 	else if (other_crtc_state)
9472 		other_crtc_state->hsw_workaround_pipe = first_pipe;
9473 
9474 	return 0;
9475 }
9476 
9477 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9478 			   u8 active_pipes)
9479 {
9480 	const struct intel_crtc_state *crtc_state;
9481 	struct intel_crtc *crtc;
9482 	int i;
9483 
9484 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9485 		if (crtc_state->hw.active)
9486 			active_pipes |= BIT(crtc->pipe);
9487 		else
9488 			active_pipes &= ~BIT(crtc->pipe);
9489 	}
9490 
9491 	return active_pipes;
9492 }
9493 
9494 static int intel_modeset_checks(struct intel_atomic_state *state)
9495 {
9496 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9497 
9498 	state->modeset = true;
9499 
9500 	if (IS_HASWELL(dev_priv))
9501 		return hsw_mode_set_planes_workaround(state);
9502 
9503 	return 0;
9504 }
9505 
9506 /*
9507  * Handle calculation of various watermark data at the end of the atomic check
9508  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9509  * handlers to ensure that all derived state has been updated.
9510  */
9511 static int calc_watermark_data(struct intel_atomic_state *state)
9512 {
9513 	struct drm_device *dev = state->base.dev;
9514 	struct drm_i915_private *dev_priv = to_i915(dev);
9515 
9516 	/* Is there platform-specific watermark information to calculate? */
9517 	if (dev_priv->display.compute_global_watermarks)
9518 		return dev_priv->display.compute_global_watermarks(state);
9519 
9520 	return 0;
9521 }
9522 
9523 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9524 				     struct intel_crtc_state *new_crtc_state)
9525 {
9526 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9527 		return;
9528 
9529 	new_crtc_state->uapi.mode_changed = false;
9530 	new_crtc_state->update_pipe = true;
9531 }
9532 
9533 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9534 				    struct intel_crtc_state *new_crtc_state)
9535 {
9536 	/*
9537 	 * If we're not doing the full modeset we want to
9538 	 * keep the current M/N values as they may be
9539 	 * sufficiently different to the computed values
9540 	 * to cause problems.
9541 	 *
9542 	 * FIXME: should really copy more fuzzy state here
9543 	 */
9544 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9545 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9546 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9547 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9548 }
9549 
9550 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9551 					  struct intel_crtc *crtc,
9552 					  u8 plane_ids_mask)
9553 {
9554 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9555 	struct intel_plane *plane;
9556 
9557 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9558 		struct intel_plane_state *plane_state;
9559 
9560 		if ((plane_ids_mask & BIT(plane->id)) == 0)
9561 			continue;
9562 
9563 		plane_state = intel_atomic_get_plane_state(state, plane);
9564 		if (IS_ERR(plane_state))
9565 			return PTR_ERR(plane_state);
9566 	}
9567 
9568 	return 0;
9569 }
9570 
9571 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9572 				     struct intel_crtc *crtc)
9573 {
9574 	const struct intel_crtc_state *old_crtc_state =
9575 		intel_atomic_get_old_crtc_state(state, crtc);
9576 	const struct intel_crtc_state *new_crtc_state =
9577 		intel_atomic_get_new_crtc_state(state, crtc);
9578 
9579 	return intel_crtc_add_planes_to_state(state, crtc,
9580 					      old_crtc_state->enabled_planes |
9581 					      new_crtc_state->enabled_planes);
9582 }
9583 
9584 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9585 {
9586 	/* See {hsw,vlv,ivb}_plane_ratio() */
9587 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9588 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9589 		IS_IVYBRIDGE(dev_priv);
9590 }
9591 
9592 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9593 					   struct intel_crtc *crtc,
9594 					   struct intel_crtc *other)
9595 {
9596 	const struct intel_plane_state *plane_state;
9597 	struct intel_plane *plane;
9598 	u8 plane_ids = 0;
9599 	int i;
9600 
9601 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9602 		if (plane->pipe == crtc->pipe)
9603 			plane_ids |= BIT(plane->id);
9604 	}
9605 
9606 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
9607 }
9608 
9609 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9610 {
9611 	const struct intel_crtc_state *crtc_state;
9612 	struct intel_crtc *crtc;
9613 	int i;
9614 
9615 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9616 		int ret;
9617 
9618 		if (!crtc_state->bigjoiner)
9619 			continue;
9620 
9621 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9622 						      crtc_state->bigjoiner_linked_crtc);
9623 		if (ret)
9624 			return ret;
9625 	}
9626 
9627 	return 0;
9628 }
9629 
9630 static int intel_atomic_check_planes(struct intel_atomic_state *state)
9631 {
9632 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9633 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9634 	struct intel_plane_state *plane_state;
9635 	struct intel_plane *plane;
9636 	struct intel_crtc *crtc;
9637 	int i, ret;
9638 
9639 	ret = icl_add_linked_planes(state);
9640 	if (ret)
9641 		return ret;
9642 
9643 	ret = intel_bigjoiner_add_affected_planes(state);
9644 	if (ret)
9645 		return ret;
9646 
9647 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9648 		ret = intel_plane_atomic_check(state, plane);
9649 		if (ret) {
9650 			drm_dbg_atomic(&dev_priv->drm,
9651 				       "[PLANE:%d:%s] atomic driver check failed\n",
9652 				       plane->base.base.id, plane->base.name);
9653 			return ret;
9654 		}
9655 	}
9656 
9657 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9658 					    new_crtc_state, i) {
9659 		u8 old_active_planes, new_active_planes;
9660 
9661 		ret = icl_check_nv12_planes(new_crtc_state);
9662 		if (ret)
9663 			return ret;
9664 
9665 		/*
9666 		 * On some platforms the number of active planes affects
9667 		 * the planes' minimum cdclk calculation. Add such planes
9668 		 * to the state before we compute the minimum cdclk.
9669 		 */
9670 		if (!active_planes_affects_min_cdclk(dev_priv))
9671 			continue;
9672 
9673 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9674 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9675 
9676 		if (hweight8(old_active_planes) == hweight8(new_active_planes))
9677 			continue;
9678 
9679 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
9680 		if (ret)
9681 			return ret;
9682 	}
9683 
9684 	return 0;
9685 }
9686 
9687 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9688 				    bool *need_cdclk_calc)
9689 {
9690 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9691 	const struct intel_cdclk_state *old_cdclk_state;
9692 	const struct intel_cdclk_state *new_cdclk_state;
9693 	struct intel_plane_state *plane_state;
9694 	struct intel_bw_state *new_bw_state;
9695 	struct intel_plane *plane;
9696 	int min_cdclk = 0;
9697 	enum pipe pipe;
9698 	int ret;
9699 	int i;
9700 	/*
9701 	 * active_planes bitmask has been updated, and potentially
9702 	 * affected planes are part of the state. We can now
9703 	 * compute the minimum cdclk for each plane.
9704 	 */
9705 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9706 		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9707 		if (ret)
9708 			return ret;
9709 	}
9710 
9711 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9712 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9713 
9714 	if (new_cdclk_state &&
9715 	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9716 		*need_cdclk_calc = true;
9717 
9718 	ret = dev_priv->display.bw_calc_min_cdclk(state);
9719 	if (ret)
9720 		return ret;
9721 
9722 	new_bw_state = intel_atomic_get_new_bw_state(state);
9723 
9724 	if (!new_cdclk_state || !new_bw_state)
9725 		return 0;
9726 
9727 	for_each_pipe(dev_priv, pipe) {
9728 		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9729 
9730 		/*
9731 		 * Currently do this change only if we need to increase
9732 		 */
9733 		if (new_bw_state->min_cdclk > min_cdclk)
9734 			*need_cdclk_calc = true;
9735 	}
9736 
9737 	return 0;
9738 }
9739 
9740 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9741 {
9742 	struct intel_crtc_state *crtc_state;
9743 	struct intel_crtc *crtc;
9744 	int i;
9745 
9746 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9747 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9748 		int ret;
9749 
9750 		ret = intel_crtc_atomic_check(state, crtc);
9751 		if (ret) {
9752 			drm_dbg_atomic(&i915->drm,
9753 				       "[CRTC:%d:%s] atomic driver check failed\n",
9754 				       crtc->base.base.id, crtc->base.name);
9755 			return ret;
9756 		}
9757 	}
9758 
9759 	return 0;
9760 }
9761 
9762 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9763 					       u8 transcoders)
9764 {
9765 	const struct intel_crtc_state *new_crtc_state;
9766 	struct intel_crtc *crtc;
9767 	int i;
9768 
9769 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9770 		if (new_crtc_state->hw.enable &&
9771 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9772 		    intel_crtc_needs_modeset(new_crtc_state))
9773 			return true;
9774 	}
9775 
9776 	return false;
9777 }
9778 
9779 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9780 					struct intel_crtc *crtc,
9781 					struct intel_crtc_state *old_crtc_state,
9782 					struct intel_crtc_state *new_crtc_state)
9783 {
9784 	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9785 	struct intel_crtc *slave, *master;
9786 
9787 	/* slave being enabled, is the master still claiming this crtc? */
9788 	if (old_crtc_state->bigjoiner_slave) {
9789 		slave = crtc;
9790 		master = old_crtc_state->bigjoiner_linked_crtc;
9791 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9792 		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9793 			goto claimed;
9794 	}
9795 
9796 	if (!new_crtc_state->bigjoiner)
9797 		return 0;
9798 
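	/*
	 * Bigjoiner pairs the master CRTC with the next CRTC (CRTC + 1)
	 * as its slave.
	 */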
9799 	slave = intel_dsc_get_bigjoiner_secondary(crtc);
9800 	if (!slave) {
9801 		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
9802 			      "CRTC + 1 to be used, but it doesn't exist\n",
9803 			      crtc->base.base.id, crtc->base.name);
9804 		return -EINVAL;
9805 	}
9806 
9807 	new_crtc_state->bigjoiner_linked_crtc = slave;
9808 	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9809 	master = crtc;
9810 	if (IS_ERR(slave_crtc_state))
9811 		return PTR_ERR(slave_crtc_state);
9812 
9813 	/* master being enabled, slave was already configured? */
9814 	if (slave_crtc_state->uapi.enable)
9815 		goto claimed;
9816 
9817 	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9818 		      slave->base.base.id, slave->base.name);
9819 
9820 	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9821 
9822 claimed:
9823 	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
9824 		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
9825 		      slave->base.base.id, slave->base.name,
9826 		      master->base.base.id, master->base.name);
9827 	return -EINVAL;
9828 }
9829 
9830 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9831 				 struct intel_crtc_state *master_crtc_state)
9832 {
9833 	struct intel_crtc_state *slave_crtc_state =
9834 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9835 
9836 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9837 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9838 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9839 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9840 }
9841 
9842 /**
9843  * DOC: asynchronous flip implementation
9844  *
9845  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9846  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9847  * Correspondingly, support is currently added for the primary plane only.
9848  *
9849  * Async flip can only change the plane surface address, so anything else
9850  * changing is rejected from the intel_atomic_check_async() function.
9851  * Once this check is cleared, flip done interrupt is enabled using
9852  * the intel_crtc_enable_flip_done() function.
9853  *
9854  * As soon as the surface address register is written, flip done interrupt is
9855  * generated and the requested events are sent to userspace in the interrupt
9856  * handler itself. The timestamp and sequence sent during the flip done event
9857  * correspond to the last vblank and have no relation to the actual time when
9858  * the flip done event was sent.
9859  */
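/*
 * Illustrative userspace-side sketch (not part of this driver): such a
 * flip would typically be requested via libdrm as
 *
 *	drmModePageFlip(fd, crtc_id, fb_id,
 *			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
 *			user_data);
 *
 * with a framebuffer that differs from the currently scanned out one
 * only in its surface address; anything else is rejected by the checks
 * below.
 */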
9860 static int intel_atomic_check_async(struct intel_atomic_state *state)
9861 {
9862 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9863 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9864 	const struct intel_plane_state *new_plane_state, *old_plane_state;
9865 	struct intel_crtc *crtc;
9866 	struct intel_plane *plane;
9867 	int i;
9868 
9869 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9870 					    new_crtc_state, i) {
9871 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9872 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
9873 			return -EINVAL;
9874 		}
9875 
9876 		if (!new_crtc_state->hw.active) {
9877 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9878 			return -EINVAL;
9879 		}
9880 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9881 			drm_dbg_kms(&i915->drm,
9882 				    "Active planes cannot be changed during async flip\n");
9883 			return -EINVAL;
9884 		}
9885 	}
9886 
9887 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9888 					     new_plane_state, i) {
9889 		/*
9890 		 * TODO: Async flip is only supported through the page flip IOCTL
9891 		 * as of now. So support is currently added for the primary plane
9892 		 * only. Support for other planes on platforms that support this
9893 		 * (vlv/chv and icl+) should be added when async flip is
9894 		 * enabled in the atomic IOCTL path.
9895 		 */
9896 		if (!plane->async_flip)
9897 			return -EINVAL;
9898 
9899 		/*
9900 		 * FIXME: This check is kept generic for all platforms.
9901 		 * Need to verify this for all gen9 platforms to enable
9902 		 * this selectively if required.
9903 		 */
9904 		switch (new_plane_state->hw.fb->modifier) {
9905 		case I915_FORMAT_MOD_X_TILED:
9906 		case I915_FORMAT_MOD_Y_TILED:
9907 		case I915_FORMAT_MOD_Yf_TILED:
9908 			break;
9909 		default:
9910 			drm_dbg_kms(&i915->drm,
9911 				    "Linear memory/CCS does not support async flips\n");
9912 			return -EINVAL;
9913 		}
9914 
9915 		if (old_plane_state->view.color_plane[0].stride !=
9916 		    new_plane_state->view.color_plane[0].stride) {
9917 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9918 			return -EINVAL;
9919 		}
9920 
9921 		if (old_plane_state->hw.fb->modifier !=
9922 		    new_plane_state->hw.fb->modifier) {
9923 			drm_dbg_kms(&i915->drm,
9924 				    "Framebuffer modifiers cannot be changed in async flip\n");
9925 			return -EINVAL;
9926 		}
9927 
9928 		if (old_plane_state->hw.fb->format !=
9929 		    new_plane_state->hw.fb->format) {
9930 			drm_dbg_kms(&i915->drm,
9931 				    "Framebuffer format cannot be changed in async flip\n");
9932 			return -EINVAL;
9933 		}
9934 
9935 		if (old_plane_state->hw.rotation !=
9936 		    new_plane_state->hw.rotation) {
9937 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9938 			return -EINVAL;
9939 		}
9940 
9941 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9942 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9943 			drm_dbg_kms(&i915->drm,
9944 				    "Plane size/co-ordinates cannot be changed in async flip\n");
9945 			return -EINVAL;
9946 		}
9947 
9948 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9949 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9950 			return -EINVAL;
9951 		}
9952 
9953 		if (old_plane_state->hw.pixel_blend_mode !=
9954 		    new_plane_state->hw.pixel_blend_mode) {
9955 			drm_dbg_kms(&i915->drm,
9956 				    "Pixel blend mode cannot be changed in async flip\n");
9957 			return -EINVAL;
9958 		}
9959 
9960 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9961 			drm_dbg_kms(&i915->drm,
9962 				    "Color encoding cannot be changed in async flip\n");
9963 			return -EINVAL;
9964 		}
9965 
9966 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9967 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9968 			return -EINVAL;
9969 		}
9970 	}
9971 
9972 	return 0;
9973 }
9974 
9975 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
9976 {
9977 	struct intel_crtc_state *crtc_state;
9978 	struct intel_crtc *crtc;
9979 	int i;
9980 
9981 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9982 		struct intel_crtc_state *linked_crtc_state;
9983 		struct intel_crtc *linked_crtc;
9984 		int ret;
9985 
9986 		if (!crtc_state->bigjoiner)
9987 			continue;
9988 
9989 		linked_crtc = crtc_state->bigjoiner_linked_crtc;
9990 		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
9991 		if (IS_ERR(linked_crtc_state))
9992 			return PTR_ERR(linked_crtc_state);
9993 
9994 		if (!intel_crtc_needs_modeset(crtc_state))
9995 			continue;
9996 
9997 		linked_crtc_state->uapi.mode_changed = true;
9998 
9999 		ret = drm_atomic_add_affected_connectors(&state->base,
10000 							 &linked_crtc->base);
10001 		if (ret)
10002 			return ret;
10003 
10004 		ret = intel_atomic_add_affected_planes(state, linked_crtc);
10005 		if (ret)
10006 			return ret;
10007 	}
10008 
10009 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10010 		/* Kill old bigjoiner link, we may re-establish afterwards */
10011 		if (intel_crtc_needs_modeset(crtc_state) &&
10012 		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
10013 			kill_bigjoiner_slave(state, crtc_state);
10014 	}
10015 
10016 	return 0;
10017 }
10018 
10019 /**
10020  * intel_atomic_check - validate state object
10021  * @dev: drm device
10022  * @_state: state to validate
10023  */
10024 static int intel_atomic_check(struct drm_device *dev,
10025 			      struct drm_atomic_state *_state)
10026 {
10027 	struct drm_i915_private *dev_priv = to_i915(dev);
10028 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
10029 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10030 	struct intel_crtc *crtc;
10031 	int ret, i;
10032 	bool any_ms = false;
10033 
10034 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10035 					    new_crtc_state, i) {
10036 		if (new_crtc_state->inherited != old_crtc_state->inherited)
10037 			new_crtc_state->uapi.mode_changed = true;
10038 	}
10039 
10040 	intel_vrr_check_modeset(state);
10041 
10042 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
10043 	if (ret)
10044 		goto fail;
10045 
10046 	ret = intel_bigjoiner_add_affected_crtcs(state);
10047 	if (ret)
10048 		goto fail;
10049 
10050 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10051 					    new_crtc_state, i) {
10052 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
10053 			/* Light copy */
10054 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
10055 
10056 			continue;
10057 		}
10058 
10059 		if (!new_crtc_state->uapi.enable) {
10060 			if (!new_crtc_state->bigjoiner_slave) {
10061 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
10062 				any_ms = true;
10063 			}
10064 			continue;
10065 		}
10066 
10067 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
10068 		if (ret)
10069 			goto fail;
10070 
10071 		ret = intel_modeset_pipe_config(state, new_crtc_state);
10072 		if (ret)
10073 			goto fail;
10074 
10075 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
10076 						   new_crtc_state);
10077 		if (ret)
10078 			goto fail;
10079 	}
10080 
10081 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10082 					    new_crtc_state, i) {
10083 		if (!intel_crtc_needs_modeset(new_crtc_state))
10084 			continue;
10085 
10086 		ret = intel_modeset_pipe_config_late(new_crtc_state);
10087 		if (ret)
10088 			goto fail;
10089 
10090 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
10091 	}
10092 
10093 	/*
10094 	 * Check if fastset is allowed by external dependencies like other
10095 	 * pipes and transcoders.
10096 	 *
10097 	 * Right now it only forces a fullmodeset when the MST master
10098 	 * transcoder did not change but the pipe of the master transcoder
10099 	 * needs a fullmodeset, in which case all slaves also need to do a
10100 	 * fullmodeset. Likewise, in case of port synced crtcs, if one of
10101 	 * the synced crtcs needs a full modeset, all the other synced
10102 	 * crtcs should be forced to do a full modeset too.
10103 	 */
10104 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10105 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
10106 			continue;
10107 
10108 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
10109 			enum transcoder master = new_crtc_state->mst_master_transcoder;
10110 
10111 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
10112 				new_crtc_state->uapi.mode_changed = true;
10113 				new_crtc_state->update_pipe = false;
10114 			}
10115 		}
10116 
10117 		if (is_trans_port_sync_mode(new_crtc_state)) {
10118 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
10119 
10120 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
10121 				trans |= BIT(new_crtc_state->master_transcoder);
10122 
10123 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
10124 				new_crtc_state->uapi.mode_changed = true;
10125 				new_crtc_state->update_pipe = false;
10126 			}
10127 		}
10128 
10129 		if (new_crtc_state->bigjoiner) {
10130 			struct intel_crtc_state *linked_crtc_state =
10131 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
10132 
10133 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
10134 				new_crtc_state->uapi.mode_changed = true;
10135 				new_crtc_state->update_pipe = false;
10136 			}
10137 		}
10138 	}
10139 
10140 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10141 					    new_crtc_state, i) {
10142 		if (intel_crtc_needs_modeset(new_crtc_state)) {
10143 			any_ms = true;
10144 			continue;
10145 		}
10146 
10147 		if (!new_crtc_state->update_pipe)
10148 			continue;
10149 
10150 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
10151 	}
10152 
10153 	if (any_ms && !check_digital_port_conflicts(state)) {
10154 		drm_dbg_kms(&dev_priv->drm,
10155 			    "rejecting conflicting digital port configuration\n");
10156 		ret = -EINVAL;
10157 		goto fail;
10158 	}
10159 
10160 	ret = drm_dp_mst_atomic_check(&state->base);
10161 	if (ret)
10162 		goto fail;
10163 
10164 	ret = intel_atomic_check_planes(state);
10165 	if (ret)
10166 		goto fail;
10167 
10168 	intel_fbc_choose_crtc(dev_priv, state);
10169 	ret = calc_watermark_data(state);
10170 	if (ret)
10171 		goto fail;
10172 
10173 	ret = intel_bw_atomic_check(state);
10174 	if (ret)
10175 		goto fail;
10176 
10177 	ret = intel_atomic_check_cdclk(state, &any_ms);
10178 	if (ret)
10179 		goto fail;
10180 
10181 	if (intel_any_crtc_needs_modeset(state))
10182 		any_ms = true;
10183 
10184 	if (any_ms) {
10185 		ret = intel_modeset_checks(state);
10186 		if (ret)
10187 			goto fail;
10188 
10189 		ret = intel_modeset_calc_cdclk(state);
10190 		if (ret)
10191 			return ret;
10192 
10193 		intel_modeset_clear_plls(state);
10194 	}
10195 
10196 	ret = intel_atomic_check_crtcs(state);
10197 	if (ret)
10198 		goto fail;
10199 
10200 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10201 					    new_crtc_state, i) {
10202 		if (new_crtc_state->uapi.async_flip) {
10203 			ret = intel_atomic_check_async(state);
10204 			if (ret)
10205 				goto fail;
10206 		}
10207 
10208 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
10209 		    !new_crtc_state->update_pipe)
10210 			continue;
10211 
10212 		intel_dump_pipe_config(new_crtc_state, state,
10213 				       intel_crtc_needs_modeset(new_crtc_state) ?
10214 				       "[modeset]" : "[fastset]");
10215 	}
10216 
10217 	return 0;
10218 
10219  fail:
10220 	if (ret == -EDEADLK)
10221 		return ret;
10222 
10223 	/*
10224 	 * FIXME would probably be nice to know which crtc specifically
10225 	 * caused the failure, in cases where we can pinpoint it.
10226 	 */
10227 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10228 					    new_crtc_state, i)
10229 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
10230 
10231 	return ret;
10232 }
10233 
10234 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10235 {
10236 	struct intel_crtc_state *crtc_state;
10237 	struct intel_crtc *crtc;
10238 	int i, ret;
10239 
10240 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10241 	if (ret < 0)
10242 		return ret;
10243 
10244 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10245 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10246 
10247 		if (mode_changed || crtc_state->update_pipe ||
10248 		    crtc_state->uapi.color_mgmt_changed) {
10249 			intel_dsb_prepare(crtc_state);
10250 		}
10251 	}
10252 
10253 	return 0;
10254 }
10255 
10256 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10257 				  struct intel_crtc_state *crtc_state)
10258 {
10259 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10260 
10261 	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
10262 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10263 
10264 	if (crtc_state->has_pch_encoder) {
10265 		enum pipe pch_transcoder =
10266 			intel_crtc_pch_transcoder(crtc);
10267 
10268 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10269 	}
10270 }
10271 
10272 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
10273 			       const struct intel_crtc_state *new_crtc_state)
10274 {
10275 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
10276 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10277 
10278 	/*
10279 	 * Update pipe size and adjust fitter if needed: the reason for this is
10280 	 * that in compute_mode_changes we check the native mode (not the pfit
10281 	 * mode) to see if we can flip rather than do a full mode set. In the
10282 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
10283 	 * pfit state, we'll end up with a big fb scanned out into the wrong
10284 	 * sized surface.
10285 	 */
10286 	intel_set_pipe_src_size(new_crtc_state);
10287 
10288 	/* on skylake this is done by detaching scalers */
10289 	if (DISPLAY_VER(dev_priv) >= 9) {
10290 		if (new_crtc_state->pch_pfit.enabled)
10291 			skl_pfit_enable(new_crtc_state);
10292 	} else if (HAS_PCH_SPLIT(dev_priv)) {
10293 		if (new_crtc_state->pch_pfit.enabled)
10294 			ilk_pfit_enable(new_crtc_state);
10295 		else if (old_crtc_state->pch_pfit.enabled)
10296 			ilk_pfit_disable(old_crtc_state);
10297 	}
10298 
10299 	/*
10300 	 * The register is supposedly single buffered so perhaps
10301 	 * not 100% correct to do this here. But SKL+ calculate
10302 	 * this based on the adjusted pixel rate so pfit changes do
10303 	 * affect it and so it must be updated for fastsets.
10304 	 * HSW/BDW only really need this here for fastboot, after
10305 	 * that the value should not change without a full modeset.
10306 	 */
10307 	if (DISPLAY_VER(dev_priv) >= 9 ||
10308 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
10309 		hsw_set_linetime_wm(new_crtc_state);
10310 
10311 	if (DISPLAY_VER(dev_priv) >= 11)
10312 		icl_set_pipe_chicken(new_crtc_state);
10313 }
10314 
10315 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
10316 				   struct intel_crtc *crtc)
10317 {
10318 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10319 	const struct intel_crtc_state *old_crtc_state =
10320 		intel_atomic_get_old_crtc_state(state, crtc);
10321 	const struct intel_crtc_state *new_crtc_state =
10322 		intel_atomic_get_new_crtc_state(state, crtc);
10323 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10324 
10325 	/*
10326 	 * During modesets pipe configuration was programmed as the
10327 	 * CRTC was enabled.
10328 	 */
10329 	if (!modeset) {
10330 		if (new_crtc_state->uapi.color_mgmt_changed ||
10331 		    new_crtc_state->update_pipe)
10332 			intel_color_commit(new_crtc_state);
10333 
10334 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
10335 			bdw_set_pipemisc(new_crtc_state);
10336 
10337 		if (new_crtc_state->update_pipe)
10338 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
10339 
10340 		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
10341 	}
10342 
10343 	if (dev_priv->display.atomic_update_watermarks)
10344 		dev_priv->display.atomic_update_watermarks(state, crtc);
10345 }
10346 
10347 static void commit_pipe_post_planes(struct intel_atomic_state *state,
10348 				    struct intel_crtc *crtc)
10349 {
10350 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10351 	const struct intel_crtc_state *new_crtc_state =
10352 		intel_atomic_get_new_crtc_state(state, crtc);
10353 
10354 	/*
10355 	 * Disable the scaler(s) after the plane(s) so that we don't
10356 	 * get a catastrophic underrun even if the two operations
10357 	 * end up happening in two different frames.
10358 	 */
10359 	if (DISPLAY_VER(dev_priv) >= 9 &&
10360 	    !intel_crtc_needs_modeset(new_crtc_state))
10361 		skl_detach_scalers(new_crtc_state);
10362 }
10363 
10364 static void intel_enable_crtc(struct intel_atomic_state *state,
10365 			      struct intel_crtc *crtc)
10366 {
10367 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10368 	const struct intel_crtc_state *new_crtc_state =
10369 		intel_atomic_get_new_crtc_state(state, crtc);
10370 
10371 	if (!intel_crtc_needs_modeset(new_crtc_state))
10372 		return;
10373 
10374 	intel_crtc_update_active_timings(new_crtc_state);
10375 
10376 	dev_priv->display.crtc_enable(state, crtc);
10377 
10378 	if (new_crtc_state->bigjoiner_slave)
10379 		return;
10380 
10381 	/* vblanks work again, re-enable pipe CRC. */
10382 	intel_crtc_enable_pipe_crc(crtc);
10383 }
10384 
10385 static void intel_update_crtc(struct intel_atomic_state *state,
10386 			      struct intel_crtc *crtc)
10387 {
10388 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10389 	const struct intel_crtc_state *old_crtc_state =
10390 		intel_atomic_get_old_crtc_state(state, crtc);
10391 	struct intel_crtc_state *new_crtc_state =
10392 		intel_atomic_get_new_crtc_state(state, crtc);
10393 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10394 
10395 	if (!modeset) {
10396 		if (new_crtc_state->preload_luts &&
10397 		    (new_crtc_state->uapi.color_mgmt_changed ||
10398 		     new_crtc_state->update_pipe))
10399 			intel_color_load_luts(new_crtc_state);
10400 
10401 		intel_pre_plane_update(state, crtc);
10402 
10403 		if (new_crtc_state->update_pipe)
10404 			intel_encoders_update_pipe(state, crtc);
10405 	}
10406 
10407 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
10408 		intel_fbc_disable(crtc);
10409 	else
10410 		intel_fbc_enable(state, crtc);
10411 
10412 	/* Perform vblank evasion around commit operation */
10413 	intel_pipe_update_start(new_crtc_state);
10414 
10415 	commit_pipe_pre_planes(state, crtc);
10416 
10417 	if (DISPLAY_VER(dev_priv) >= 9)
10418 		skl_update_planes_on_crtc(state, crtc);
10419 	else
10420 		i9xx_update_planes_on_crtc(state, crtc);
10421 
10422 	commit_pipe_post_planes(state, crtc);
10423 
10424 	intel_pipe_update_end(new_crtc_state);
10425 
10426 	/*
10427 	 * We usually enable FIFO underrun interrupts as part of the
10428 	 * CRTC enable sequence during modesets.  But when we inherit a
10429 	 * valid pipe configuration from the BIOS we need to take care
10430 	 * of enabling them on the CRTC's first fastset.
10431 	 */
10432 	if (new_crtc_state->update_pipe && !modeset &&
10433 	    old_crtc_state->inherited)
10434 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
10435 }
10436 
10437 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
10438 					  struct intel_crtc_state *old_crtc_state,
10439 					  struct intel_crtc_state *new_crtc_state,
10440 					  struct intel_crtc *crtc)
10441 {
10442 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10443 
10444 	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
10445 
10446 	intel_encoders_pre_disable(state, crtc);
10447 
10448 	intel_crtc_disable_planes(state, crtc);
10449 
10450 	/*
10451 	 * We still need special handling for disabling bigjoiner master
10452 	 * and slaves since for a slave we do not have an encoder or PLLs,
10453 	 * so we don't need to disable those.
10454 	 */
10455 	if (old_crtc_state->bigjoiner) {
10456 		intel_crtc_disable_planes(state,
10457 					  old_crtc_state->bigjoiner_linked_crtc);
10458 		old_crtc_state->bigjoiner_linked_crtc->active = false;
10459 	}
10460 
10461 	/*
10462 	 * We need to disable pipe CRC before disabling the pipe,
10463 	 * or we race against vblank off.
10464 	 */
10465 	intel_crtc_disable_pipe_crc(crtc);
10466 
10467 	dev_priv->display.crtc_disable(state, crtc);
10468 	crtc->active = false;
10469 	intel_fbc_disable(crtc);
10470 	intel_disable_shared_dpll(old_crtc_state);
10471 
10472 	/* FIXME unify this for all platforms */
10473 	if (!new_crtc_state->hw.active &&
10474 	    !HAS_GMCH(dev_priv) &&
10475 	    dev_priv->display.initial_watermarks)
10476 		dev_priv->display.initial_watermarks(state, crtc);
10477 }
10478 
10479 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
10480 {
10481 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10482 	struct intel_crtc *crtc;
10483 	u32 handled = 0;
10484 	int i;
10485 
10486 	/* Only disable port sync and MST slaves */
10487 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10488 					    new_crtc_state, i) {
10489 		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
10490 			continue;
10491 
10492 		if (!old_crtc_state->hw.active)
10493 			continue;
10494 
10495 		/* In the case of Transcoder Port Sync, master and slave CRTCs
10496 		 * can be assigned in any order, and we need to make sure that
10497 		 * slave CRTCs are disabled first and the master CRTC after,
10498 		 * since slave vblanks are masked until the master's vblank.
10499 		 */
10500 		if (!is_trans_port_sync_slave(old_crtc_state) &&
10501 		    !intel_dp_mst_is_slave_trans(old_crtc_state))
10502 			continue;
10503 
10504 		intel_pre_plane_update(state, crtc);
10505 		intel_old_crtc_state_disables(state, old_crtc_state,
10506 					      new_crtc_state, crtc);
10507 		handled |= BIT(crtc->pipe);
10508 	}
10509 
10510 	/* Disable everything else left on */
10511 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10512 					    new_crtc_state, i) {
10513 		if (!intel_crtc_needs_modeset(new_crtc_state) ||
10514 		    (handled & BIT(crtc->pipe)) ||
10515 		    old_crtc_state->bigjoiner_slave)
10516 			continue;
10517 
10518 		intel_pre_plane_update(state, crtc);
10519 		if (old_crtc_state->bigjoiner) {
10520 			struct intel_crtc *slave =
10521 				old_crtc_state->bigjoiner_linked_crtc;
10522 
10523 			intel_pre_plane_update(state, slave);
10524 		}
10525 
10526 		if (old_crtc_state->hw.active)
10527 			intel_old_crtc_state_disables(state, old_crtc_state,
10528 						      new_crtc_state, crtc);
10529 	}
10530 }
10531 
10532 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10533 {
10534 	struct intel_crtc_state *new_crtc_state;
10535 	struct intel_crtc *crtc;
10536 	int i;
10537 
10538 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10539 		if (!new_crtc_state->hw.active)
10540 			continue;
10541 
10542 		intel_enable_crtc(state, crtc);
10543 		intel_update_crtc(state, crtc);
10544 	}
10545 }
10546 
10547 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
10548 {
10549 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10550 	struct intel_crtc *crtc;
10551 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10552 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
10553 	u8 update_pipes = 0, modeset_pipes = 0;
10554 	int i;
10555 
10556 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10557 		enum pipe pipe = crtc->pipe;
10558 
10559 		if (!new_crtc_state->hw.active)
10560 			continue;
10561 
10562 		/* ignore allocations for crtc's that have been turned off. */
10563 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
10564 			entries[pipe] = old_crtc_state->wm.skl.ddb;
10565 			update_pipes |= BIT(pipe);
10566 		} else {
10567 			modeset_pipes |= BIT(pipe);
10568 		}
10569 	}
10570 
10571 	/*
10572 	 * Whenever the number of active pipes changes, we need to make sure we
10573 	 * update the pipes in the right order so that their ddb allocations
10574 	 * never overlap with each other between CRTC updates. Otherwise we'll
10575 	 * cause pipe underruns and other bad stuff.
10576 	 *
10577 	 * So first let's enable all pipes that do not need a fullmodeset as
10578 	 * those don't have any external dependency.
10579 	 */
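	/*
	 * Illustrative example: going from active pipes {A} to {A, B}, A's
	 * DDB allocation must first shrink (with a vblank passing) before B
	 * may start using the space A gave up; the loop below iterates until
	 * every fastset pipe could be updated without such an overlap.
	 */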
10580 	while (update_pipes) {
10581 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10582 						    new_crtc_state, i) {
10583 			enum pipe pipe = crtc->pipe;
10584 
10585 			if ((update_pipes & BIT(pipe)) == 0)
10586 				continue;
10587 
10588 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10589 							entries, I915_MAX_PIPES, pipe))
10590 				continue;
10591 
10592 			entries[pipe] = new_crtc_state->wm.skl.ddb;
10593 			update_pipes &= ~BIT(pipe);
10594 
10595 			intel_update_crtc(state, crtc);
10596 
10597 			/*
10598 			 * If this is an already active pipe, its DDB changed,
10599 			 * and this isn't the last pipe that needs updating
10600 			 * then we need to wait for a vblank to pass for the
10601 			 * new ddb allocation to take effect.
10602 			 */
10603 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
10604 						 &old_crtc_state->wm.skl.ddb) &&
10605 			    (update_pipes | modeset_pipes))
10606 				intel_wait_for_vblank(dev_priv, pipe);
10607 		}
10608 	}
10609 
10610 	update_pipes = modeset_pipes;
10611 
10612 	/*
10613 	 * Enable all pipes that need a modeset and do not depend on other
10614 	 * pipes
10615 	 */
10616 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10617 		enum pipe pipe = crtc->pipe;
10618 
10619 		if ((modeset_pipes & BIT(pipe)) == 0)
10620 			continue;
10621 
10622 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10623 		    is_trans_port_sync_master(new_crtc_state) ||
10624 		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10625 			continue;
10626 
10627 		modeset_pipes &= ~BIT(pipe);
10628 
10629 		intel_enable_crtc(state, crtc);
10630 	}
10631 
10632 	/*
10633 	 * Then we enable all remaining pipes that depend on other
10634 	 * pipes: MST slaves and port sync masters, big joiner master
10635 	 */
10636 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10637 		enum pipe pipe = crtc->pipe;
10638 
10639 		if ((modeset_pipes & BIT(pipe)) == 0)
10640 			continue;
10641 
10642 		modeset_pipes &= ~BIT(pipe);
10643 
10644 		intel_enable_crtc(state, crtc);
10645 	}
10646 
10647 	/*
10648 	 * Finally we do the plane updates/etc. for all pipes that got enabled.
10649 	 */
10650 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10651 		enum pipe pipe = crtc->pipe;
10652 
10653 		if ((update_pipes & BIT(pipe)) == 0)
10654 			continue;
10655 
10656 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10657 									entries, I915_MAX_PIPES, pipe));
10658 
10659 		entries[pipe] = new_crtc_state->wm.skl.ddb;
10660 		update_pipes &= ~BIT(pipe);
10661 
10662 		intel_update_crtc(state, crtc);
10663 	}
10664 
10665 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10666 	drm_WARN_ON(&dev_priv->drm, update_pipes);
10667 }
10668 
10669 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10670 {
10671 	struct intel_atomic_state *state, *next;
10672 	struct llist_node *freed;
10673 
10674 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10675 	llist_for_each_entry_safe(state, next, freed, freed)
10676 		drm_atomic_state_put(&state->base);
10677 }
10678 
10679 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10680 {
10681 	struct drm_i915_private *dev_priv =
10682 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10683 
10684 	intel_atomic_helper_free_state(dev_priv);
10685 }
10686 
10687 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10688 {
10689 	struct wait_queue_entry wait_fence, wait_reset;
10690 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10691 
10692 	init_wait_entry(&wait_fence, 0);
10693 	init_wait_entry(&wait_reset, 0);
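	/*
	 * Sleep until whichever happens first: the commit's sw fence
	 * signals completion, or a GPU reset begins and needs the modeset
	 * state (I915_RESET_MODESET) released.
	 */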
10694 	for (;;) {
10695 		prepare_to_wait(&intel_state->commit_ready.wait,
10696 				&wait_fence, TASK_UNINTERRUPTIBLE);
10697 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10698 					      I915_RESET_MODESET),
10699 				&wait_reset, TASK_UNINTERRUPTIBLE);
10700 
10702 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
10703 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10704 			break;
10705 
10706 		schedule();
10707 	}
10708 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10709 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10710 				  I915_RESET_MODESET),
10711 		    &wait_reset);
10712 }
10713 
10714 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10715 {
10716 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10717 	struct intel_crtc *crtc;
10718 	int i;
10719 
10720 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10721 					    new_crtc_state, i)
10722 		intel_dsb_cleanup(old_crtc_state);
10723 }
10724 
10725 static void intel_atomic_cleanup_work(struct work_struct *work)
10726 {
10727 	struct intel_atomic_state *state =
10728 		container_of(work, struct intel_atomic_state, base.commit_work);
10729 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10730 
10731 	intel_cleanup_dsbs(state);
10732 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10733 	drm_atomic_helper_commit_cleanup_done(&state->base);
10734 	drm_atomic_state_put(&state->base);
10735 
10736 	intel_atomic_helper_free_state(i915);
10737 }
10738 
10739 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10740 {
10741 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10742 	struct intel_plane *plane;
10743 	struct intel_plane_state *plane_state;
10744 	int i;
10745 
10746 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10747 		struct drm_framebuffer *fb = plane_state->hw.fb;
10748 		int ret;
10749 
10750 		if (!fb ||
10751 		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10752 			continue;
10753 
10754 		/*
10755 		 * The layout of the fast clear color value expected by HW
10756 		 * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
10757 		 * - 4 x 4 bytes per-channel value
10758 		 *   (in surface type specific float/int format provided by the fb user)
10759 		 * - 8 bytes native color value used by the display
10760 		 *   (converted/written by GPU during a fast clear operation using the
10761 		 *    above per-channel values)
10762 		 *
10763 		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
10764 		 * caller made sure that the object is synced wrt. the related color clear value
10765 		 * GPU write on it.
10766 		 */
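		/*
		 * I.e. within plane #2 of the fb (illustrative layout):
		 *   bytes  0..15: 4 x 32-bit per-channel clear values
		 *   bytes 16..23: 64-bit native clear color, read below
		 */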
10767 		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10768 						     fb->offsets[2] + 16,
10769 						     &plane_state->ccval,
10770 						     sizeof(plane_state->ccval));
10771 		/* The above could only fail if the FB obj has an unexpected backing store type. */
10772 		drm_WARN_ON(&i915->drm, ret);
10773 	}
10774 }
10775 
10776 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10777 {
10778 	struct drm_device *dev = state->base.dev;
10779 	struct drm_i915_private *dev_priv = to_i915(dev);
10780 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10781 	struct intel_crtc *crtc;
10782 	u64 put_domains[I915_MAX_PIPES] = {};
10783 	intel_wakeref_t wakeref = 0;
10784 	int i;
10785 
10786 	intel_atomic_commit_fence_wait(state);
10787 
10788 	drm_atomic_helper_wait_for_dependencies(&state->base);
10789 
10790 	if (state->modeset)
10791 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10792 
10793 	intel_atomic_prepare_plane_clear_colors(state);
10794 
10795 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10796 					    new_crtc_state, i) {
10797 		if (intel_crtc_needs_modeset(new_crtc_state) ||
10798 		    new_crtc_state->update_pipe) {
10799 
10800 			put_domains[crtc->pipe] =
10801 				modeset_get_crtc_power_domains(new_crtc_state);
10802 		}
10803 	}
10804 
10805 	intel_commit_modeset_disables(state);
10806 
10807 	/* FIXME: Eventually get rid of our crtc->config pointer */
10808 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10809 		crtc->config = new_crtc_state;
10810 
10811 	if (state->modeset) {
10812 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10813 
10814 		intel_set_cdclk_pre_plane_update(state);
10815 
10816 		intel_modeset_verify_disabled(dev_priv, state);
10817 	}
10818 
10819 	intel_sagv_pre_plane_update(state);
10820 
10821 	/* Complete the events for pipes that have now been disabled */
10822 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10823 		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10824 
10825 		/* Complete events for now disable pipes here. */
10826 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10827 			spin_lock_irq(&dev->event_lock);
10828 			drm_crtc_send_vblank_event(&crtc->base,
10829 						   new_crtc_state->uapi.event);
10830 			spin_unlock_irq(&dev->event_lock);
10831 
10832 			new_crtc_state->uapi.event = NULL;
10833 		}
10834 	}
10835 
10836 	if (state->modeset)
10837 		intel_encoders_update_prepare(state);
10838 
10839 	intel_dbuf_pre_plane_update(state);
10840 
10841 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10842 		if (new_crtc_state->uapi.async_flip)
10843 			intel_crtc_enable_flip_done(state, crtc);
10844 	}
10845 
10846 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10847 	dev_priv->display.commit_modeset_enables(state);
10848 
10849 	if (state->modeset) {
10850 		intel_encoders_update_complete(state);
10851 
10852 		intel_set_cdclk_post_plane_update(state);
10853 	}
10854 
10855 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10856 	 * already, but still need the state for the delayed optimization. To
10857 	 * fix this:
10858 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10859 	 * - schedule that vblank worker _before_ calling hw_done
10860 	 * - at the start of commit_tail, cancel it _synchrously
10861 	 * - switch over to the vblank wait helper in the core after that since
10862 	 *   we don't need out special handling any more.
10863 	 */
10864 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10865 
10866 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10867 		if (new_crtc_state->uapi.async_flip)
10868 			intel_crtc_disable_flip_done(state, crtc);
10869 
10870 		if (new_crtc_state->hw.active &&
10871 		    !intel_crtc_needs_modeset(new_crtc_state) &&
10872 		    !new_crtc_state->preload_luts &&
10873 		    (new_crtc_state->uapi.color_mgmt_changed ||
10874 		     new_crtc_state->update_pipe))
10875 			intel_color_load_luts(new_crtc_state);
10876 	}
10877 
10878 	/*
10879 	 * Now that the vblank has passed, we can go ahead and program the
10880 	 * optimal watermarks on platforms that need two-step watermark
10881 	 * programming.
10882 	 *
10883 	 * TODO: Move this (and other cleanup) to an async worker eventually.
10884 	 */
10885 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10886 					    new_crtc_state, i) {
10887 		/*
10888 		 * Gen2 reports pipe underruns whenever all planes are disabled.
10889 		 * So re-enable underrun reporting after some planes get enabled.
10890 		 *
10891 		 * We do this before .optimize_watermarks() so that we have a
10892 		 * chance of catching underruns with the intermediate watermarks
10893 		 * vs. the new plane configuration.
10894 		 */
10895 		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10896 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10897 
10898 		if (dev_priv->display.optimize_watermarks)
10899 			dev_priv->display.optimize_watermarks(state, crtc);
10900 	}
10901 
10902 	intel_dbuf_post_plane_update(state);
10903 
10904 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10905 		intel_post_plane_update(state, crtc);
10906 
10907 		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10908 
10909 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10910 
10911 		/*
10912 		 * DSB cleanup is done in cleanup_work aligning with framebuffer
10913 		 * cleanup. So copy and reset the dsb structure to sync with
10914 		 * commit_done and later do dsb cleanup in cleanup_work.
10915 		 */
10916 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10917 	}
10918 
10919 	/* Underruns don't always raise interrupts, so check manually */
10920 	intel_check_cpu_fifo_underruns(dev_priv);
10921 	intel_check_pch_fifo_underruns(dev_priv);
10922 
10923 	if (state->modeset)
10924 		intel_verify_planes(state);
10925 
10926 	intel_sagv_post_plane_update(state);
10927 
10928 	drm_atomic_helper_commit_hw_done(&state->base);
10929 
10930 	if (state->modeset) {
10931 		/* As one of the primary mmio accessors, KMS has a high
10932 		 * likelihood of triggering bugs in unclaimed access. After we
10933 		 * finish modesetting, see if an error has been flagged, and if
10934 		 * so enable debugging for the next modeset - and hope we catch
10935 		 * the culprit.
10936 		 */
10937 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10938 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10939 	}
10940 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10941 
10942 	/*
10943 	 * Defer the cleanup of the old state to a separate worker to not
10944 	 * impede the current task (userspace for blocking modesets) that
10945 	 * are executed inline. For out-of-line asynchronous modesets/flips,
10946 	 * deferring to a new worker seems overkill, but we would place a
10947 	 * schedule point (cond_resched()) here anyway to keep latencies
10948 	 * down.
10949 	 */
10950 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10951 	queue_work(system_highpri_wq, &state->base.commit_work);
10952 }
10953 
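/* Worker entry point for nonblocking commits queued by intel_atomic_commit(). */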
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}

static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}

static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}

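/*
 * The drm_mode_config_funcs.atomic_commit hook: prepare the commit
 * (pinning framebuffers and collecting fences via commit_ready), swap
 * the new state in, and then either run intel_atomic_commit_tail()
 * inline (blocking) or queue it on the modeset/flip workqueue
 * (nonblocking).
 */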
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};

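/*
 * Wait-queue callback invoked from the CRTC's vblank waitqueue (see
 * add_rps_boost_after_vblank()): if the flip's request hasn't even
 * started by the next vblank, boost the GPU clocks to get it done.
 */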
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running, it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}

static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		drm_crtc_vblank_put(crtc);
		return;
	}

	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}

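/*
 * Pin a framebuffer for scanout. Framebuffers are either pinned
 * directly into the GGTT, or, on platforms using a display page table
 * (DPT), the DPT itself is pinned into the GGTT and the fb is then
 * bound into that DPT.
 */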
int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;
	bool phys_cursor =
		plane->id == PLANE_CURSOR &&
		INTEL_INFO(dev_priv)->display.cursor_needs_physical;

	if (!intel_fb_uses_dpt(fb)) {
		vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
						 &plane_state->view.gtt,
						 intel_plane_uses_fence(plane_state),
						 &plane_state->flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		plane_state->ggtt_vma = vma;
	} else {
		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

		vma = intel_dpt_pin(intel_fb->dpt_vm);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		plane_state->ggtt_vma = vma;

		vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
					   &plane_state->flags, intel_fb->dpt_vm);
		if (IS_ERR(vma)) {
			intel_dpt_unpin(intel_fb->dpt_vm);
			plane_state->ggtt_vma = NULL;
			return PTR_ERR(vma);
		}

		plane_state->dpt_vma = vma;

		WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
	}

	return 0;
}

void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
	struct drm_framebuffer *fb = old_plane_state->hw.fb;
	struct i915_vma *vma;

	if (!intel_fb_uses_dpt(fb)) {
		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
		if (vma)
			intel_unpin_fb_vma(vma, old_plane_state->flags);
	} else {
		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

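		/*
		 * Mirror of intel_plane_pin_fb(): dpt_vma is the binding
		 * inside the DPT, while ggtt_vma tracks the pinned DPT
		 * itself, released via intel_dpt_unpin() below.
		 */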
		vma = fetch_and_zero(&old_plane_state->dpt_vma);
		if (vma)
			intel_unpin_fb_vma(vma, old_plane_state->flags);

		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
		if (vma)
			intel_dpt_unpin(intel_fb->dpt_vm);
	}
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @_plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *_plane,
		       struct drm_plane_state *_new_plane_state)
{
	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
	int ret;

	if (old_obj) {
		const struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state,
							to_intel_crtc(old_plane_state->hw.crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (intel_crtc_needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
					     &attr);
		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
						    new_plane_state->uapi.fence,
						    i915_fence_timeout(dev_priv),
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		return ret;

	i915_gem_object_wait_priority(obj, 0, &attr);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&state->commit_ready,
						      obj->base.resv, NULL,
						      false,
						      i915_fence_timeout(dev_priv),
						      GFP_KERNEL);
		if (ret < 0)
			goto unpin_fb;

		fence = dma_resv_get_excl_unlocked(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		state->rps_interactive = true;
	}

	return 0;

unpin_fb:
	intel_plane_unpin_fb(new_plane_state);

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_old_plane_state)
{
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(_old_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(old_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);

	if (!obj)
		return;

	if (state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
		state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(old_plane_state);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}

int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	u32 possible_clones = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			possible_clones |= drm_encoder_mask(&source_encoder->base);
	}

	return possible_clones;
}

static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc;
	u32 possible_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		if (encoder->pipe_mask & BIT(crtc->pipe))
			possible_crtcs |= drm_crtc_mask(&crtc->base);
	}

	return possible_crtcs;
}

static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

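/*
 * Register the output encoders: on recent DDI platforms a fixed,
 * platform specific set of ports is registered, while older platforms
 * probe hardware strap/detect bits (and the VBT) to decide which
 * outputs exist.
 */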
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_DG2(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D_XELPD);
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		icl_dsi_init(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		vlv_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
	} else if (HAS_DDI(dev_priv)) {
		u32 found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/* Haswell uses DDI functions to detect digital outputs. */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		if (found)
			intel_ddi_init(dev_priv, PORT_A);

		found = intel_de_read(dev_priv, SFUSE_STRAP);
		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB is multiplexed with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X, SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);

	if (intel_fb_uses_dpt(fb))
		intel_dpt_destroy(intel_fb->dpt_vm);

	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (i915_gem_object_is_userptr(obj)) {
		drm_dbg(&i915->drm,
			"attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

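/*
 * Validate the userspace provided framebuffer parameters (modifier vs.
 * object tiling, pixel format, stride alignment and limits, plane
 * offsets) and initialize the intel_framebuffer, including its DPT on
 * platforms that use one.
 */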
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (DISPLAY_VER(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		/* TODO: Add POT stride remapping support for CCS formats as well. */
		if (IS_ALDERLAKE_P(dev_priv) &&
		    mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
		    !intel_fb_needs_pot_stride_remap(intel_fb) &&
		    !is_power_of_2(mode_cmd->pitches[i])) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be power of two for tiled buffers\n",
				    i, mode_cmd->pitches[i]);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, intel_fb);
	if (ret)
		goto err;

	if (intel_fb_uses_dpt(fb)) {
		struct i915_address_space *vm;

		vm = intel_dpt_create(intel_fb);
		if (IS_ERR(vm)) {
			ret = PTR_ERR(vm);
			goto err;
		}

		intel_fb->dpt_vm = vm;
	}

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}

static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
	struct drm_i915_private *i915;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	/* object is backed with LMEM for discrete */
	i915 = to_i915(obj->base.dev);
	if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
		/* object is "remote", not in local memory */
		i915_gem_object_put(obj);
		return ERR_PTR(-EREMOTE);
	}

	fb = intel_framebuffer_create(obj, &mode_cmd);
	i915_gem_object_put(obj);

	return fb;
}

static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}

enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				bool bigjoiner)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 << bigjoiner;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_init_cdclk_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);

	intel_dpll_init_clock_hook(dev_priv);

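	/*
	 * Note: the DISPLAY_VER() >= 9 and HAS_DDI() branches below
	 * install the same hsw_* hooks; they are kept separate,
	 * presumably to keep the platform checks explicit.
	 */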
	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	intel_fdi_init_hook(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
	} else {
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
	}
}

void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(i915))
		return;

	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}

static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

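/*
 * Cache the FDI PLL frequency (in the driver's usual kHz units) for
 * later FDI link calculations; on Sandybridge/Ivybridge it is a fixed
 * 270 MHz, on Ironlake it is read back from what the BIOS programmed.
 */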
intel_update_fdi_pll_freq(struct drm_i915_private * dev_priv)12274 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12275 {
12276 	if (IS_IRONLAKE(dev_priv)) {
12277 		u32 fdi_pll_clk =
12278 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12279 
12280 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12281 	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
12282 		dev_priv->fdi_pll_freq = 270000;
12283 	} else {
12284 		return;
12285 	}
12286 
12287 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12288 }
12289 
intel_initial_commit(struct drm_device * dev)12290 static int intel_initial_commit(struct drm_device *dev)
12291 {
12292 	struct drm_atomic_state *state = NULL;
12293 	struct drm_modeset_acquire_ctx ctx;
12294 	struct intel_crtc *crtc;
12295 	int ret = 0;
12296 
12297 	state = drm_atomic_state_alloc(dev);
12298 	if (!state)
12299 		return -ENOMEM;
12300 
12301 	drm_modeset_acquire_init(&ctx, 0);
12302 
12303 retry:
12304 	state->acquire_ctx = &ctx;
12305 
12306 	for_each_intel_crtc(dev, crtc) {
12307 		struct intel_crtc_state *crtc_state =
12308 			intel_atomic_get_crtc_state(state, crtc);
12309 
12310 		if (IS_ERR(crtc_state)) {
12311 			ret = PTR_ERR(crtc_state);
12312 			goto out;
12313 		}
12314 
12315 		if (crtc_state->hw.active) {
12316 			struct intel_encoder *encoder;
12317 
12318 			/*
12319 			 * We've not yet detected sink capabilities
12320 			 * (audio,infoframes,etc.) and thus we don't want to
12321 			 * force a full state recomputation yet. We want that to
12322 			 * happen only for the first real commit from userspace.
12323 			 * So preserve the inherited flag for the time being.
12324 			 */
12325 			crtc_state->inherited = true;
12326 
12327 			ret = drm_atomic_add_affected_planes(state, &crtc->base);
12328 			if (ret)
12329 				goto out;
12330 
12331 			/*
12332 			 * FIXME hack to force a LUT update to avoid the
12333 			 * plane update forcing the pipe gamma on without
12334 			 * having a proper LUT loaded. Remove once we
12335 			 * have readout for pipe gamma enable.
12336 			 */
12337 			crtc_state->uapi.color_mgmt_changed = true;
12338 
12339 			for_each_intel_encoder_mask(dev, encoder,
12340 						    crtc_state->uapi.encoder_mask) {
12341 				if (encoder->initial_fastset_check &&
12342 				    !encoder->initial_fastset_check(encoder, crtc_state)) {
12343 					ret = drm_atomic_add_affected_connectors(state,
12344 										 &crtc->base);
12345 					if (ret)
12346 						goto out;
12347 				}
12348 			}
12349 		}
12350 	}
12351 
12352 	ret = drm_atomic_commit(state);
12353 
12354 out:
12355 	if (ret == -EDEADLK) {
12356 		drm_atomic_state_clear(state);
12357 		drm_modeset_backoff(&ctx);
12358 		goto retry;
12359 	}
12360 
12361 	drm_atomic_state_put(state);
12362 
12363 	drm_modeset_drop_locks(&ctx);
12364 	drm_modeset_acquire_fini(&ctx);
12365 
12366 	return ret;
12367 }
12368 
12369 static void intel_mode_config_init(struct drm_i915_private *i915)
12370 {
12371 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
12372 
12373 	drm_mode_config_init(&i915->drm);
12374 	INIT_LIST_HEAD(&i915->global_obj_list);
12375 
12376 	mode_config->min_width = 0;
12377 	mode_config->min_height = 0;
12378 
12379 	mode_config->preferred_depth = 24;
12380 	mode_config->prefer_shadow = 1;
12381 
12382 	mode_config->funcs = &intel_mode_funcs;
12383 
12384 	mode_config->async_page_flip = has_async_flips(i915);
12385 
12386 	/*
12387 	 * Maximum framebuffer dimensions, chosen to match
12388 	 * the maximum render engine surface size on gen4+.
12389 	 */
12390 	if (DISPLAY_VER(i915) >= 7) {
12391 		mode_config->max_width = 16384;
12392 		mode_config->max_height = 16384;
12393 	} else if (DISPLAY_VER(i915) >= 4) {
12394 		mode_config->max_width = 8192;
12395 		mode_config->max_height = 8192;
12396 	} else if (DISPLAY_VER(i915) == 3) {
12397 		mode_config->max_width = 4096;
12398 		mode_config->max_height = 4096;
12399 	} else {
12400 		mode_config->max_width = 2048;
12401 		mode_config->max_height = 2048;
12402 	}
12403 
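	/* Per-platform hardware cursor size limits. */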
12404 	if (IS_I845G(i915) || IS_I865G(i915)) {
12405 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12406 		mode_config->cursor_height = 1023;
12407 	} else if (IS_I830(i915) || IS_I85X(i915) ||
12408 		   IS_I915G(i915) || IS_I915GM(i915)) {
12409 		mode_config->cursor_width = 64;
12410 		mode_config->cursor_height = 64;
12411 	} else {
12412 		mode_config->cursor_width = 256;
12413 		mode_config->cursor_height = 256;
12414 	}
12415 }
12416 
12417 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
12418 {
12419 	intel_atomic_global_obj_cleanup(i915);
12420 	drm_mode_config_cleanup(&i915->drm);
12421 }
12422 
12423 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12424 {
12425 	if (plane_config->fb) {
12426 		struct drm_framebuffer *fb = &plane_config->fb->base;
12427 
12428 		/* We may only have the stub and not a full framebuffer */
12429 		if (drm_framebuffer_read_refcount(fb))
12430 			drm_framebuffer_put(fb);
12431 		else
12432 			kfree(fb);
12433 	}
12434 
12435 	if (plane_config->vma)
12436 		i915_vma_put(plane_config->vma);
12437 }
12438 
12439 /* part #1: call before irq install */
12440 int intel_modeset_init_noirq(struct drm_i915_private *i915)
12441 {
12442 	int ret;
12443 
12444 	if (i915_inject_probe_failure(i915))
12445 		return -ENODEV;
12446 
12447 	if (HAS_DISPLAY(i915)) {
12448 		ret = drm_vblank_init(&i915->drm,
12449 				      INTEL_NUM_PIPES(i915));
12450 		if (ret)
12451 			return ret;
12452 	}
12453 
12454 	intel_bios_init(i915);
12455 
12456 	ret = intel_vga_register(i915);
12457 	if (ret)
12458 		goto cleanup_bios;
12459 
12460 	/* FIXME: completely on the wrong abstraction layer */
12461 	intel_power_domains_init_hw(i915, false);
12462 
12463 	if (!HAS_DISPLAY(i915))
12464 		return 0;
12465 
12466 	intel_dmc_ucode_init(i915);
12467 
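	/*
	 * An ordered wq so that modeset commits are serialized, plus a
	 * separate high-priority wq for page-flip work.
	 */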
12468 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
12469 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
12470 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
12471 
	i915->framestart_delay = 1; /* valid range: 1-4 */
12473 
12474 	i915->window2_delay = 0; /* No DSB so no window2 delay */
12475 
12476 	intel_mode_config_init(i915);
12477 
12478 	ret = intel_cdclk_init(i915);
12479 	if (ret)
12480 		goto cleanup_vga_client_pw_domain_dmc;
12481 
12482 	ret = intel_dbuf_init(i915);
12483 	if (ret)
12484 		goto cleanup_vga_client_pw_domain_dmc;
12485 
12486 	ret = intel_bw_init(i915);
12487 	if (ret)
12488 		goto cleanup_vga_client_pw_domain_dmc;
12489 
12490 	init_llist_head(&i915->atomic_helper.free_list);
12491 	INIT_WORK(&i915->atomic_helper.free_work,
12492 		  intel_atomic_helper_free_state_worker);
12493 
12494 	intel_init_quirks(i915);
12495 
12496 	intel_fbc_init(i915);
12497 
12498 	return 0;
12499 
12500 cleanup_vga_client_pw_domain_dmc:
12501 	intel_dmc_ucode_fini(i915);
12502 	intel_power_domains_driver_remove(i915);
12503 	intel_vga_unregister(i915);
12504 cleanup_bios:
12505 	intel_bios_driver_remove(i915);
12506 
12507 	return ret;
12508 }
12509 
12510 /* part #2: call after irq install, but before gem init */
12511 int intel_modeset_init_nogem(struct drm_i915_private *i915)
12512 {
12513 	struct drm_device *dev = &i915->drm;
12514 	enum pipe pipe;
12515 	struct intel_crtc *crtc;
12516 	int ret;
12517 
12518 	if (!HAS_DISPLAY(i915))
12519 		return 0;
12520 
12521 	intel_init_pm(i915);
12522 
12523 	intel_panel_sanitize_ssc(i915);
12524 
12525 	intel_pps_setup(i915);
12526 
12527 	intel_gmbus_setup(i915);
12528 
12529 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
12530 		    INTEL_NUM_PIPES(i915),
12531 		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
12532 
12533 	for_each_pipe(i915, pipe) {
12534 		ret = intel_crtc_init(i915, pipe);
12535 		if (ret) {
12536 			intel_mode_config_cleanup(i915);
12537 			return ret;
12538 		}
12539 	}
12540 
12541 	intel_plane_possible_crtcs_init(i915);
12542 	intel_shared_dpll_init(dev);
12543 	intel_update_fdi_pll_freq(i915);
12544 
12545 	intel_update_czclk(i915);
12546 	intel_modeset_init_hw(i915);
12547 	intel_dpll_update_ref_clks(i915);
12548 
12549 	intel_hdcp_component_init(i915);
12550 
12551 	if (i915->max_cdclk_freq == 0)
12552 		intel_update_max_cdclk(i915);
12553 
12554 	/*
12555 	 * If the platform has HTI, we need to find out whether it has reserved
12556 	 * any display resources before we create our display outputs.
12557 	 */
12558 	if (INTEL_INFO(i915)->display.has_hti)
12559 		i915->hti_state = intel_de_read(i915, HDPORT_STATE);
12560 
12561 	/* Just disable it once at startup */
12562 	intel_vga_disable(i915);
12563 	intel_setup_outputs(i915);
12564 
12565 	drm_modeset_lock_all(dev);
12566 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
12567 	drm_modeset_unlock_all(dev);
12568 
12569 	for_each_intel_crtc(dev, crtc) {
12570 		struct intel_initial_plane_config plane_config = {};
12571 
12572 		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
12573 			continue;
12574 
12575 		/*
12576 		 * Note that reserving the BIOS fb up front prevents us
12577 		 * from stuffing other stolen allocations like the ring
12578 		 * on top.  This prevents some ugliness at boot time, and
12579 		 * can even allow for smooth boot transitions if the BIOS
12580 		 * fb is large enough for the active pipe configuration.
12581 		 */
12582 		i915->display.get_initial_plane_config(crtc, &plane_config);
12583 
12584 		/*
12585 		 * If the fb is shared between multiple heads, we'll
12586 		 * just get the first one.
12587 		 */
12588 		intel_find_initial_plane_obj(crtc, &plane_config);
12589 
12590 		plane_config_fini(&plane_config);
12591 	}
12592 
12593 	/*
12594 	 * Make sure hardware watermarks really match the state we read out.
12595 	 * Note that we need to do this after reconstructing the BIOS fb's
12596 	 * since the watermark calculation done here will use pstate->fb.
12597 	 */
12598 	if (!HAS_GMCH(i915))
12599 		sanitize_watermarks(i915);
12600 
12601 	return 0;
12602 }
12603 
12604 /* part #3: call after gem init */
12605 int intel_modeset_init(struct drm_i915_private *i915)
12606 {
12607 	int ret;
12608 
12609 	if (!HAS_DISPLAY(i915))
12610 		return 0;
12611 
12612 	/*
	 * Force all active planes to recompute their states, so that on
	 * mode_setcrtc after probe all the intel_plane_state variables
	 * are already calculated and there are no assert_plane warnings
	 * during bootup.
12617 	 */
12618 	ret = intel_initial_commit(&i915->drm);
12619 	if (ret)
12620 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
12621 
12622 	intel_overlay_setup(i915);
12623 
12624 	ret = intel_fbdev_init(&i915->drm);
12625 	if (ret)
12626 		return ret;
12627 
12628 	/* Only enable hotplug handling once the fbdev is fully set up. */
12629 	intel_hpd_init(i915);
12630 	intel_hpd_poll_disable(i915);
12631 
12632 	intel_init_ipc(i915);
12633 
12634 	return 0;
12635 }
12636 
12637 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12638 {
12639 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12640 	/* 640x480@60Hz, ~25175 kHz */
12641 	struct dpll clock = {
12642 		.m1 = 18,
12643 		.m2 = 7,
12644 		.p1 = 13,
12645 		.p2 = 4,
12646 		.n = 2,
12647 	};
12648 	u32 dpll, fp;
12649 	int i;
12650 
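	/*
	 * Sanity check: with the 48 MHz reference these dividers should
	 * produce a 25154 kHz dot clock, close to the nominal 25175 kHz
	 * VGA pixel clock.
	 */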
12651 	drm_WARN_ON(&dev_priv->drm,
12652 		    i9xx_calc_dpll_params(48000, &clock) != 25154);
12653 
12654 	drm_dbg_kms(&dev_priv->drm,
12655 		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
12656 		    pipe_name(pipe), clock.vco, clock.dot);
12657 
12658 	fp = i9xx_dpll_compute_fp(&clock);
12659 	dpll = DPLL_DVO_2X_MODE |
12660 		DPLL_VGA_MODE_DIS |
12661 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
12662 		PLL_P2_DIVIDE_BY_4 |
12663 		PLL_REF_INPUT_DREFCLK |
12664 		DPLL_VCO_ENABLE;
12665 
12666 	intel_de_write(dev_priv, FP0(pipe), fp);
12667 	intel_de_write(dev_priv, FP1(pipe), fp);
12668 
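	/* Timing registers take N-1 values; 640x480@60 uses htotal 800, vtotal 525. */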
12669 	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
12670 	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
12671 	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
12672 	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
12673 	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
12674 	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
12675 	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
12676 
12677 	/*
12678 	 * Apparently we need to have VGA mode enabled prior to changing
12679 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12680 	 * dividers, even though the register value does change.
12681 	 */
12682 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12683 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12684 
12685 	/* Wait for the clocks to stabilize. */
12686 	intel_de_posting_read(dev_priv, DPLL(pipe));
12687 	udelay(150);
12688 
12689 	/* The pixel multiplier can only be updated once the
12690 	 * DPLL is enabled and the clocks are stable.
12691 	 *
12692 	 * So write it again.
12693 	 */
12694 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12695 
12696 	/* We do this three times for luck */
12697 	for (i = 0; i < 3 ; i++) {
12698 		intel_de_write(dev_priv, DPLL(pipe), dpll);
12699 		intel_de_posting_read(dev_priv, DPLL(pipe));
12700 		udelay(150); /* wait for warmup */
12701 	}
12702 
12703 	intel_de_write(dev_priv, PIPECONF(pipe),
12704 		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12705 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12706 
12707 	intel_wait_for_pipe_scanline_moving(crtc);
12708 }
12709 
12710 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12711 {
12712 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12713 
12714 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
12715 		    pipe_name(pipe));
12716 
12717 	drm_WARN_ON(&dev_priv->drm,
12718 		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12719 		    DISPLAY_PLANE_ENABLE);
12720 	drm_WARN_ON(&dev_priv->drm,
12721 		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12722 		    DISPLAY_PLANE_ENABLE);
12723 	drm_WARN_ON(&dev_priv->drm,
12724 		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12725 		    DISPLAY_PLANE_ENABLE);
12726 	drm_WARN_ON(&dev_priv->drm,
12727 		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12728 	drm_WARN_ON(&dev_priv->drm,
12729 		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12730 
12731 	intel_de_write(dev_priv, PIPECONF(pipe), 0);
12732 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12733 
12734 	intel_wait_for_pipe_scanline_stopped(crtc);
12735 
12736 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12737 	intel_de_posting_read(dev_priv, DPLL(pipe));
12738 }
12739 
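/*
 * On pre-gen4 hardware the primary planes can be assigned to either
 * pipe; disable any plane that the BIOS left attached to the wrong
 * pipe for its crtc.
 */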
12740 static void
12741 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12742 {
12743 	struct intel_crtc *crtc;
12744 
12745 	if (DISPLAY_VER(dev_priv) >= 4)
12746 		return;
12747 
12748 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12749 		struct intel_plane *plane =
12750 			to_intel_plane(crtc->base.primary);
12751 		struct intel_crtc *plane_crtc;
12752 		enum pipe pipe;
12753 
12754 		if (!plane->get_hw_state(plane, &pipe))
12755 			continue;
12756 
12757 		if (pipe == crtc->pipe)
12758 			continue;
12759 
12760 		drm_dbg_kms(&dev_priv->drm,
12761 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12762 			    plane->base.base.id, plane->base.name);
12763 
12764 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12765 		intel_plane_disable_noatomic(plane_crtc, plane);
12766 	}
12767 }
12768 
12769 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12770 {
12771 	struct drm_device *dev = crtc->base.dev;
12772 	struct intel_encoder *encoder;
12773 
12774 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12775 		return true;
12776 
12777 	return false;
12778 }
12779 
12780 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12781 {
12782 	struct drm_device *dev = encoder->base.dev;
12783 	struct intel_connector *connector;
12784 
12785 	for_each_connector_on_encoder(dev, &encoder->base, connector)
12786 		return connector;
12787 
12788 	return NULL;
12789 }
12790 
12791 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12792 			      enum pipe pch_transcoder)
12793 {
12794 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12795 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12796 }
12797 
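/*
 * Reprogram any BIOS-modified frame start delay back to the value the
 * driver expects, on both the CPU transcoder and, where applicable,
 * the PCH transcoder.
 */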
12798 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12799 {
12800 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12801 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12802 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12803 
12804 	if (DISPLAY_VER(dev_priv) >= 9 ||
12805 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12806 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12807 		u32 val;
12808 
12809 		if (transcoder_is_dsi(cpu_transcoder))
12810 			return;
12811 
12812 		val = intel_de_read(dev_priv, reg);
12813 		val &= ~HSW_FRAME_START_DELAY_MASK;
12814 		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12815 		intel_de_write(dev_priv, reg, val);
12816 	} else {
12817 		i915_reg_t reg = PIPECONF(cpu_transcoder);
12818 		u32 val;
12819 
12820 		val = intel_de_read(dev_priv, reg);
12821 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12822 		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12823 		intel_de_write(dev_priv, reg, val);
12824 	}
12825 
12826 	if (!crtc_state->has_pch_encoder)
12827 		return;
12828 
12829 	if (HAS_PCH_IBX(dev_priv)) {
12830 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12831 		u32 val;
12832 
12833 		val = intel_de_read(dev_priv, reg);
12834 		val &= ~TRANS_FRAME_START_DELAY_MASK;
12835 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12836 		intel_de_write(dev_priv, reg, val);
12837 	} else {
12838 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12839 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12840 		u32 val;
12841 
12842 		val = intel_de_read(dev_priv, reg);
12843 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12844 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12845 		intel_de_write(dev_priv, reg, val);
12846 	}
12847 }
12848 
12849 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12850 				struct drm_modeset_acquire_ctx *ctx)
12851 {
12852 	struct drm_device *dev = crtc->base.dev;
12853 	struct drm_i915_private *dev_priv = to_i915(dev);
12854 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12855 
12856 	if (crtc_state->hw.active) {
12857 		struct intel_plane *plane;
12858 
12859 		/* Clear any frame start delays used for debugging left by the BIOS */
12860 		intel_sanitize_frame_start_delay(crtc_state);
12861 
12862 		/* Disable everything but the primary plane */
12863 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
12864 			const struct intel_plane_state *plane_state =
12865 				to_intel_plane_state(plane->base.state);
12866 
12867 			if (plane_state->uapi.visible &&
12868 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
12869 				intel_plane_disable_noatomic(crtc, plane);
12870 		}
12871 
12872 		/*
12873 		 * Disable any background color set by the BIOS, but enable the
12874 		 * gamma and CSC to match how we program our planes.
12875 		 */
12876 		if (DISPLAY_VER(dev_priv) >= 9)
12877 			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
12878 				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
12879 	}
12880 
12881 	/* Adjust the state of the output pipe according to whether we
12882 	 * have active connectors/encoders. */
12883 	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
12884 	    !crtc_state->bigjoiner_slave)
12885 		intel_crtc_disable_noatomic(crtc, ctx);
12886 
12887 	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
12888 		/*
12889 		 * We start out with underrun reporting disabled to avoid races.
12890 		 * For correct bookkeeping mark this on active crtcs.
12891 		 *
		 * Also, on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting, which means we need to start
12894 		 * out with underrun reporting disabled also on inactive pipes,
12895 		 * since otherwise we'll complain about the garbage we read when
12896 		 * e.g. coming up after runtime pm.
12897 		 *
12898 		 * No protection against concurrent access is required - at
12899 		 * worst a fifo underrun happens which also sets this to false.
12900 		 */
12901 		crtc->cpu_fifo_underrun_disabled = true;
12902 		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existent
12908 		 * PCH transcoders B and C would prevent enabling the south
12909 		 * error interrupt (see cpt_can_enable_serr_int()).
12910 		 */
12911 		if (has_pch_trancoder(dev_priv, crtc->pipe))
12912 			crtc->pch_fifo_underrun_disabled = true;
12913 	}
12914 }
12915 
12916 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12917 {
12918 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12919 
12920 	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
12924 	 * try to disable everything in that case.
12925 	 *
12926 	 * FIXME would be nice to be able to sanitize this state
12927 	 * without several WARNs, but for now let's take the easy
12928 	 * road.
12929 	 */
12930 	return IS_SANDYBRIDGE(dev_priv) &&
12931 		crtc_state->hw.active &&
12932 		crtc_state->shared_dpll &&
12933 		crtc_state->port_clock == 0;
12934 }
12935 
12936 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12937 {
12938 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12939 	struct intel_connector *connector;
12940 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
12941 	struct intel_crtc_state *crtc_state = crtc ?
12942 		to_intel_crtc_state(crtc->base.state) : NULL;
12943 
12944 	/* We need to check both for a crtc link (meaning that the
12945 	 * encoder is active and trying to read from a pipe) and the
12946 	 * pipe itself being active. */
12947 	bool has_active_crtc = crtc_state &&
12948 		crtc_state->hw.active;
12949 
12950 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
12951 		drm_dbg_kms(&dev_priv->drm,
12952 			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
12953 			    pipe_name(crtc->pipe));
12954 		has_active_crtc = false;
12955 	}
12956 
12957 	connector = intel_encoder_find_connector(encoder);
12958 	if (connector && !has_active_crtc) {
12959 		drm_dbg_kms(&dev_priv->drm,
12960 			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12961 			    encoder->base.base.id,
12962 			    encoder->base.name);
12963 
12964 		/* Connector is active, but has no active pipe. This is
12965 		 * fallout from our resume register restoring. Disable
12966 		 * the encoder manually again. */
12967 		if (crtc_state) {
12968 			struct drm_encoder *best_encoder;
12969 
12970 			drm_dbg_kms(&dev_priv->drm,
12971 				    "[ENCODER:%d:%s] manually disabled\n",
12972 				    encoder->base.base.id,
12973 				    encoder->base.name);
12974 
12975 			/* avoid oopsing in case the hooks consult best_encoder */
12976 			best_encoder = connector->base.state->best_encoder;
12977 			connector->base.state->best_encoder = &encoder->base;
12978 
12979 			/* FIXME NULL atomic state passed! */
12980 			if (encoder->disable)
12981 				encoder->disable(NULL, encoder, crtc_state,
12982 						 connector->base.state);
12983 			if (encoder->post_disable)
12984 				encoder->post_disable(NULL, encoder, crtc_state,
12985 						      connector->base.state);
12986 
12987 			connector->base.state->best_encoder = best_encoder;
12988 		}
12989 		encoder->base.crtc = NULL;
12990 
		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions, or someplace else
		 * in our code, like the register restore mess on resume. Clamp
12994 		 * things to off as a safer default. */
12995 
12996 		connector->base.dpms = DRM_MODE_DPMS_OFF;
12997 		connector->base.encoder = NULL;
12998 	}
12999 
13000 	/* notify opregion of the sanitized encoder state */
13001 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
13002 
13003 	if (HAS_DDI(dev_priv))
13004 		intel_ddi_sanitize_encoder_pll_mapping(encoder);
13005 }
13006 
13007 /* FIXME read out full plane state for all planes */
13008 static void readout_plane_state(struct drm_i915_private *dev_priv)
13009 {
13010 	struct intel_plane *plane;
13011 	struct intel_crtc *crtc;
13012 
13013 	for_each_intel_plane(&dev_priv->drm, plane) {
13014 		struct intel_plane_state *plane_state =
13015 			to_intel_plane_state(plane->base.state);
13016 		struct intel_crtc_state *crtc_state;
13017 		enum pipe pipe = PIPE_A;
13018 		bool visible;
13019 
13020 		visible = plane->get_hw_state(plane, &pipe);
13021 
13022 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13023 		crtc_state = to_intel_crtc_state(crtc->base.state);
13024 
13025 		intel_set_plane_visible(crtc_state, plane_state, visible);
13026 
13027 		drm_dbg_kms(&dev_priv->drm,
13028 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
13029 			    plane->base.base.id, plane->base.name,
13030 			    enableddisabled(visible), pipe_name(pipe));
13031 	}
13032 
13033 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13034 		struct intel_crtc_state *crtc_state =
13035 			to_intel_crtc_state(crtc->base.state);
13036 
13037 		fixup_plane_bitmasks(crtc_state);
13038 	}
13039 }
13040 
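/*
 * Read the current hardware state (crtcs, planes, encoders, connectors
 * and DPLLs) back into the corresponding software state objects.
 */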
13041 static void intel_modeset_readout_hw_state(struct drm_device *dev)
13042 {
13043 	struct drm_i915_private *dev_priv = to_i915(dev);
13044 	struct intel_cdclk_state *cdclk_state =
13045 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
13046 	struct intel_dbuf_state *dbuf_state =
13047 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
13048 	enum pipe pipe;
13049 	struct intel_crtc *crtc;
13050 	struct intel_encoder *encoder;
13051 	struct intel_connector *connector;
13052 	struct drm_connector_list_iter conn_iter;
13053 	u8 active_pipes = 0;
13054 
13055 	for_each_intel_crtc(dev, crtc) {
13056 		struct intel_crtc_state *crtc_state =
13057 			to_intel_crtc_state(crtc->base.state);
13058 
13059 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
13060 		intel_crtc_free_hw_state(crtc_state);
13061 		intel_crtc_state_reset(crtc_state, crtc);
13062 
13063 		intel_crtc_get_pipe_config(crtc_state);
13064 
13065 		crtc_state->hw.enable = crtc_state->hw.active;
13066 
13067 		crtc->base.enabled = crtc_state->hw.enable;
13068 		crtc->active = crtc_state->hw.active;
13069 
13070 		if (crtc_state->hw.active)
13071 			active_pipes |= BIT(crtc->pipe);
13072 
13073 		drm_dbg_kms(&dev_priv->drm,
13074 			    "[CRTC:%d:%s] hw state readout: %s\n",
13075 			    crtc->base.base.id, crtc->base.name,
13076 			    enableddisabled(crtc_state->hw.active));
13077 	}
13078 
13079 	dev_priv->active_pipes = cdclk_state->active_pipes =
13080 		dbuf_state->active_pipes = active_pipes;
13081 
13082 	readout_plane_state(dev_priv);
13083 
13084 	for_each_intel_encoder(dev, encoder) {
13085 		struct intel_crtc_state *crtc_state = NULL;
13086 
13087 		pipe = 0;
13088 
13089 		if (encoder->get_hw_state(encoder, &pipe)) {
13090 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13091 			crtc_state = to_intel_crtc_state(crtc->base.state);
13092 
13093 			encoder->base.crtc = &crtc->base;
13094 			intel_encoder_get_config(encoder, crtc_state);
13095 
13096 			/* read out to slave crtc as well for bigjoiner */
13097 			if (crtc_state->bigjoiner) {
				/* the encoder should be linked to the bigjoiner master */
13099 				WARN_ON(crtc_state->bigjoiner_slave);
13100 
13101 				crtc = crtc_state->bigjoiner_linked_crtc;
13102 				crtc_state = to_intel_crtc_state(crtc->base.state);
13103 				intel_encoder_get_config(encoder, crtc_state);
13104 			}
13105 		} else {
13106 			encoder->base.crtc = NULL;
13107 		}
13108 
13109 		if (encoder->sync_state)
13110 			encoder->sync_state(encoder, crtc_state);
13111 
13112 		drm_dbg_kms(&dev_priv->drm,
13113 			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
13114 			    encoder->base.base.id, encoder->base.name,
13115 			    enableddisabled(encoder->base.crtc),
13116 			    pipe_name(pipe));
13117 	}
13118 
13119 	intel_dpll_readout_hw_state(dev_priv);
13120 
13121 	drm_connector_list_iter_begin(dev, &conn_iter);
13122 	for_each_intel_connector_iter(connector, &conn_iter) {
13123 		if (connector->get_hw_state(connector)) {
13124 			struct intel_crtc_state *crtc_state;
13125 			struct intel_crtc *crtc;
13126 
13127 			connector->base.dpms = DRM_MODE_DPMS_ON;
13128 
13129 			encoder = intel_attached_encoder(connector);
13130 			connector->base.encoder = &encoder->base;
13131 
13132 			crtc = to_intel_crtc(encoder->base.crtc);
13133 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
13134 
13135 			if (crtc_state && crtc_state->hw.active) {
13136 				/*
13137 				 * This has to be done during hardware readout
13138 				 * because anything calling .crtc_disable may
13139 				 * rely on the connector_mask being accurate.
13140 				 */
13141 				crtc_state->uapi.connector_mask |=
13142 					drm_connector_mask(&connector->base);
13143 				crtc_state->uapi.encoder_mask |=
13144 					drm_encoder_mask(&encoder->base);
13145 			}
13146 		} else {
13147 			connector->base.dpms = DRM_MODE_DPMS_OFF;
13148 			connector->base.encoder = NULL;
13149 		}
13150 		drm_dbg_kms(&dev_priv->drm,
13151 			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
13152 			    connector->base.base.id, connector->base.name,
13153 			    enableddisabled(connector->base.encoder));
13154 	}
13155 	drm_connector_list_iter_end(&conn_iter);
13156 
13157 	for_each_intel_crtc(dev, crtc) {
13158 		struct intel_bw_state *bw_state =
13159 			to_intel_bw_state(dev_priv->bw_obj.state);
13160 		struct intel_crtc_state *crtc_state =
13161 			to_intel_crtc_state(crtc->base.state);
13162 		struct intel_plane *plane;
13163 		int min_cdclk = 0;
13164 
13165 		if (crtc_state->bigjoiner_slave)
13166 			continue;
13167 
13168 		if (crtc_state->hw.active) {
13169 			/*
13170 			 * The initial mode needs to be set in order to keep
13171 			 * the atomic core happy. It wants a valid mode if the
13172 			 * crtc's enabled, so we do the above call.
13173 			 *
13174 			 * But we don't set all the derived state fully, hence
13175 			 * set a flag to indicate that a full recalculation is
13176 			 * needed on the next commit.
13177 			 */
13178 			crtc_state->inherited = true;
13179 
13180 			intel_crtc_update_active_timings(crtc_state);
13181 
13182 			intel_crtc_copy_hw_to_uapi_state(crtc_state);
13183 		}
13184 
13185 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13186 			const struct intel_plane_state *plane_state =
13187 				to_intel_plane_state(plane->base.state);
13188 
13189 			/*
13190 			 * FIXME don't have the fb yet, so can't
13191 			 * use intel_plane_data_rate() :(
13192 			 */
13193 			if (plane_state->uapi.visible)
13194 				crtc_state->data_rate[plane->id] =
13195 					4 * crtc_state->pixel_rate;
13196 			/*
13197 			 * FIXME don't have the fb yet, so can't
13198 			 * use plane->min_cdclk() :(
13199 			 */
13200 			if (plane_state->uapi.visible && plane->min_cdclk) {
13201 				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
13202 					crtc_state->min_cdclk[plane->id] =
13203 						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
13204 				else
13205 					crtc_state->min_cdclk[plane->id] =
13206 						crtc_state->pixel_rate;
13207 			}
13208 			drm_dbg_kms(&dev_priv->drm,
13209 				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
13210 				    plane->base.base.id, plane->base.name,
13211 				    crtc_state->min_cdclk[plane->id]);
13212 		}
13213 
13214 		if (crtc_state->hw.active) {
13215 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
13216 			if (drm_WARN_ON(dev, min_cdclk < 0))
13217 				min_cdclk = 0;
13218 		}
13219 
13220 		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13221 		cdclk_state->min_voltage_level[crtc->pipe] =
13222 			crtc_state->min_voltage_level;
13223 
13224 		intel_bw_crtc_update(bw_state, crtc_state);
13225 
13226 		intel_pipe_config_sanity_check(dev_priv, crtc_state);
13227 
13228 		/* discard our incomplete slave state, copy it from master */
13229 		if (crtc_state->bigjoiner && crtc_state->hw.active) {
13230 			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13231 			struct intel_crtc_state *slave_crtc_state =
13232 				to_intel_crtc_state(slave->base.state);
13233 
13234 			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13235 			slave->base.mode = crtc->base.mode;
13236 
13237 			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13238 			cdclk_state->min_voltage_level[slave->pipe] =
13239 				crtc_state->min_voltage_level;
13240 
13241 			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13242 				const struct intel_plane_state *plane_state =
13243 					to_intel_plane_state(plane->base.state);
13244 
13245 				/*
13246 				 * FIXME don't have the fb yet, so can't
13247 				 * use intel_plane_data_rate() :(
13248 				 */
13249 				if (plane_state->uapi.visible)
13250 					crtc_state->data_rate[plane->id] =
13251 						4 * crtc_state->pixel_rate;
13252 				else
13253 					crtc_state->data_rate[plane->id] = 0;
13254 			}
13255 
13256 			intel_bw_crtc_update(bw_state, slave_crtc_state);
13257 			drm_calc_timestamping_constants(&slave->base,
13258 							&slave_crtc_state->hw.adjusted_mode);
13259 		}
13260 	}
13261 }
13262 
13263 static void
13264 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13265 {
13266 	struct intel_encoder *encoder;
13267 
13268 	for_each_intel_encoder(&dev_priv->drm, encoder) {
13269 		struct intel_crtc_state *crtc_state;
13270 
13271 		if (!encoder->get_power_domains)
13272 			continue;
13273 
13274 		/*
13275 		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these requires any power domain references.
13277 		 */
13278 		if (!encoder->base.crtc)
13279 			continue;
13280 
13281 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13282 		encoder->get_power_domains(encoder, crtc_state);
13283 	}
13284 }
13285 
13286 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13287 {
13288 	/*
13289 	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
13290 	 * Also known as Wa_14010480278.
13291 	 */
13292 	if (IS_DISPLAY_VER(dev_priv, 10, 12))
13293 		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13294 			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
13295 
13296 	if (IS_HASWELL(dev_priv)) {
13297 		/*
13298 		 * WaRsPkgCStateDisplayPMReq:hsw
13299 		 * System hang if this isn't done before disabling all planes!
13300 		 */
13301 		intel_de_write(dev_priv, CHICKEN_PAR1_1,
13302 			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13303 	}
13304 
13305 	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13306 		/* Display WA #1142:kbl,cfl,cml */
13307 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13308 			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13309 		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13310 			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13311 			     KBL_ARB_FILL_SPARE_14);
13312 	}
13313 }
13314 
13315 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13316 				       enum port port, i915_reg_t hdmi_reg)
13317 {
13318 	u32 val = intel_de_read(dev_priv, hdmi_reg);
13319 
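	/* Leave the port alone if it's enabled or already selects transcoder A. */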
13320 	if (val & SDVO_ENABLE ||
13321 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13322 		return;
13323 
13324 	drm_dbg_kms(&dev_priv->drm,
13325 		    "Sanitizing transcoder select for HDMI %c\n",
13326 		    port_name(port));
13327 
13328 	val &= ~SDVO_PIPE_SEL_MASK;
13329 	val |= SDVO_PIPE_SEL(PIPE_A);
13330 
13331 	intel_de_write(dev_priv, hdmi_reg, val);
13332 }
13333 
13334 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13335 				     enum port port, i915_reg_t dp_reg)
13336 {
13337 	u32 val = intel_de_read(dev_priv, dp_reg);
13338 
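	/* Leave the port alone if it's enabled or already selects transcoder A. */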
13339 	if (val & DP_PORT_EN ||
13340 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13341 		return;
13342 
13343 	drm_dbg_kms(&dev_priv->drm,
13344 		    "Sanitizing transcoder select for DP %c\n",
13345 		    port_name(port));
13346 
13347 	val &= ~DP_PIPE_SEL_MASK;
13348 	val |= DP_PIPE_SEL(PIPE_A);
13349 
13350 	intel_de_write(dev_priv, dp_reg, val);
13351 }
13352 
13353 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13354 {
13355 	/*
13356 	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
13358 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13359 	 * Sanitize the transcoder select bits to prevent that. We
13360 	 * assume that the BIOS never actually enabled the port,
13361 	 * because if it did we'd actually have to toggle the port
13362 	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
13364 	 * intel_disable_sdvo()).
13365 	 */
13366 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13367 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13368 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13369 
13370 	/* PCH SDVOB multiplex with HDMIB */
13371 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13372 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13373 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13374 }
13375 
/* Scan out the current hw modeset state and sanitize it so that it is
 * consistent with what the driver expects.
 */
13379 static void
13380 intel_modeset_setup_hw_state(struct drm_device *dev,
13381 			     struct drm_modeset_acquire_ctx *ctx)
13382 {
13383 	struct drm_i915_private *dev_priv = to_i915(dev);
13384 	struct intel_encoder *encoder;
13385 	struct intel_crtc *crtc;
13386 	intel_wakeref_t wakeref;
13387 
13388 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
13389 
13390 	intel_early_display_was(dev_priv);
13391 	intel_modeset_readout_hw_state(dev);
13392 
13393 	/* HW state is read out, now we need to sanitize this mess. */
13394 	get_encoder_power_domains(dev_priv);
13395 
13396 	if (HAS_PCH_IBX(dev_priv))
13397 		ibx_sanitize_pch_ports(dev_priv);
13398 
13399 	/*
13400 	 * intel_sanitize_plane_mapping() may need to do vblank
13401 	 * waits, so we need vblank interrupts restored beforehand.
13402 	 */
13403 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13404 		struct intel_crtc_state *crtc_state =
13405 			to_intel_crtc_state(crtc->base.state);
13406 
13407 		drm_crtc_vblank_reset(&crtc->base);
13408 
13409 		if (crtc_state->hw.active)
13410 			intel_crtc_vblank_on(crtc_state);
13411 	}
13412 
13413 	intel_sanitize_plane_mapping(dev_priv);
13414 
13415 	for_each_intel_encoder(dev, encoder)
13416 		intel_sanitize_encoder(encoder);
13417 
13418 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13419 		struct intel_crtc_state *crtc_state =
13420 			to_intel_crtc_state(crtc->base.state);
13421 
13422 		intel_sanitize_crtc(crtc, ctx);
13423 		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
13424 	}
13425 
13426 	intel_modeset_update_connector_atomic_state(dev);
13427 
13428 	intel_dpll_sanitize_state(dev_priv);
13429 
13430 	if (IS_G4X(dev_priv)) {
13431 		g4x_wm_get_hw_state(dev_priv);
13432 		g4x_wm_sanitize(dev_priv);
13433 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
13434 		vlv_wm_get_hw_state(dev_priv);
13435 		vlv_wm_sanitize(dev_priv);
13436 	} else if (DISPLAY_VER(dev_priv) >= 9) {
13437 		skl_wm_get_hw_state(dev_priv);
13438 	} else if (HAS_PCH_SPLIT(dev_priv)) {
13439 		ilk_wm_get_hw_state(dev_priv);
13440 	}
13441 
13442 	for_each_intel_crtc(dev, crtc) {
13443 		struct intel_crtc_state *crtc_state =
13444 			to_intel_crtc_state(crtc->base.state);
13445 		u64 put_domains;
13446 
13447 		put_domains = modeset_get_crtc_power_domains(crtc_state);
13448 		if (drm_WARN_ON(dev, put_domains))
13449 			modeset_put_crtc_power_domains(crtc, put_domains);
13450 	}
13451 
13452 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
13453 }
13454 
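/* Restore the atomic state (if any) that was saved off earlier, e.g. at suspend. */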
13455 void intel_display_resume(struct drm_device *dev)
13456 {
13457 	struct drm_i915_private *dev_priv = to_i915(dev);
13458 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13459 	struct drm_modeset_acquire_ctx ctx;
13460 	int ret;
13461 
13462 	if (!HAS_DISPLAY(dev_priv))
13463 		return;
13464 
13465 	dev_priv->modeset_restore_state = NULL;
13466 	if (state)
13467 		state->acquire_ctx = &ctx;
13468 
13469 	drm_modeset_acquire_init(&ctx, 0);
13470 
13471 	while (1) {
13472 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
13473 		if (ret != -EDEADLK)
13474 			break;
13475 
13476 		drm_modeset_backoff(&ctx);
13477 	}
13478 
13479 	if (!ret)
13480 		ret = __intel_display_resume(dev, state, &ctx);
13481 
13482 	intel_enable_ipc(dev_priv);
13483 	drm_modeset_drop_locks(&ctx);
13484 	drm_modeset_acquire_fini(&ctx);
13485 
13486 	if (ret)
13487 		drm_err(&dev_priv->drm,
13488 			"Restoring old state failed with %i\n", ret);
13489 	if (state)
13490 		drm_atomic_state_put(state);
13491 }
13492 
13493 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13494 {
13495 	struct intel_connector *connector;
13496 	struct drm_connector_list_iter conn_iter;
13497 
13498 	/* Kill all the work that may have been queued by hpd. */
13499 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13500 	for_each_intel_connector_iter(connector, &conn_iter) {
13501 		if (connector->modeset_retry_work.func)
13502 			cancel_work_sync(&connector->modeset_retry_work);
13503 		if (connector->hdcp.shim) {
13504 			cancel_delayed_work_sync(&connector->hdcp.check_work);
13505 			cancel_work_sync(&connector->hdcp.prop_work);
13506 		}
13507 	}
13508 	drm_connector_list_iter_end(&conn_iter);
13509 }
13510 
13511 /* part #1: call before irq uninstall */
13512 void intel_modeset_driver_remove(struct drm_i915_private *i915)
13513 {
13514 	if (!HAS_DISPLAY(i915))
13515 		return;
13516 
13517 	flush_workqueue(i915->flip_wq);
13518 	flush_workqueue(i915->modeset_wq);
13519 
13520 	flush_work(&i915->atomic_helper.free_work);
13521 	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
13522 }
13523 
13524 /* part #2: call after irq uninstall */
13525 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
13526 {
13527 	if (!HAS_DISPLAY(i915))
13528 		return;
13529 
13530 	/*
	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
13532 	 * poll handlers. Hence disable polling after hpd handling is shut down.
13533 	 */
13534 	intel_hpd_poll_fini(i915);
13535 
13536 	/*
13537 	 * MST topology needs to be suspended so we don't have any calls to
13538 	 * fbdev after it's finalized. MST will be destroyed later as part of
13539 	 * drm_mode_config_cleanup()
13540 	 */
13541 	intel_dp_mst_suspend(i915);
13542 
13543 	/* poll work can call into fbdev, hence clean that up afterwards */
13544 	intel_fbdev_fini(i915);
13545 
13546 	intel_unregister_dsm_handler();
13547 
13548 	intel_fbc_global_disable(i915);
13549 
13550 	/* flush any delayed tasks or pending work */
13551 	flush_scheduled_work();
13552 
13553 	intel_hdcp_component_fini(i915);
13554 
13555 	intel_mode_config_cleanup(i915);
13556 
13557 	intel_overlay_cleanup(i915);
13558 
13559 	intel_gmbus_teardown(i915);
13560 
13561 	destroy_workqueue(i915->flip_wq);
13562 	destroy_workqueue(i915->modeset_wq);
13563 
13564 	intel_fbc_cleanup_cfb(i915);
13565 }
13566 
13567 /* part #3: call after gem init */
13568 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
13569 {
13570 	intel_dmc_ucode_fini(i915);
13571 
13572 	intel_power_domains_driver_remove(i915);
13573 
13574 	intel_vga_unregister(i915);
13575 
13576 	intel_bios_driver_remove(i915);
13577 }
13578 
13579 void intel_display_driver_register(struct drm_i915_private *i915)
13580 {
13581 	if (!HAS_DISPLAY(i915))
13582 		return;
13583 
13584 	intel_display_debugfs_register(i915);
13585 
13586 	/* Must be done after probing outputs */
13587 	intel_opregion_register(i915);
13588 	acpi_video_register();
13589 
13590 	intel_audio_init(i915);
13591 
13592 	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (otherwise they report a ghost
	 * connected connector status), e.g. VGA on gm45. Hence we can only set
13596 	 * up the initial fbdev config after hpd irqs are fully
13597 	 * enabled. We do it last so that the async config cannot run
13598 	 * before the connectors are registered.
13599 	 */
13600 	intel_fbdev_initial_config_async(&i915->drm);
13601 
13602 	/*
13603 	 * We need to coordinate the hotplugs with the asynchronous
13604 	 * fbdev configuration, for which we use the
13605 	 * fbdev->async_cookie.
13606 	 */
13607 	drm_kms_helper_poll_init(&i915->drm);
13608 }
13609 
13610 void intel_display_driver_unregister(struct drm_i915_private *i915)
13611 {
13612 	if (!HAS_DISPLAY(i915))
13613 		return;
13614 
13615 	intel_fbdev_unregister(i915);
13616 	intel_audio_deinit(i915);
13617 
13618 	/*
	 * Flush the fbdev first (including any late async config, which
	 * will have delayed the queuing of a hotplug event), then flush
	 * the hotplug events.
13622 	 */
13623 	drm_kms_helper_poll_fini(&i915->drm);
13624 	drm_atomic_helper_shutdown(&i915->drm);
13625 
13626 	acpi_video_unregister();
13627 	intel_opregion_unregister(i915);
13628 }
13629