/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
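
/*
 * E.g. (illustrative, not from any one platform's spec): with
 * .p2 = { .dot_limit = 165000, .p2_slow = 4, .p2_fast = 2 }, a target
 * dot clock below 165000 kHz selects p2 = 4 and anything at or above
 * it selects p2 = 2; LVDS instead selects fast vs. slow based on
 * dual-channel state (see i9xx_select_p2_div()).
 */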

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
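
/*
 * Worked example (hypothetical register value): with an 800000 kHz HPLL
 * reference and a divider field of 7, vlv_get_cck_clock() returns
 * DIV_ROUND_CLOSEST(800000 << 1, 7 + 1) = 1600000 / 8 = 200000 kHz,
 * i.e. the clock runs at ref * 2 / (divider + 1).
 */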

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
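/*
 * E.g. (illustrative) an m1 register field limit of { .min = 12, .max = 22 }
 * corresponds to actual dividers of 14..24 once the +2 offset above is
 * applied; the same offset applies to n and m2.
 */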
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
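
/*
 * E.g. (illustrative) a 270000 kHz pixel clock corresponds to a
 * 270000 * 5 = 1350000 kHz fast clock here, which is why the dot
 * limits above carry a "* 5" factor and why vlv_calc_dpll_params()
 * divides the fast dot clock back down by 5.
 */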

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->base);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
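/*
 * Worked example for the helpers below (illustrative numbers only):
 * on Pineview, refclk = 96000 kHz, m2 = 98, n = 4, p1 = 2, p2 = 5
 * gives m = 98 + 2 = 100, vco = 96000 * 100 / 4 = 2400000 kHz and
 * dot = 2400000 / (2 * 5) = 240000 kHz.
 */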
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
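
/*
 * Worked example for the i9xx clock equation above (illustrative values):
 * refclk = 96000 kHz, m1 = 18, m2 = 8, n = 2, p1 = 2, p2 = 4 gives
 * m = 5 * (18 + 2) + (8 + 2) = 110, vco = 96000 * 110 / (2 + 2) =
 * 2640000 kHz and dot = 2640000 / (2 * 4) = 330000 kHz.
 */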

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. The calculated error is
 * returned in *error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
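
/*
 * E.g. (illustrative) target_freq = 270000 kHz and a computed dot clock
 * of 269973 kHz give an error of 1000000 * 27 / 270000 = 100 ppm; note
 * that a new candidate must beat the best error by more than 10 ppm to
 * be accepted, which avoids churn between near-identical choices.
 */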

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on hardware doc, n is always set to 1, and m1 is always
	 * set to 2. If we need to support a 200 MHz refclk, we'll have to
	 * revisit this because n may not be 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
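
/*
 * Note on the m2 values above: on CHV (and BXT, which reuses this code)
 * m2 is a .22 binary fixed point divider, hence the "<< 22" in both the
 * limits and the m2 calculation; e.g. a fractional divider of 26.5
 * would be stored as 26.5 * 2^22 = 111149056 (illustrative value).
 */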

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1474
i9xx_has_pps(struct drm_i915_private * dev_priv)1475 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1476 {
1477 if (IS_I830(dev_priv))
1478 return false;
1479
1480 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1481 }
1482
i9xx_enable_pll(struct intel_crtc * crtc,const struct intel_crtc_state * crtc_state)1483 static void i9xx_enable_pll(struct intel_crtc *crtc,
1484 const struct intel_crtc_state *crtc_state)
1485 {
1486 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1487 i915_reg_t reg = DPLL(crtc->pipe);
1488 u32 dpll = crtc_state->dpll_hw_state.dpll;
1489 int i;
1490
1491 assert_pipe_disabled(dev_priv, crtc->pipe);
1492
1493 /* PLL is protected by panel, make sure we can write it */
1494 if (i9xx_has_pps(dev_priv))
1495 assert_panel_unlocked(dev_priv, crtc->pipe);
1496
1497 /*
1498 * Apparently we need to have VGA mode enabled prior to changing
1499 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1500 * dividers, even though the register value does change.
1501 */
1502 I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
1503 I915_WRITE(reg, dpll);
1504
1505 /* Wait for the clocks to stabilize. */
1506 POSTING_READ(reg);
1507 udelay(150);
1508
1509 if (INTEL_GEN(dev_priv) >= 4) {
1510 I915_WRITE(DPLL_MD(crtc->pipe),
1511 crtc_state->dpll_hw_state.dpll_md);
1512 } else {
1513 /* The pixel multiplier can only be updated once the
1514 * DPLL is enabled and the clocks are stable.
1515 *
1516 * So write it again.
1517 */
1518 I915_WRITE(reg, dpll);
1519 }
1520
1521 /* We do this three times for luck */
1522 for (i = 0; i < 3; i++) {
1523 I915_WRITE(reg, dpll);
1524 POSTING_READ(reg);
1525 udelay(150); /* wait for warmup */
1526 }
1527 }
1528
i9xx_disable_pll(const struct intel_crtc_state * crtc_state)1529 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1530 {
1531 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1532 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1533 enum pipe pipe = crtc->pipe;
1534
1535 /* Don't disable pipe or pipe PLLs if needed */
1536 if (IS_I830(dev_priv))
1537 return;
1538
1539 /* Make sure the pipe isn't still relying on us */
1540 assert_pipe_disabled(dev_priv, pipe);
1541
1542 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1543 POSTING_READ(DPLL(pipe));
1544 }
1545
vlv_disable_pll(struct drm_i915_private * dev_priv,enum pipe pipe)1546 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1547 {
1548 u32 val;
1549
1550 /* Make sure the pipe isn't still relying on us */
1551 assert_pipe_disabled(dev_priv, pipe);
1552
1553 val = DPLL_INTEGRATED_REF_CLK_VLV |
1554 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1555 if (pipe != PIPE_A)
1556 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1557
1558 I915_WRITE(DPLL(pipe), val);
1559 POSTING_READ(DPLL(pipe));
1560 }
1561
chv_disable_pll(struct drm_i915_private * dev_priv,enum pipe pipe)1562 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1563 {
1564 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1565 u32 val;
1566
1567 /* Make sure the pipe isn't still relying on us */
1568 assert_pipe_disabled(dev_priv, pipe);
1569
1570 val = DPLL_SSC_REF_CLK_CHV |
1571 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1572 if (pipe != PIPE_A)
1573 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1574
1575 I915_WRITE(DPLL(pipe), val);
1576 POSTING_READ(DPLL(pipe));
1577
1578 vlv_dpio_get(dev_priv);
1579
1580 /* Disable 10bit clock to display controller */
1581 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1582 val &= ~DPIO_DCLKP_EN;
1583 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1584
1585 vlv_dpio_put(dev_priv);
1586 }
1587
vlv_wait_port_ready(struct drm_i915_private * dev_priv,struct intel_digital_port * dport,unsigned int expected_mask)1588 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1589 struct intel_digital_port *dport,
1590 unsigned int expected_mask)
1591 {
1592 u32 port_mask;
1593 i915_reg_t dpll_reg;
1594
1595 switch (dport->base.port) {
1596 case PORT_B:
1597 port_mask = DPLL_PORTB_READY_MASK;
1598 dpll_reg = DPLL(0);
1599 break;
1600 case PORT_C:
1601 port_mask = DPLL_PORTC_READY_MASK;
1602 dpll_reg = DPLL(0);
1603 expected_mask <<= 4;
1604 break;
1605 case PORT_D:
1606 port_mask = DPLL_PORTD_READY_MASK;
1607 dpll_reg = DPIO_PHY_STATUS;
1608 break;
1609 default:
1610 BUG();
1611 }
1612
1613 if (intel_de_wait_for_register(dev_priv, dpll_reg,
1614 port_mask, expected_mask, 1000))
1615 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1616 port_name(dport->base.port),
1617 I915_READ(dpll_reg) & port_mask, expected_mask);
1618 }
1619
ironlake_enable_pch_transcoder(const struct intel_crtc_state * crtc_state)1620 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1621 {
1622 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1623 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1624 enum pipe pipe = crtc->pipe;
1625 i915_reg_t reg;
1626 u32 val, pipeconf_val;
1627
1628 /* Make sure PCH DPLL is enabled */
1629 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1630
1631 /* FDI must be feeding us bits for PCH ports */
1632 assert_fdi_tx_enabled(dev_priv, pipe);
1633 assert_fdi_rx_enabled(dev_priv, pipe);
1634
1635 if (HAS_PCH_CPT(dev_priv)) {
1636 /* Workaround: Set the timing override bit before enabling the
1637 * pch transcoder. */
1638 reg = TRANS_CHICKEN2(pipe);
1639 val = I915_READ(reg);
1640 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1641 I915_WRITE(reg, val);
1642 }
1643
1644 reg = PCH_TRANSCONF(pipe);
1645 val = I915_READ(reg);
1646 pipeconf_val = I915_READ(PIPECONF(pipe));
1647
1648 if (HAS_PCH_IBX(dev_priv)) {
1649 /*
1650 * Make the BPC of the transcoder consistent with
1651 * that in the pipeconf reg. For HDMI we must use 8bpc
1652 * here for both 8bpc and 12bpc.
1653 */
1654 val &= ~PIPECONF_BPC_MASK;
1655 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1656 val |= PIPECONF_8BPC;
1657 else
1658 val |= pipeconf_val & PIPECONF_BPC_MASK;
1659 }
1660
1661 val &= ~TRANS_INTERLACE_MASK;
1662 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1663 if (HAS_PCH_IBX(dev_priv) &&
1664 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1665 val |= TRANS_LEGACY_INTERLACED_ILK;
1666 else
1667 val |= TRANS_INTERLACED;
1668 } else {
1669 val |= TRANS_PROGRESSIVE;
1670 }
1671
1672 I915_WRITE(reg, val | TRANS_ENABLE);
1673 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
1674 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1675 }
1676
1677 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1678 enum transcoder cpu_transcoder)
1679 {
1680 u32 val, pipeconf_val;
1681
1682 /* FDI must be feeding us bits for PCH ports */
1683 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1684 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1685
1686 /* Workaround: set timing override bit. */
1687 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1688 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1689 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1690
1691 val = TRANS_ENABLE;
1692 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1693
1694 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1695 PIPECONF_INTERLACED_ILK)
1696 val |= TRANS_INTERLACED;
1697 else
1698 val |= TRANS_PROGRESSIVE;
1699
1700 I915_WRITE(LPT_TRANSCONF, val);
1701 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
1702 TRANS_STATE_ENABLE, 100))
1703 DRM_ERROR("Failed to enable PCH transcoder\n");
1704 }
1705
1706 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1707 enum pipe pipe)
1708 {
1709 i915_reg_t reg;
1710 u32 val;
1711
1712 /* FDI relies on the transcoder */
1713 assert_fdi_tx_disabled(dev_priv, pipe);
1714 assert_fdi_rx_disabled(dev_priv, pipe);
1715
1716 /* Ports must be off as well */
1717 assert_pch_ports_disabled(dev_priv, pipe);
1718
1719 reg = PCH_TRANSCONF(pipe);
1720 val = I915_READ(reg);
1721 val &= ~TRANS_ENABLE;
1722 I915_WRITE(reg, val);
1723 /* wait for PCH transcoder off, transcoder state */
1724 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
1725 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1726
1727 if (HAS_PCH_CPT(dev_priv)) {
1728 /* Workaround: Clear the timing override chicken bit again. */
1729 reg = TRANS_CHICKEN2(pipe);
1730 val = I915_READ(reg);
1731 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1732 I915_WRITE(reg, val);
1733 }
1734 }
1735
1736 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1737 {
1738 u32 val;
1739
1740 val = I915_READ(LPT_TRANSCONF);
1741 val &= ~TRANS_ENABLE;
1742 I915_WRITE(LPT_TRANSCONF, val);
1743 /* wait for PCH transcoder off, transcoder state */
1744 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
1745 TRANS_STATE_ENABLE, 50))
1746 DRM_ERROR("Failed to disable PCH transcoder\n");
1747
1748 /* Workaround: clear timing override bit. */
1749 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1750 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1751 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1752 }
1753
1754 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1755 {
1756 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1757
1758 if (HAS_PCH_LPT(dev_priv))
1759 return PIPE_A;
1760 else
1761 return crtc->pipe;
1762 }
1763
1764 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1765 {
1766 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1767
1768 /*
1769 * On i965gm the hardware frame counter reads
1770 * zero when the TV encoder is enabled :(
1771 */
1772 if (IS_I965GM(dev_priv) &&
1773 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1774 return 0;
1775
1776 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1777 return 0xffffffff; /* full 32 bit counter */
1778 else if (INTEL_GEN(dev_priv) >= 3)
1779 return 0xffffff; /* only 24 bits of frame count */
1780 else
1781 return 0; /* Gen2 doesn't have a hardware frame counter */
1782 }
1783
1784 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1785 {
1786 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1787
1788 drm_crtc_set_max_vblank_count(&crtc->base,
1789 intel_crtc_max_vblank_count(crtc_state));
1790 drm_crtc_vblank_on(&crtc->base);
1791 }
1792
1793 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1794 {
1795 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1796 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1797 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1798 enum pipe pipe = crtc->pipe;
1799 i915_reg_t reg;
1800 u32 val;
1801
1802 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1803
1804 assert_planes_disabled(crtc);
1805
1806 /*
1807 * A pipe without a PLL won't actually be able to drive bits from
1808 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1809 * need the check.
1810 */
1811 if (HAS_GMCH(dev_priv)) {
1812 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1813 assert_dsi_pll_enabled(dev_priv);
1814 else
1815 assert_pll_enabled(dev_priv, pipe);
1816 } else {
1817 if (new_crtc_state->has_pch_encoder) {
1818 /* if driving the PCH, we need FDI enabled */
1819 assert_fdi_rx_pll_enabled(dev_priv,
1820 intel_crtc_pch_transcoder(crtc));
1821 assert_fdi_tx_pll_enabled(dev_priv,
1822 (enum pipe) cpu_transcoder);
1823 }
1824 /* FIXME: assert CPU port conditions for SNB+ */
1825 }
1826
1827 trace_intel_pipe_enable(crtc);
1828
1829 reg = PIPECONF(cpu_transcoder);
1830 val = I915_READ(reg);
1831 if (val & PIPECONF_ENABLE) {
1832 /* we keep both pipes enabled on 830 */
1833 WARN_ON(!IS_I830(dev_priv));
1834 return;
1835 }
1836
1837 I915_WRITE(reg, val | PIPECONF_ENABLE);
1838 POSTING_READ(reg);
1839
1840 /*
1841 * Until the pipe starts, PIPEDSL reads will return a stale value,
1842 * which causes an apparent vblank timestamp jump when PIPEDSL
1843 * resets to its proper value. That also messes up the frame count
1844 * when it's derived from the timestamps. So let's wait for the
1845 * pipe to start properly before we call drm_crtc_vblank_on()
1846 */
1847 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1848 intel_wait_for_pipe_scanline_moving(crtc);
1849 }
1850
1851 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1852 {
1853 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1854 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1855 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1856 enum pipe pipe = crtc->pipe;
1857 i915_reg_t reg;
1858 u32 val;
1859
1860 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1861
1862 /*
1863 * Make sure planes won't keep trying to pump pixels to us,
1864 * or we might hang the display.
1865 */
1866 assert_planes_disabled(crtc);
1867
1868 trace_intel_pipe_disable(crtc);
1869
1870 reg = PIPECONF(cpu_transcoder);
1871 val = I915_READ(reg);
1872 if ((val & PIPECONF_ENABLE) == 0)
1873 return;
1874
1875 /*
1876 * Double wide has implications for planes
1877 * so best keep it disabled when not needed.
1878 */
1879 if (old_crtc_state->double_wide)
1880 val &= ~PIPECONF_DOUBLE_WIDE;
1881
1882 /* Don't disable the pipe or pipe PLLs if they are still needed */
1883 if (!IS_I830(dev_priv))
1884 val &= ~PIPECONF_ENABLE;
1885
1886 I915_WRITE(reg, val);
1887 if ((val & PIPECONF_ENABLE) == 0)
1888 intel_wait_for_pipe_off(old_crtc_state);
1889 }
1890
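/*
 * Size of one GTT tile in bytes: gen2 uses 2 KiB tiles, everything
 * newer uses 4 KiB. All of the tile width/height math below derives
 * from this value.
 */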
1891 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1892 {
1893 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1894 }
1895
1896 static unsigned int
1897 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1898 {
1899 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1900 unsigned int cpp = fb->format->cpp[color_plane];
1901
1902 switch (fb->modifier) {
1903 case DRM_FORMAT_MOD_LINEAR:
1904 return intel_tile_size(dev_priv);
1905 case I915_FORMAT_MOD_X_TILED:
1906 if (IS_GEN(dev_priv, 2))
1907 return 128;
1908 else
1909 return 512;
1910 case I915_FORMAT_MOD_Y_TILED_CCS:
1911 if (color_plane == 1)
1912 return 128;
1913 /* fall through */
1914 case I915_FORMAT_MOD_Y_TILED:
1915 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1916 return 128;
1917 else
1918 return 512;
1919 case I915_FORMAT_MOD_Yf_TILED_CCS:
1920 if (color_plane == 1)
1921 return 128;
1922 /* fall through */
1923 case I915_FORMAT_MOD_Yf_TILED:
1924 switch (cpp) {
1925 case 1:
1926 return 64;
1927 case 2:
1928 case 4:
1929 return 128;
1930 case 8:
1931 case 16:
1932 return 256;
1933 default:
1934 MISSING_CASE(cpp);
1935 return cpp;
1936 }
1937 break;
1938 default:
1939 MISSING_CASE(fb->modifier);
1940 return cpp;
1941 }
1942 }
1943
1944 static unsigned int
1945 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1946 {
1947 return intel_tile_size(to_i915(fb->dev)) /
1948 intel_tile_width_bytes(fb, color_plane);
1949 }
1950
1951 /* Return the tile dimensions in pixel units */
1952 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1953 unsigned int *tile_width,
1954 unsigned int *tile_height)
1955 {
1956 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1957 unsigned int cpp = fb->format->cpp[color_plane];
1958
1959 *tile_width = tile_width_bytes / cpp;
1960 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1961 }
1962
1963 unsigned int
1964 intel_fb_align_height(const struct drm_framebuffer *fb,
1965 int color_plane, unsigned int height)
1966 {
1967 unsigned int tile_height = intel_tile_height(fb, color_plane);
1968
1969 return ALIGN(height, tile_height);
1970 }
1971
1972 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1973 {
1974 unsigned int size = 0;
1975 int i;
1976
1977 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1978 size += rot_info->plane[i].width * rot_info->plane[i].height;
1979
1980 return size;
1981 }
1982
1983 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1984 {
1985 unsigned int size = 0;
1986 int i;
1987
1988 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1989 size += rem_info->plane[i].width * rem_info->plane[i].height;
1990
1991 return size;
1992 }
1993
1994 static void
1995 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1996 const struct drm_framebuffer *fb,
1997 unsigned int rotation)
1998 {
1999 view->type = I915_GGTT_VIEW_NORMAL;
2000 if (drm_rotation_90_or_270(rotation)) {
2001 view->type = I915_GGTT_VIEW_ROTATED;
2002 view->rotated = to_intel_framebuffer(fb)->rot_info;
2003 }
2004 }
2005
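/*
 * Cursor surface base address alignment required by each platform;
 * anything not special-cased below takes the 4 KiB default.
 */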
2006 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2007 {
2008 if (IS_I830(dev_priv))
2009 return 16 * 1024;
2010 else if (IS_I85X(dev_priv))
2011 return 256;
2012 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2013 return 32;
2014 else
2015 return 4 * 1024;
2016 }
2017
2018 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2019 {
2020 if (INTEL_GEN(dev_priv) >= 9)
2021 return 256 * 1024;
2022 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2023 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2024 return 128 * 1024;
2025 else if (INTEL_GEN(dev_priv) >= 4)
2026 return 4 * 1024;
2027 else
2028 return 0;
2029 }
2030
2031 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2032 int color_plane)
2033 {
2034 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2035
2036 /* AUX_DIST needs only 4K alignment */
2037 if (color_plane == 1)
2038 return 4096;
2039
2040 switch (fb->modifier) {
2041 case DRM_FORMAT_MOD_LINEAR:
2042 return intel_linear_alignment(dev_priv);
2043 case I915_FORMAT_MOD_X_TILED:
2044 if (INTEL_GEN(dev_priv) >= 9)
2045 return 256 * 1024;
2046 return 0;
2047 case I915_FORMAT_MOD_Y_TILED_CCS:
2048 case I915_FORMAT_MOD_Yf_TILED_CCS:
2049 case I915_FORMAT_MOD_Y_TILED:
2050 case I915_FORMAT_MOD_Yf_TILED:
2051 return 1 * 1024 * 1024;
2052 default:
2053 MISSING_CASE(fb->modifier);
2054 return 0;
2055 }
2056 }
2057
2058 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2059 {
2060 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2061 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2062
2063 return INTEL_GEN(dev_priv) < 4 ||
2064 (plane->has_fbc &&
2065 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2066 }
2067
2068 struct i915_vma *
2069 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2070 const struct i915_ggtt_view *view,
2071 bool uses_fence,
2072 unsigned long *out_flags)
2073 {
2074 struct drm_device *dev = fb->dev;
2075 struct drm_i915_private *dev_priv = to_i915(dev);
2076 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2077 intel_wakeref_t wakeref;
2078 struct i915_vma *vma;
2079 unsigned int pinctl;
2080 u32 alignment;
2081
2082 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2083
2084 alignment = intel_surf_alignment(fb, 0);
2085
2086 /* Note that the w/a also requires 64 PTE of padding following the
2087 * bo. We currently fill all unused PTE with the shadow page and so
2088 * we should always have valid PTE following the scanout preventing
2089 * the VT-d warning.
2090 */
2091 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2092 alignment = 256 * 1024;
2093
2094 /*
2095 * Global gtt pte registers are special registers which actually forward
2096 * writes to a chunk of system memory. Which means that there is no risk
2097 * that the register values disappear as soon as we call
2098 * intel_runtime_pm_put(), so it is correct to wrap only the
2099 * pin/unpin/fence and not more.
2100 */
2101 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2102 i915_gem_object_lock(obj);
2103
2104 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2105
2106 pinctl = 0;
2107
2108 /* Valleyview is definitely limited to scanning out the first
2109 * 512MiB. Let's presume this behaviour was inherited from the
2110 * g4x display engine and that all earlier gen are similarly
2111 * limited. Testing suggests that it is a little more
2112 * complicated than this. For example, Cherryview appears quite
2113 * happy to scanout from anywhere within its global aperture.
2114 */
2115 if (HAS_GMCH(dev_priv))
2116 pinctl |= PIN_MAPPABLE;
2117
2118 vma = i915_gem_object_pin_to_display_plane(obj,
2119 alignment, view, pinctl);
2120 if (IS_ERR(vma))
2121 goto err;
2122
2123 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2124 int ret;
2125
2126 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2127 * fence, whereas 965+ only requires a fence if using
2128 * framebuffer compression. For simplicity, we always, when
2129 * possible, install a fence as the cost is not that onerous.
2130 *
2131 * If we fail to fence the tiled scanout, then either the
2132 * modeset will reject the change (which is highly unlikely as
2133 * the affected systems, all but one, do not have unmappable
2134 * space) or we will not be able to enable full powersaving
2135 * techniques (also likely not to apply due to various limits
2136 * FBC and the like impose on the size of the buffer, which
2137 * presumably we violated anyway with this unmappable buffer).
2138 * Anyway, it is presumably better to stumble onwards with
2139 * something and try to run the system in a "less than optimal"
2140 * mode that matches the user configuration.
2141 */
2142 ret = i915_vma_pin_fence(vma);
2143 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2144 i915_gem_object_unpin_from_display_plane(vma);
2145 vma = ERR_PTR(ret);
2146 goto err;
2147 }
2148
2149 if (ret == 0 && vma->fence)
2150 *out_flags |= PLANE_HAS_FENCE;
2151 }
2152
2153 i915_vma_get(vma);
2154 err:
2155 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2156
2157 i915_gem_object_unlock(obj);
2158 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2159 return vma;
2160 }
2161
2162 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2163 {
2164 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2165
2166 i915_gem_object_lock(vma->obj);
2167 if (flags & PLANE_HAS_FENCE)
2168 i915_vma_unpin_fence(vma);
2169 i915_gem_object_unpin_from_display_plane(vma);
2170 i915_gem_object_unlock(vma->obj);
2171
2172 i915_vma_put(vma);
2173 }
2174
2175 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2176 unsigned int rotation)
2177 {
2178 if (drm_rotation_90_or_270(rotation))
2179 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2180 else
2181 return fb->pitches[color_plane];
2182 }
2183
2184 /*
2185 * Convert the x/y offsets into a linear offset.
2186 * Only valid with 0/180 degree rotation, which is fine since linear
2187 * offset is only used with linear buffers on pre-hsw and tiled buffers
2188 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
2189 */
2190 u32 intel_fb_xy_to_linear(int x, int y,
2191 const struct intel_plane_state *state,
2192 int color_plane)
2193 {
2194 const struct drm_framebuffer *fb = state->base.fb;
2195 unsigned int cpp = fb->format->cpp[color_plane];
2196 unsigned int pitch = state->color_plane[color_plane].stride;
2197
2198 return y * pitch + x * cpp;
2199 }
2200
2201 /*
2202 * Add the x/y offsets derived from fb->offsets[] to the user
2203 * specified plane src x/y offsets. The resulting x/y offsets
2204 * specify the start of scanout from the beginning of the gtt mapping.
2205 */
2206 void intel_add_fb_offsets(int *x, int *y,
2207 const struct intel_plane_state *state,
2208 int color_plane)
2210 {
2211 *x += state->color_plane[color_plane].x;
2212 *y += state->color_plane[color_plane].y;
2213 }
2214
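/*
 * Fold a tile-aligned offset delta (old_offset - new_offset) back into
 * the x/y coordinates: the delta is converted into whole tiles and then
 * split into rows of pitch_tiles tiles plus a horizontal remainder.
 * E.g. with a pitch of 8 tiles, a delta of 10 tiles becomes
 * y += 1 * tile_height and x += 2 * tile_width.
 */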
2215 static u32 intel_adjust_tile_offset(int *x, int *y,
2216 unsigned int tile_width,
2217 unsigned int tile_height,
2218 unsigned int tile_size,
2219 unsigned int pitch_tiles,
2220 u32 old_offset,
2221 u32 new_offset)
2222 {
2223 unsigned int pitch_pixels = pitch_tiles * tile_width;
2224 unsigned int tiles;
2225
2226 WARN_ON(old_offset & (tile_size - 1));
2227 WARN_ON(new_offset & (tile_size - 1));
2228 WARN_ON(new_offset > old_offset);
2229
2230 tiles = (old_offset - new_offset) / tile_size;
2231
2232 *y += tiles / pitch_tiles * tile_height;
2233 *x += tiles % pitch_tiles * tile_width;
2234
2235 /* minimize x in case it got needlessly big */
2236 *y += *x / pitch_pixels * tile_height;
2237 *x %= pitch_pixels;
2238
2239 return new_offset;
2240 }
2241
2242 static bool is_surface_linear(u64 modifier, int color_plane)
2243 {
2244 return modifier == DRM_FORMAT_MOD_LINEAR;
2245 }
2246
2247 static u32 intel_adjust_aligned_offset(int *x, int *y,
2248 const struct drm_framebuffer *fb,
2249 int color_plane,
2250 unsigned int rotation,
2251 unsigned int pitch,
2252 u32 old_offset, u32 new_offset)
2253 {
2254 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2255 unsigned int cpp = fb->format->cpp[color_plane];
2256
2257 WARN_ON(new_offset > old_offset);
2258
2259 if (!is_surface_linear(fb->modifier, color_plane)) {
2260 unsigned int tile_size, tile_width, tile_height;
2261 unsigned int pitch_tiles;
2262
2263 tile_size = intel_tile_size(dev_priv);
2264 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2265
2266 if (drm_rotation_90_or_270(rotation)) {
2267 pitch_tiles = pitch / tile_height;
2268 swap(tile_width, tile_height);
2269 } else {
2270 pitch_tiles = pitch / (tile_width * cpp);
2271 }
2272
2273 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2274 tile_size, pitch_tiles,
2275 old_offset, new_offset);
2276 } else {
2277 old_offset += *y * pitch + *x * cpp;
2278
2279 *y = (old_offset - new_offset) / pitch;
2280 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2281 }
2282
2283 return new_offset;
2284 }
2285
2286 /*
2287 * Adjust the tile offset by moving the difference into
2288 * the x/y offsets.
2289 */
2290 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2291 const struct intel_plane_state *state,
2292 int color_plane,
2293 u32 old_offset, u32 new_offset)
2294 {
2295 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2296 state->base.rotation,
2297 state->color_plane[color_plane].stride,
2298 old_offset, new_offset);
2299 }
2300
2301 /*
2302 * Computes the aligned offset to the base tile and adjusts
2303 * x, y. Bytes per pixel is assumed to be a power of two.
2304 *
2305 * In the 90/270 rotated case, x and y are assumed
2306 * to be already rotated to match the rotated GTT view, and
2307 * pitch is the tile_height aligned framebuffer height.
2308 *
2309 * This function is used when computing the derived information
2310 * under intel_framebuffer, so using any of that information
2311 * here is not allowed. Anything under drm_framebuffer can be
2312 * used. This is why the user has to pass in the pitch since it
2313 * is specified in the rotated orientation.
2314 */
2315 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2316 int *x, int *y,
2317 const struct drm_framebuffer *fb,
2318 int color_plane,
2319 unsigned int pitch,
2320 unsigned int rotation,
2321 u32 alignment)
2322 {
2323 unsigned int cpp = fb->format->cpp[color_plane];
2324 u32 offset, offset_aligned;
2325
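	/* Turn the power-of-two alignment into a mask (0 means no alignment) */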
2326 if (alignment)
2327 alignment--;
2328
2329 if (!is_surface_linear(fb->modifier, color_plane)) {
2330 unsigned int tile_size, tile_width, tile_height;
2331 unsigned int tile_rows, tiles, pitch_tiles;
2332
2333 tile_size = intel_tile_size(dev_priv);
2334 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2335
2336 if (drm_rotation_90_or_270(rotation)) {
2337 pitch_tiles = pitch / tile_height;
2338 swap(tile_width, tile_height);
2339 } else {
2340 pitch_tiles = pitch / (tile_width * cpp);
2341 }
2342
2343 tile_rows = *y / tile_height;
2344 *y %= tile_height;
2345
2346 tiles = *x / tile_width;
2347 *x %= tile_width;
2348
2349 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2350 offset_aligned = offset & ~alignment;
2351
2352 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2353 tile_size, pitch_tiles,
2354 offset, offset_aligned);
2355 } else {
2356 offset = *y * pitch + *x * cpp;
2357 offset_aligned = offset & ~alignment;
2358
2359 *y = (offset & alignment) / pitch;
2360 *x = ((offset & alignment) - *y * pitch) / cpp;
2361 }
2362
2363 return offset_aligned;
2364 }
2365
2366 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2367 const struct intel_plane_state *state,
2368 int color_plane)
2369 {
2370 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2371 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2372 const struct drm_framebuffer *fb = state->base.fb;
2373 unsigned int rotation = state->base.rotation;
2374 int pitch = state->color_plane[color_plane].stride;
2375 u32 alignment;
2376
2377 if (intel_plane->id == PLANE_CURSOR)
2378 alignment = intel_cursor_alignment(dev_priv);
2379 else
2380 alignment = intel_surf_alignment(fb, color_plane);
2381
2382 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2383 pitch, rotation, alignment);
2384 }
2385
2386 /* Convert the fb->offset[] into x/y offsets */
2387 static int intel_fb_offset_to_xy(int *x, int *y,
2388 const struct drm_framebuffer *fb,
2389 int color_plane)
2390 {
2391 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2392 unsigned int height;
2393
2394 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2395 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2396 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2397 fb->offsets[color_plane], color_plane);
2398 return -EINVAL;
2399 }
2400
2401 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2402 height = ALIGN(height, intel_tile_height(fb, color_plane));
2403
2404 /* Catch potential overflows early */
2405 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2406 fb->offsets[color_plane])) {
2407 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2408 fb->offsets[color_plane], fb->pitches[color_plane],
2409 color_plane);
2410 return -ERANGE;
2411 }
2412
2413 *x = 0;
2414 *y = 0;
2415
2416 intel_adjust_aligned_offset(x, y,
2417 fb, color_plane, DRM_MODE_ROTATE_0,
2418 fb->pitches[color_plane],
2419 fb->offsets[color_plane], 0);
2420
2421 return 0;
2422 }
2423
2424 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2425 {
2426 switch (fb_modifier) {
2427 case I915_FORMAT_MOD_X_TILED:
2428 return I915_TILING_X;
2429 case I915_FORMAT_MOD_Y_TILED:
2430 case I915_FORMAT_MOD_Y_TILED_CCS:
2431 return I915_TILING_Y;
2432 default:
2433 return I915_TILING_NONE;
2434 }
2435 }
2436
2437 /*
2438 * From the Sky Lake PRM:
2439 * "The Color Control Surface (CCS) contains the compression status of
2440 * the cache-line pairs. The compression state of the cache-line pair
2441 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2442 * an area on the main surface of 16 x 16 sets of 128 byte Y-tiled
2443 * cache-line-pairs. CCS is always Y tiled."
2444 *
2445 * Since cache line pairs refer to horizontally adjacent cache lines,
2446 * each cache line in the CCS corresponds to an area of 32x16 cache
2447 * lines on the main surface. Since each pixel is 4 bytes, this gives
2448 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2449 * main surface.
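 * For example, a 4096x4096 XRGB8888 (4 bytes per pixel) main surface
 * pairs with a 4096/8 x 4096/16 = 512x256 byte CCS plane, which is
 * what the hsub/vsub values in the table below encode.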
2450 */
2451 static const struct drm_format_info ccs_formats[] = {
2452 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2453 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2454 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2455 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2456 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2457 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2458 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2459 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2460 };
2461
2462 static const struct drm_format_info *
2463 lookup_format_info(const struct drm_format_info formats[],
2464 int num_formats, u32 format)
2465 {
2466 int i;
2467
2468 for (i = 0; i < num_formats; i++) {
2469 if (formats[i].format == format)
2470 return &formats[i];
2471 }
2472
2473 return NULL;
2474 }
2475
2476 static const struct drm_format_info *
2477 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2478 {
2479 switch (cmd->modifier[0]) {
2480 case I915_FORMAT_MOD_Y_TILED_CCS:
2481 case I915_FORMAT_MOD_Yf_TILED_CCS:
2482 return lookup_format_info(ccs_formats,
2483 ARRAY_SIZE(ccs_formats),
2484 cmd->pixel_format);
2485 default:
2486 return NULL;
2487 }
2488 }
2489
2490 bool is_ccs_modifier(u64 modifier)
2491 {
2492 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2493 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2494 }
2495
2496 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2497 u32 pixel_format, u64 modifier)
2498 {
2499 struct intel_crtc *crtc;
2500 struct intel_plane *plane;
2501
2502 /*
2503 * We assume the primary plane for pipe A has
2504 * the highest stride limits of them all.
2505 */
2506 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2507 if (!crtc)
2508 return 0;
2509
2510 plane = to_intel_plane(crtc->base.primary);
2511
2512 return plane->max_stride(plane, pixel_format, modifier,
2513 DRM_MODE_ROTATE_0);
2514 }
2515
2516 static
2517 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2518 u32 pixel_format, u64 modifier)
2519 {
2520 /*
2521 * Arbitrary limit for gen4+ chosen to match the
2522 * render engine max stride.
2523 *
2524 * The new CCS hash mode makes remapping impossible
2525 */
2526 if (!is_ccs_modifier(modifier)) {
2527 if (INTEL_GEN(dev_priv) >= 7)
2528 return 256*1024;
2529 else if (INTEL_GEN(dev_priv) >= 4)
2530 return 128*1024;
2531 }
2532
2533 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2534 }
2535
2536 static u32
2537 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2538 {
2539 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2540
2541 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2542 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2543 fb->format->format,
2544 fb->modifier);
2545
2546 /*
2547 * To make remapping with linear generally feasible
2548 * we need the stride to be page aligned.
2549 */
2550 if (fb->pitches[color_plane] > max_stride)
2551 return intel_tile_size(dev_priv);
2552 else
2553 return 64;
2554 } else {
2555 return intel_tile_width_bytes(fb, color_plane);
2556 }
2557 }
2558
2559 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2560 {
2561 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2562 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2563 const struct drm_framebuffer *fb = plane_state->base.fb;
2564 int i;
2565
2566 /* We don't want to deal with remapping with cursors */
2567 if (plane->id == PLANE_CURSOR)
2568 return false;
2569
2570 /*
2571 * The display engine limits already match/exceed the
2572 * render engine limits, so not much point in remapping.
2573 * Would also need to deal with the fence POT alignment
2574 * and gen2 2KiB GTT tile size.
2575 */
2576 if (INTEL_GEN(dev_priv) < 4)
2577 return false;
2578
2579 /*
2580 * The new CCS hash mode isn't compatible with remapping as
2581 * the virtual address of the pages affects the compressed data.
2582 */
2583 if (is_ccs_modifier(fb->modifier))
2584 return false;
2585
2586 /* Linear needs a page aligned stride for remapping */
2587 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2588 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2589
2590 for (i = 0; i < fb->format->num_planes; i++) {
2591 if (fb->pitches[i] & alignment)
2592 return false;
2593 }
2594 }
2595
2596 return true;
2597 }
2598
2599 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2600 {
2601 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2602 const struct drm_framebuffer *fb = plane_state->base.fb;
2603 unsigned int rotation = plane_state->base.rotation;
2604 u32 stride, max_stride;
2605
2606 /*
2607 * No remapping for invisible planes since we don't have
2608 * an actual source viewport to remap.
2609 */
2610 if (!plane_state->base.visible)
2611 return false;
2612
2613 if (!intel_plane_can_remap(plane_state))
2614 return false;
2615
2616 /*
2617 * FIXME: aux plane limits on gen9+ are
2618 * unclear in Bspec, for now no checking.
2619 */
2620 stride = intel_fb_pitch(fb, 0, rotation);
2621 max_stride = plane->max_stride(plane, fb->format->format,
2622 fb->modifier, rotation);
2623
2624 return stride > max_stride;
2625 }
2626
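/*
 * Precompute the framebuffer layout for both the normal and the 90/270
 * rotated GTT views: per color plane x/y offsets, rotated pitches, and
 * the tile counts used to validate the fb against the size of the
 * backing bo.
 */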
2627 static int
2628 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2629 struct drm_framebuffer *fb)
2630 {
2631 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2632 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2633 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2634 u32 gtt_offset_rotated = 0;
2635 unsigned int max_size = 0;
2636 int i, num_planes = fb->format->num_planes;
2637 unsigned int tile_size = intel_tile_size(dev_priv);
2638
2639 for (i = 0; i < num_planes; i++) {
2640 unsigned int width, height;
2641 unsigned int cpp, size;
2642 u32 offset;
2643 int x, y;
2644 int ret;
2645
2646 cpp = fb->format->cpp[i];
2647 width = drm_framebuffer_plane_width(fb->width, fb, i);
2648 height = drm_framebuffer_plane_height(fb->height, fb, i);
2649
2650 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2651 if (ret) {
2652 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2653 i, fb->offsets[i]);
2654 return ret;
2655 }
2656
2657 if (is_ccs_modifier(fb->modifier) && i == 1) {
2658 int hsub = fb->format->hsub;
2659 int vsub = fb->format->vsub;
2660 int tile_width, tile_height;
2661 int main_x, main_y;
2662 int ccs_x, ccs_y;
2663
2664 intel_tile_dims(fb, i, &tile_width, &tile_height);
2665 tile_width *= hsub;
2666 tile_height *= vsub;
2667
2668 ccs_x = (x * hsub) % tile_width;
2669 ccs_y = (y * vsub) % tile_height;
2670 main_x = intel_fb->normal[0].x % tile_width;
2671 main_y = intel_fb->normal[0].y % tile_height;
2672
2673 /*
2674 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2675 * x/y offsets must match between CCS and the main surface.
2676 */
2677 if (main_x != ccs_x || main_y != ccs_y) {
2678 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2679 main_x, main_y,
2680 ccs_x, ccs_y,
2681 intel_fb->normal[0].x,
2682 intel_fb->normal[0].y,
2683 x, y);
2684 return -EINVAL;
2685 }
2686 }
2687
2688 /*
2689 * The fence (if used) is aligned to the start of the object
2690 * so having the framebuffer wrap around across the edge of the
2691 * fenced region doesn't really work. We have no API to configure
2692 * the fence start offset within the object (nor could we probably
2693 * on gen2/3). So it's just easier if we just require that the
2694 * fb layout agrees with the fence layout. We already check that the
2695 * fb stride matches the fence stride elsewhere.
2696 */
2697 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2698 (x + width) * cpp > fb->pitches[i]) {
2699 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2700 i, fb->offsets[i]);
2701 return -EINVAL;
2702 }
2703
2704 /*
2705 * First pixel of the framebuffer from
2706 * the start of the normal gtt mapping.
2707 */
2708 intel_fb->normal[i].x = x;
2709 intel_fb->normal[i].y = y;
2710
2711 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2712 fb->pitches[i],
2713 DRM_MODE_ROTATE_0,
2714 tile_size);
2715 offset /= tile_size;
2716
2717 if (!is_surface_linear(fb->modifier, i)) {
2718 unsigned int tile_width, tile_height;
2719 unsigned int pitch_tiles;
2720 struct drm_rect r;
2721
2722 intel_tile_dims(fb, i, &tile_width, &tile_height);
2723
2724 rot_info->plane[i].offset = offset;
2725 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2726 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2727 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2728
2729 intel_fb->rotated[i].pitch =
2730 rot_info->plane[i].height * tile_height;
2731
2732 /* how many tiles does this plane need */
2733 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2734 /*
2735 * If the plane isn't horizontally tile aligned,
2736 * we need one more tile.
2737 */
2738 if (x != 0)
2739 size++;
2740
2741 /* rotate the x/y offsets to match the GTT view */
2742 r.x1 = x;
2743 r.y1 = y;
2744 r.x2 = x + width;
2745 r.y2 = y + height;
2746 drm_rect_rotate(&r,
2747 rot_info->plane[i].width * tile_width,
2748 rot_info->plane[i].height * tile_height,
2749 DRM_MODE_ROTATE_270);
2750 x = r.x1;
2751 y = r.y1;
2752
2753 /* rotate the tile dimensions to match the GTT view */
2754 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2755 swap(tile_width, tile_height);
2756
2757 /*
2758 * We only keep the x/y offsets, so push all of the
2759 * gtt offset into the x/y offsets.
2760 */
2761 intel_adjust_tile_offset(&x, &y,
2762 tile_width, tile_height,
2763 tile_size, pitch_tiles,
2764 gtt_offset_rotated * tile_size, 0);
2765
2766 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2767
2768 /*
2769 * First pixel of the framebuffer from
2770 * the start of the rotated gtt mapping.
2771 */
2772 intel_fb->rotated[i].x = x;
2773 intel_fb->rotated[i].y = y;
2774 } else {
2775 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2776 x * cpp, tile_size);
2777 }
2778
2779 /* how many tiles in total needed in the bo */
2780 max_size = max(max_size, offset + size);
2781 }
2782
2783 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2784 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2785 mul_u32_u32(max_size, tile_size), obj->base.size);
2786 return -EINVAL;
2787 }
2788
2789 return 0;
2790 }
2791
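/*
 * Build a remapped/rotated GTT view covering only the plane's visible
 * source viewport, and recompute the per color plane stride and x/y
 * offsets relative to that view.
 */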
2792 static void
2793 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2794 {
2795 struct drm_i915_private *dev_priv =
2796 to_i915(plane_state->base.plane->dev);
2797 struct drm_framebuffer *fb = plane_state->base.fb;
2798 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2799 struct intel_rotation_info *info = &plane_state->view.rotated;
2800 unsigned int rotation = plane_state->base.rotation;
2801 int i, num_planes = fb->format->num_planes;
2802 unsigned int tile_size = intel_tile_size(dev_priv);
2803 unsigned int src_x, src_y;
2804 unsigned int src_w, src_h;
2805 u32 gtt_offset = 0;
2806
2807 memset(&plane_state->view, 0, sizeof(plane_state->view));
2808 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2809 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2810
2811 src_x = plane_state->base.src.x1 >> 16;
2812 src_y = plane_state->base.src.y1 >> 16;
2813 src_w = drm_rect_width(&plane_state->base.src) >> 16;
2814 src_h = drm_rect_height(&plane_state->base.src) >> 16;
2815
2816 WARN_ON(is_ccs_modifier(fb->modifier));
2817
2818 /* Make src coordinates relative to the viewport */
2819 drm_rect_translate(&plane_state->base.src,
2820 -(src_x << 16), -(src_y << 16));
2821
2822 /* Rotate src coordinates to match rotated GTT view */
2823 if (drm_rotation_90_or_270(rotation))
2824 drm_rect_rotate(&plane_state->base.src,
2825 src_w << 16, src_h << 16,
2826 DRM_MODE_ROTATE_270);
2827
2828 for (i = 0; i < num_planes; i++) {
2829 unsigned int hsub = i ? fb->format->hsub : 1;
2830 unsigned int vsub = i ? fb->format->vsub : 1;
2831 unsigned int cpp = fb->format->cpp[i];
2832 unsigned int tile_width, tile_height;
2833 unsigned int width, height;
2834 unsigned int pitch_tiles;
2835 unsigned int x, y;
2836 u32 offset;
2837
2838 intel_tile_dims(fb, i, &tile_width, &tile_height);
2839
2840 x = src_x / hsub;
2841 y = src_y / vsub;
2842 width = src_w / hsub;
2843 height = src_h / vsub;
2844
2845 /*
2846 * First pixel of the src viewport from the
2847 * start of the normal gtt mapping.
2848 */
2849 x += intel_fb->normal[i].x;
2850 y += intel_fb->normal[i].y;
2851
2852 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2853 fb, i, fb->pitches[i],
2854 DRM_MODE_ROTATE_0, tile_size);
2855 offset /= tile_size;
2856
2857 info->plane[i].offset = offset;
2858 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2859 tile_width * cpp);
2860 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2861 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2862
2863 if (drm_rotation_90_or_270(rotation)) {
2864 struct drm_rect r;
2865
2866 /* rotate the x/y offsets to match the GTT view */
2867 r.x1 = x;
2868 r.y1 = y;
2869 r.x2 = x + width;
2870 r.y2 = y + height;
2871 drm_rect_rotate(&r,
2872 info->plane[i].width * tile_width,
2873 info->plane[i].height * tile_height,
2874 DRM_MODE_ROTATE_270);
2875 x = r.x1;
2876 y = r.y1;
2877
2878 pitch_tiles = info->plane[i].height;
2879 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2880
2881 /* rotate the tile dimensions to match the GTT view */
2882 swap(tile_width, tile_height);
2883 } else {
2884 pitch_tiles = info->plane[i].width;
2885 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2886 }
2887
2888 /*
2889 * We only keep the x/y offsets, so push all of the
2890 * gtt offset into the x/y offsets.
2891 */
2892 intel_adjust_tile_offset(&x, &y,
2893 tile_width, tile_height,
2894 tile_size, pitch_tiles,
2895 gtt_offset * tile_size, 0);
2896
2897 gtt_offset += info->plane[i].width * info->plane[i].height;
2898
2899 plane_state->color_plane[i].offset = 0;
2900 plane_state->color_plane[i].x = x;
2901 plane_state->color_plane[i].y = y;
2902 }
2903 }
2904
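/*
 * Pick the GTT view (normal, rotated or remapped) for the plane, fill
 * in the matching per color plane stride and x/y offsets, and validate
 * the resulting stride.
 */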
2905 static int
2906 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2907 {
2908 const struct intel_framebuffer *fb =
2909 to_intel_framebuffer(plane_state->base.fb);
2910 unsigned int rotation = plane_state->base.rotation;
2911 int i, num_planes;
2912
2913 if (!fb)
2914 return 0;
2915
2916 num_planes = fb->base.format->num_planes;
2917
2918 if (intel_plane_needs_remap(plane_state)) {
2919 intel_plane_remap_gtt(plane_state);
2920
2921 /*
2922 * Sometimes even remapping can't overcome
2923 * the stride limitations :( Can happen with
2924 * big plane sizes and suitably misaligned
2925 * offsets.
2926 */
2927 return intel_plane_check_stride(plane_state);
2928 }
2929
2930 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2931
2932 for (i = 0; i < num_planes; i++) {
2933 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2934 plane_state->color_plane[i].offset = 0;
2935
2936 if (drm_rotation_90_or_270(rotation)) {
2937 plane_state->color_plane[i].x = fb->rotated[i].x;
2938 plane_state->color_plane[i].y = fb->rotated[i].y;
2939 } else {
2940 plane_state->color_plane[i].x = fb->normal[i].x;
2941 plane_state->color_plane[i].y = fb->normal[i].y;
2942 }
2943 }
2944
2945 /* Rotate src coordinates to match rotated GTT view */
2946 if (drm_rotation_90_or_270(rotation))
2947 drm_rect_rotate(&plane_state->base.src,
2948 fb->base.width << 16, fb->base.height << 16,
2949 DRM_MODE_ROTATE_270);
2950
2951 return intel_plane_check_stride(plane_state);
2952 }
2953
2954 static int i9xx_format_to_fourcc(int format)
2955 {
2956 switch (format) {
2957 case DISPPLANE_8BPP:
2958 return DRM_FORMAT_C8;
2959 case DISPPLANE_BGRX555:
2960 return DRM_FORMAT_XRGB1555;
2961 case DISPPLANE_BGRX565:
2962 return DRM_FORMAT_RGB565;
2963 default:
2964 case DISPPLANE_BGRX888:
2965 return DRM_FORMAT_XRGB8888;
2966 case DISPPLANE_RGBX888:
2967 return DRM_FORMAT_XBGR8888;
2968 case DISPPLANE_BGRX101010:
2969 return DRM_FORMAT_XRGB2101010;
2970 case DISPPLANE_RGBX101010:
2971 return DRM_FORMAT_XBGR2101010;
2972 }
2973 }
2974
2975 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2976 {
2977 switch (format) {
2978 case PLANE_CTL_FORMAT_RGB_565:
2979 return DRM_FORMAT_RGB565;
2980 case PLANE_CTL_FORMAT_NV12:
2981 return DRM_FORMAT_NV12;
2982 case PLANE_CTL_FORMAT_P010:
2983 return DRM_FORMAT_P010;
2984 case PLANE_CTL_FORMAT_P012:
2985 return DRM_FORMAT_P012;
2986 case PLANE_CTL_FORMAT_P016:
2987 return DRM_FORMAT_P016;
2988 case PLANE_CTL_FORMAT_Y210:
2989 return DRM_FORMAT_Y210;
2990 case PLANE_CTL_FORMAT_Y212:
2991 return DRM_FORMAT_Y212;
2992 case PLANE_CTL_FORMAT_Y216:
2993 return DRM_FORMAT_Y216;
2994 case PLANE_CTL_FORMAT_Y410:
2995 return DRM_FORMAT_XVYU2101010;
2996 case PLANE_CTL_FORMAT_Y412:
2997 return DRM_FORMAT_XVYU12_16161616;
2998 case PLANE_CTL_FORMAT_Y416:
2999 return DRM_FORMAT_XVYU16161616;
3000 default:
3001 case PLANE_CTL_FORMAT_XRGB_8888:
3002 if (rgb_order) {
3003 if (alpha)
3004 return DRM_FORMAT_ABGR8888;
3005 else
3006 return DRM_FORMAT_XBGR8888;
3007 } else {
3008 if (alpha)
3009 return DRM_FORMAT_ARGB8888;
3010 else
3011 return DRM_FORMAT_XRGB8888;
3012 }
3013 case PLANE_CTL_FORMAT_XRGB_2101010:
3014 if (rgb_order)
3015 return DRM_FORMAT_XBGR2101010;
3016 else
3017 return DRM_FORMAT_XRGB2101010;
3018 case PLANE_CTL_FORMAT_XRGB_16161616F:
3019 if (rgb_order) {
3020 if (alpha)
3021 return DRM_FORMAT_ABGR16161616F;
3022 else
3023 return DRM_FORMAT_XBGR16161616F;
3024 } else {
3025 if (alpha)
3026 return DRM_FORMAT_ARGB16161616F;
3027 else
3028 return DRM_FORMAT_XRGB16161616F;
3029 }
3030 }
3031 }
3032
3033 static bool
3034 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3035 struct intel_initial_plane_config *plane_config)
3036 {
3037 struct drm_device *dev = crtc->base.dev;
3038 struct drm_i915_private *dev_priv = to_i915(dev);
3039 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3040 struct drm_framebuffer *fb = &plane_config->fb->base;
3041 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3042 u32 size_aligned = round_up(plane_config->base + plane_config->size,
3043 PAGE_SIZE);
3044 struct drm_i915_gem_object *obj;
3045 bool ret = false;
3046
3047 size_aligned -= base_aligned;
3048
3049 if (plane_config->size == 0)
3050 return false;
3051
3052 /* If the FB is too big, just don't use it since fbdev is not very
3053 * important and we should probably use that space with FBC or other
3054 * features. */
3055 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3056 return false;
3057
3058 switch (fb->modifier) {
3059 case DRM_FORMAT_MOD_LINEAR:
3060 case I915_FORMAT_MOD_X_TILED:
3061 case I915_FORMAT_MOD_Y_TILED:
3062 break;
3063 default:
3064 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3065 fb->modifier);
3066 return false;
3067 }
3068
3069 mutex_lock(&dev->struct_mutex);
3070 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3071 base_aligned,
3072 base_aligned,
3073 size_aligned);
3074 mutex_unlock(&dev->struct_mutex);
3075 if (!obj)
3076 return false;
3077
3078 switch (plane_config->tiling) {
3079 case I915_TILING_NONE:
3080 break;
3081 case I915_TILING_X:
3082 case I915_TILING_Y:
3083 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3084 break;
3085 default:
3086 MISSING_CASE(plane_config->tiling);
3087 goto out;
3088 }
3089
3090 mode_cmd.pixel_format = fb->format->format;
3091 mode_cmd.width = fb->width;
3092 mode_cmd.height = fb->height;
3093 mode_cmd.pitches[0] = fb->pitches[0];
3094 mode_cmd.modifier[0] = fb->modifier;
3095 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3096
3097 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3098 DRM_DEBUG_KMS("intel fb init failed\n");
3099 goto out;
3100 }
3101
3103 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3104 ret = true;
3105 out:
3106 i915_gem_object_put(obj);
3107 return ret;
3108 }
3109
3110 static void
3111 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3112 struct intel_plane_state *plane_state,
3113 bool visible)
3114 {
3115 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3116
3117 plane_state->base.visible = visible;
3118
3119 if (visible)
3120 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3121 else
3122 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3123 }
3124
3125 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3126 {
3127 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3128 struct drm_plane *plane;
3129
3130 /*
3131 * Active_planes aliases if multiple "primary" or cursor planes
3132 * have been used on the same (or wrong) pipe. plane_mask uses
3133 * unique ids, hence we can use that to reconstruct active_planes.
3134 */
3135 crtc_state->active_planes = 0;
3136
3137 drm_for_each_plane_mask(plane, &dev_priv->drm,
3138 crtc_state->base.plane_mask)
3139 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3140 }
3141
3142 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3143 struct intel_plane *plane)
3144 {
3145 struct intel_crtc_state *crtc_state =
3146 to_intel_crtc_state(crtc->base.state);
3147 struct intel_plane_state *plane_state =
3148 to_intel_plane_state(plane->base.state);
3149
3150 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3151 plane->base.base.id, plane->base.name,
3152 crtc->base.base.id, crtc->base.name);
3153
3154 intel_set_plane_visible(crtc_state, plane_state, false);
3155 fixup_active_planes(crtc_state);
3156 crtc_state->data_rate[plane->id] = 0;
3157
3158 if (plane->id == PLANE_PRIMARY)
3159 intel_pre_disable_primary_noatomic(&crtc->base);
3160
3161 intel_disable_plane(plane, crtc_state);
3162 }
3163
3164 static struct intel_frontbuffer *
3165 to_intel_frontbuffer(struct drm_framebuffer *fb)
3166 {
3167 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3168 }
3169
3170 static void
3171 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3172 struct intel_initial_plane_config *plane_config)
3173 {
3174 struct drm_device *dev = intel_crtc->base.dev;
3175 struct drm_i915_private *dev_priv = to_i915(dev);
3176 struct drm_crtc *c;
3177 struct drm_plane *primary = intel_crtc->base.primary;
3178 struct drm_plane_state *plane_state = primary->state;
3179 struct intel_plane *intel_plane = to_intel_plane(primary);
3180 struct intel_plane_state *intel_state =
3181 to_intel_plane_state(plane_state);
3182 struct drm_framebuffer *fb;
3183
3184 if (!plane_config->fb)
3185 return;
3186
3187 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3188 fb = &plane_config->fb->base;
3189 goto valid_fb;
3190 }
3191
3192 kfree(plane_config->fb);
3193
3194 /*
3195 * Failed to alloc the obj, check to see if we should share
3196 * an fb with another CRTC instead
3197 */
3198 for_each_crtc(dev, c) {
3199 struct intel_plane_state *state;
3200
3201 if (c == &intel_crtc->base)
3202 continue;
3203
3204 if (!to_intel_crtc(c)->active)
3205 continue;
3206
3207 state = to_intel_plane_state(c->primary->state);
3208 if (!state->vma)
3209 continue;
3210
3211 if (intel_plane_ggtt_offset(state) == plane_config->base) {
3212 fb = state->base.fb;
3213 drm_framebuffer_get(fb);
3214 goto valid_fb;
3215 }
3216 }
3217
3218 /*
3219 * We've failed to reconstruct the BIOS FB. Current display state
3220 * indicates that the primary plane is visible, but has a NULL FB,
3221 * which will lead to problems later if we don't fix it up. The
3222 * simplest solution is to just disable the primary plane now and
3223 * pretend the BIOS never had it enabled.
3224 */
3225 intel_plane_disable_noatomic(intel_crtc, intel_plane);
3226
3227 return;
3228
3229 valid_fb:
3230 intel_state->base.rotation = plane_config->rotation;
3231 intel_fill_fb_ggtt_view(&intel_state->view, fb,
3232 intel_state->base.rotation);
3233 intel_state->color_plane[0].stride =
3234 intel_fb_pitch(fb, 0, intel_state->base.rotation);
3235
3236 mutex_lock(&dev->struct_mutex);
3237 intel_state->vma =
3238 intel_pin_and_fence_fb_obj(fb,
3239 &intel_state->view,
3240 intel_plane_uses_fence(intel_state),
3241 &intel_state->flags);
3242 mutex_unlock(&dev->struct_mutex);
3243 if (IS_ERR(intel_state->vma)) {
3244 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3245 intel_crtc->pipe, PTR_ERR(intel_state->vma));
3246
3247 intel_state->vma = NULL;
3248 drm_framebuffer_put(fb);
3249 return;
3250 }
3251
3252 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
3253
3254 plane_state->src_x = 0;
3255 plane_state->src_y = 0;
3256 plane_state->src_w = fb->width << 16;
3257 plane_state->src_h = fb->height << 16;
3258
3259 plane_state->crtc_x = 0;
3260 plane_state->crtc_y = 0;
3261 plane_state->crtc_w = fb->width;
3262 plane_state->crtc_h = fb->height;
3263
3264 intel_state->base.src = drm_plane_state_src(plane_state);
3265 intel_state->base.dst = drm_plane_state_dest(plane_state);
3266
3267 if (plane_config->tiling)
3268 dev_priv->preserve_bios_swizzle = true;
3269
3270 plane_state->fb = fb;
3271 plane_state->crtc = &intel_crtc->base;
3272
3273 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3274 &to_intel_frontbuffer(fb)->bits);
3275 }
3276
3277 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3278 int color_plane,
3279 unsigned int rotation)
3280 {
3281 int cpp = fb->format->cpp[color_plane];
3282
3283 switch (fb->modifier) {
3284 case DRM_FORMAT_MOD_LINEAR:
3285 case I915_FORMAT_MOD_X_TILED:
3286 /*
3287 * The validated limit is 4k, but 5k should
3288 * work apart from the following features:
3289 * - Ytile (already limited to 4k)
3290 * - FP16 (already limited to 4k)
3291 * - render compression (already limited to 4k)
3292 * - KVMR sprite and cursor (don't care)
3293 * - horizontal panning (TODO verify this)
3294 * - pipe and plane scaling (TODO verify this)
3295 */
3296 if (cpp == 8)
3297 return 4096;
3298 else
3299 return 5120;
3300 case I915_FORMAT_MOD_Y_TILED_CCS:
3301 case I915_FORMAT_MOD_Yf_TILED_CCS:
3302 /* FIXME AUX plane? */
3303 case I915_FORMAT_MOD_Y_TILED:
3304 case I915_FORMAT_MOD_Yf_TILED:
3305 if (cpp == 8)
3306 return 2048;
3307 else
3308 return 4096;
3309 default:
3310 MISSING_CASE(fb->modifier);
3311 return 2048;
3312 }
3313 }
3314
3315 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3316 int color_plane,
3317 unsigned int rotation)
3318 {
3319 int cpp = fb->format->cpp[color_plane];
3320
3321 switch (fb->modifier) {
3322 case DRM_FORMAT_MOD_LINEAR:
3323 case I915_FORMAT_MOD_X_TILED:
3324 if (cpp == 8)
3325 return 4096;
3326 else
3327 return 5120;
3328 case I915_FORMAT_MOD_Y_TILED_CCS:
3329 case I915_FORMAT_MOD_Yf_TILED_CCS:
3330 /* FIXME AUX plane? */
3331 case I915_FORMAT_MOD_Y_TILED:
3332 case I915_FORMAT_MOD_Yf_TILED:
3333 if (cpp == 8)
3334 return 2048;
3335 else
3336 return 5120;
3337 default:
3338 MISSING_CASE(fb->modifier);
3339 return 2048;
3340 }
3341 }
3342
icl_max_plane_width(const struct drm_framebuffer * fb,int color_plane,unsigned int rotation)3343 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3344 int color_plane,
3345 unsigned int rotation)
3346 {
3347 return 5120;
3348 }
3349
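/*
 * The CCS AUX surface has no x/y offset registers of its own, so the only
 * degree of freedom is its base offset: walk it down one alignment step
 * at a time until the implied x/y coordinates line up with the main
 * surface, or give up once offset 0 is reached.
 */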
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}

static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width;
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * The AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceeds the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested.
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * The CCS AUX surface doesn't have its own x/y offsets, so we must
	 * make sure they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (x << 16) - plane_state->base.src.x1,
			   (y << 16) - plane_state->base.src.y1);

	return 0;
}

static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}

static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}

int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (is_planar_yuv_format(fb->format->format)) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
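		/*
		 * No AUX surface in use: presumably ~0xfff acts as an
		 * (aligned) poison offset so that any stray use of
		 * color_plane[1] stands out immediately.
		 */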
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}

unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
		      u32 pixel_format, u64 modifier,
		      unsigned int rotation)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	if (!HAS_GMCH(dev_priv)) {
		return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		if (plane->i9xx_plane == PLANE_C)
			return 4*1024;
		else
			return 8*1024;
	}
}

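/*
 * Note: the _crtc() variants below collect the DSPCNTR bits that depend
 * only on crtc state; keeping them separate lets the plane disable path
 * still program them (see i9xx_disable_plane()).
 */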
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dspcntr = 0;

	if (crtc_state->gamma_enable)
		dspcntr |= DISPPLANE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	return dspcntr;
}

static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}

int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	src_x = plane_state->base.src.x1 >> 16;
	src_y = plane_state->base.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (src_x << 16) - plane_state->base.src.x1,
			   (src_y << 16) - plane_state->base.src.y1);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->base.rotation;
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

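		/*
		 * Rotation/reflection changes which source pixel is
		 * scanned out first, so point the offsets at the last
		 * pixel (and/or line) of the source instead of the first.
		 */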
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}

static bool i9xx_plane_has_windowing(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;

	if (IS_CHERRYVIEW(dev_priv))
		return i9xx_plane == PLANE_B;
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return false;
	else if (IS_GEN(dev_priv, 4))
		return i9xx_plane == PLANE_C;
	else
		return i9xx_plane == PLANE_B ||
			i9xx_plane == PLANE_C;
}

static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}

static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->base.dst.x1;
	int crtc_y = plane_state->base.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->base.dst);
	int crtc_h = drm_rect_height(&plane_state->base.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}

static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (a.k.a. unbinds) unused scalers in hardware.
 */
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	/*
	 * The stride is expressed either as a multiple of 64-byte chunks
	 * for linear buffers or as a number of tiles for tiled buffers.
	 */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
		return 64;
	else if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);
	else
		return intel_tile_width_bytes(fb, color_plane);
}
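
/*
 * Illustrative example: with a 7680-byte stride, a linear fb programs
 * 7680 / 64 = 120 chunks, while an X-tiled fb (512-byte-wide tiles)
 * programs 7680 / 512 = 15 tiles.
 */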

u32 skl_plane_stride(const struct intel_plane_state *plane_state,
		     int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride = plane_state->color_plane[color_plane].stride;

	if (color_plane >= fb->format->num_planes)
		return 0;

	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}

static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_CTL_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_CTL_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_CTL_ALPHA_DISABLE;
	}
}

static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_COLOR_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}

static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_* is counter-clockwise, to stay compatible
	 * with Xrandr, while the i915 HW rotation is clockwise; that's
	 * why the 90 and 270 degree values are swapped here.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}

static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}

u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	u32 plane_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return plane_ctl;

	if (crtc_state->gamma_enable)
		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	return plane_ctl;
}

u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}

u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	u32 plane_color_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		return plane_color_ctl;

	if (crtc_state->gamma_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;

	return plane_color_ctl;
}

u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}

static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, so pointers to the old state are
	 * invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}

static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(dev_priv));
}

void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so we need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}

static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * Enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff.
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA #1605353570: icl
	 * Set the pixel rounding bit to 1 to allow frame buffer
	 * pixels to pass through the pipe unmodified.
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}

static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}

static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
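/*
 * In outline: both the CPU (TX) and PCH (RX) side are put into training
 * pattern 1 until the receiver reports bit lock in FDI_RX_IIR, then into
 * pattern 2 until symbol lock, after which the link can be switched to
 * the normal pattern (see intel_fdi_normal_train() above).
 */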
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/*
	 * Train 1: unmask the FDI RX Interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
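/*
 * Unlike ILK, the right voltage swing / pre-emphasis level is not known
 * up front here, so each training pattern is retried across the four
 * snb_b_fdi_train_param[] entries, polling FDI_RX_IIR up to five times
 * per entry before moving on to the next one.
 */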
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Train 1: unmask the FDI RX Interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/*
	 * Train 1: unmask the FDI RX Interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}

static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/*
	 * The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to kHz here for higher
	 * precision.
	 */
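	/*
	 * Worked example (illustrative numbers): for a 148500 kHz
	 * crtc_clock and auxdiv=0, desired_divisor =
	 * DIV_ROUND_CLOSEST(172800000, 148500) = 1164, giving
	 * divsel = 1164 / 64 - 2 = 16 and phaseinc = 1164 % 64 = 12;
	 * divsel fits in 7 bits, so auxdiv=0 is kept.
	 */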
5008 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
5009 u32 iclk_virtual_root_freq = 172800 * 1000;
5010 u32 iclk_pi_range = 64;
5011 u32 desired_divisor;
5012
5013 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5014 clock << auxdiv);
5015 divsel = (desired_divisor / iclk_pi_range) - 2;
5016 phaseinc = desired_divisor % iclk_pi_range;
5017
5018 /*
5019 * Near 20MHz is a corner case which is
5020 * out of range for the 7-bit divisor
5021 */
5022 if (divsel <= 0x7f)
5023 break;
5024 }
5025
5026 /* This should not happen with any sane values */
5027 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
5028 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5029 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
5030 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5031
5032 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5033 clock,
5034 auxdiv,
5035 divsel,
5036 phasedir,
5037 phaseinc);
5038
5039 mutex_lock(&dev_priv->sb_lock);
5040
5041 /* Program SSCDIVINTPHASE6 */
5042 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5043 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5044 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5045 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5046 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5047 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5048 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5049 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5050
5051 /* Program SSCAUXDIV */
5052 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5053 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5054 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5055 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5056
5057 /* Enable modulator and associated divider */
5058 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5059 temp &= ~SBI_SSCCTL_DISABLE;
5060 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5061
5062 mutex_unlock(&dev_priv->sb_lock);
5063
5064 /* Wait for initialization time */
5065 udelay(24);
5066
5067 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5068 }
5069
5070 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5071 {
5072 u32 divsel, phaseinc, auxdiv;
5073 u32 iclk_virtual_root_freq = 172800 * 1000;
5074 u32 iclk_pi_range = 64;
5075 u32 desired_divisor;
5076 u32 temp;
5077
5078 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5079 return 0;
5080
5081 mutex_lock(&dev_priv->sb_lock);
5082
5083 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5084 if (temp & SBI_SSCCTL_DISABLE) {
5085 mutex_unlock(&dev_priv->sb_lock);
5086 return 0;
5087 }
5088
5089 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5090 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5091 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5092 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5093 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5094
5095 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5096 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5097 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5098
5099 mutex_unlock(&dev_priv->sb_lock);
5100
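/*
 * Invert the divisor split done in lpt_program_iclkip(); e.g.
 * divsel = 23, phaseinc = 0, auxdiv = 0 reconstructs
 * desired_divisor = (23 + 2) * 64 + 0 = 1600 and a clock of
 * DIV_ROUND_CLOSEST(172800000, 1600) = 108000 KHz.
 */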
5101 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5102
5103 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5104 desired_divisor << auxdiv);
5105 }
5106
5107 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5108 enum pipe pch_transcoder)
5109 {
5110 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5111 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5112 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5113
5114 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5115 I915_READ(HTOTAL(cpu_transcoder)));
5116 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5117 I915_READ(HBLANK(cpu_transcoder)));
5118 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5119 I915_READ(HSYNC(cpu_transcoder)));
5120
5121 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5122 I915_READ(VTOTAL(cpu_transcoder)));
5123 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5124 I915_READ(VBLANK(cpu_transcoder)));
5125 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5126 I915_READ(VSYNC(cpu_transcoder)));
5127 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5128 I915_READ(VSYNCSHIFT(cpu_transcoder)));
5129 }
5130
5131 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5132 {
5133 u32 temp;
5134
5135 temp = I915_READ(SOUTH_CHICKEN1);
5136 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5137 return;
5138
5139 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5140 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5141
5142 temp &= ~FDI_BC_BIFURCATION_SELECT;
5143 if (enable)
5144 temp |= FDI_BC_BIFURCATION_SELECT;
5145
5146 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5147 I915_WRITE(SOUTH_CHICKEN1, temp);
5148 POSTING_READ(SOUTH_CHICKEN1);
5149 }
5150
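/*
 * Sketch of the lane budget this encodes (an assumption drawn from the
 * checks below, not a bspec quote): FDI B and C share a pool of lanes
 * on CPT, so bifurcation splits them between the two links, and pipe B
 * may use more than 2 lanes only with bifurcation off. Pipe C always
 * runs bifurcated.
 */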
5151 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5152 {
5153 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5154 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5155
5156 switch (crtc->pipe) {
5157 case PIPE_A:
5158 break;
5159 case PIPE_B:
5160 if (crtc_state->fdi_lanes > 2)
5161 cpt_set_fdi_bc_bifurcation(dev_priv, false);
5162 else
5163 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5164
5165 break;
5166 case PIPE_C:
5167 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5168
5169 break;
5170 default:
5171 BUG();
5172 }
5173 }
5174
5175 /*
5176 * Finds the encoder associated with the given CRTC. This can only be
5177 * used when we know that the CRTC isn't feeding multiple encoders!
5178 */
5179 static struct intel_encoder *
5180 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5181 const struct intel_crtc_state *crtc_state)
5182 {
5183 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5184 const struct drm_connector_state *connector_state;
5185 const struct drm_connector *connector;
5186 struct intel_encoder *encoder = NULL;
5187 int num_encoders = 0;
5188 int i;
5189
5190 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5191 if (connector_state->crtc != &crtc->base)
5192 continue;
5193
5194 encoder = to_intel_encoder(connector_state->best_encoder);
5195 num_encoders++;
5196 }
5197
5198 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5199 num_encoders, pipe_name(crtc->pipe));
5200
5201 return encoder;
5202 }
5203
5204 /*
5205 * Enable PCH resources required for PCH ports:
5206 * - PCH PLLs
5207 * - FDI training & RX/TX
5208 * - update transcoder timings
5209 * - DP transcoding bits
5210 * - transcoder
5211 */
5212 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5213 const struct intel_crtc_state *crtc_state)
5214 {
5215 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5216 struct drm_device *dev = crtc->base.dev;
5217 struct drm_i915_private *dev_priv = to_i915(dev);
5218 int pipe = crtc->pipe;
5219 u32 temp;
5220
5221 assert_pch_transcoder_disabled(dev_priv, pipe);
5222
5223 if (IS_IVYBRIDGE(dev_priv))
5224 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5225
5226 /* Write the TU size bits before fdi link training, so that error
5227 * detection works. */
5228 I915_WRITE(FDI_RX_TUSIZE1(pipe),
5229 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5230
5231 /* For PCH output, training FDI link */
5232 dev_priv->display.fdi_link_train(crtc, crtc_state);
5233
5234 /* We need to program the right clock selection before writing the pixel
5235 * multiplier into the DPLL. */
5236 if (HAS_PCH_CPT(dev_priv)) {
5237 u32 sel;
5238
5239 temp = I915_READ(PCH_DPLL_SEL);
5240 temp |= TRANS_DPLL_ENABLE(pipe);
5241 sel = TRANS_DPLLB_SEL(pipe);
5242 if (crtc_state->shared_dpll ==
5243 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5244 temp |= sel;
5245 else
5246 temp &= ~sel;
5247 I915_WRITE(PCH_DPLL_SEL, temp);
5248 }
5249
5250 /* XXX: PCH PLLs can be enabled any time before we enable the PCH
5251 * transcoder, and we actually should do this to not upset any PCH
5252 * transcoder that already uses the clock when we share it.
5253 *
5254 * Note that enable_shared_dpll tries to do the right thing, but
5255 * get_shared_dpll unconditionally resets the pll - we need that to have
5256 * the right LVDS enable sequence. */
5257 intel_enable_shared_dpll(crtc_state);
5258
5259 /* set transcoder timing, panel must allow it */
5260 assert_panel_unlocked(dev_priv, pipe);
5261 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5262
5263 intel_fdi_normal_train(crtc);
5264
5265 /* For PCH DP, enable TRANS_DP_CTL */
5266 if (HAS_PCH_CPT(dev_priv) &&
5267 intel_crtc_has_dp_encoder(crtc_state)) {
5268 const struct drm_display_mode *adjusted_mode =
5269 &crtc_state->base.adjusted_mode;
5270 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5271 i915_reg_t reg = TRANS_DP_CTL(pipe);
5272 enum port port;
5273
5274 temp = I915_READ(reg);
5275 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5276 TRANS_DP_SYNC_MASK |
5277 TRANS_DP_BPC_MASK);
5278 temp |= TRANS_DP_OUTPUT_ENABLE;
5279 temp |= bpc << 9; /* same format but at 11:9 */
5280
5281 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5282 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5283 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5284 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5285
5286 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5287 WARN_ON(port < PORT_B || port > PORT_D);
5288 temp |= TRANS_DP_PORT_SEL(port);
5289
5290 I915_WRITE(reg, temp);
5291 }
5292
5293 ironlake_enable_pch_transcoder(crtc_state);
5294 }
5295
5296 static void lpt_pch_enable(const struct intel_atomic_state *state,
5297 const struct intel_crtc_state *crtc_state)
5298 {
5299 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5300 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5301 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5302
5303 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5304
5305 lpt_program_iclkip(crtc_state);
5306
5307 /* Set transcoder timing. */
5308 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5309
5310 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5311 }
5312
5313 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
5314 {
5315 struct drm_i915_private *dev_priv = to_i915(dev);
5316 i915_reg_t dslreg = PIPEDSL(pipe);
5317 u32 temp;
5318
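/*
 * Sanity check that the pipe actually started scanning out after the
 * modeset: sample the scanline counter (PIPEDSL) and allow two 5 ms
 * windows for it to move; an unchanged value means the pipe is stuck.
 */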
5319 temp = I915_READ(dslreg);
5320 udelay(500);
5321 if (wait_for(I915_READ(dslreg) != temp, 5)) {
5322 if (wait_for(I915_READ(dslreg) != temp, 5))
5323 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5324 }
5325 }
5326
5327 /*
5328 * The hardware phase 0.0 refers to the center of the pixel.
5329 * We want to start from the top/left edge which is phase
5330 * -0.5. That matches how the hardware calculates the scaling
5331 * factors (from top-left of the first pixel to bottom-right
5332 * of the last pixel, as opposed to the pixel centers).
5333 *
5334 * For 4:2:0 subsampled chroma planes we obviously have to
5335 * adjust that so that the chroma sample position lands in
5336 * the right spot.
5337 *
5338 * Note that for packed YCbCr 4:2:2 formats there is no way to
5339 * control chroma siting. The hardware simply replicates the
5340 * chroma samples for both of the luma samples, and thus we don't
5341 * actually get the expected MPEG2 chroma siting convention :(
5342 * The same behaviour is observed on pre-SKL platforms as well.
5343 *
5344 * Theory behind the formula (note that we ignore sub-pixel
5345 * source coordinates):
5346 * s = source sample position
5347 * d = destination sample position
5348 *
5349 * Downscaling 4:1:
5350 * -0.5
5351 * | 0.0
5352 * | | 1.5 (initial phase)
5353 * | | |
5354 * v v v
5355 * | s | s | s | s |
5356 * | d |
5357 *
5358 * Upscaling 1:4:
5359 * -0.5
5360 * | -0.375 (initial phase)
5361 * | | 0.0
5362 * | | |
5363 * v v v
5364 * | s |
5365 * | d | d | d | d |
5366 */
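/*
 * Worked example of the formula below (illustrative): a cosited chroma
 * plane upscaled 2x has sub = 2 and scale = 0x8000 (0.5 in .16 fixed
 * point), so phase = -0x8000 + 0x8000 / 2 + 0x8000 / 4 = -0x2000
 * (-0.125), which wraps to 0x10000 - 0x2000 with no PS_PHASE_TRIP
 * bit set.
 */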
5367 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5368 {
5369 int phase = -0x8000;
5370 u16 trip = 0;
5371
5372 if (chroma_cosited)
5373 phase += (sub - 1) * 0x8000 / sub;
5374
5375 phase += scale / (2 * sub);
5376
5377 /*
5378 * Hardware initial phase limited to [-0.5:1.5].
5379 * Since the max hardware scale factor is 3.0, we
5380 * should never actually exceed 1.0 here.
5381 */
5382 WARN_ON(phase < -0x8000 || phase > 0x18000);
5383
5384 if (phase < 0)
5385 phase = 0x10000 + phase;
5386 else
5387 trip = PS_PHASE_TRIP;
5388
5389 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5390 }
5391
5392 #define SKL_MIN_SRC_W 8
5393 #define SKL_MAX_SRC_W 4096
5394 #define SKL_MIN_SRC_H 8
5395 #define SKL_MAX_SRC_H 4096
5396 #define SKL_MIN_DST_W 8
5397 #define SKL_MAX_DST_W 4096
5398 #define SKL_MIN_DST_H 8
5399 #define SKL_MAX_DST_H 4096
5400 #define ICL_MAX_SRC_W 5120
5401 #define ICL_MAX_SRC_H 4096
5402 #define ICL_MAX_DST_W 5120
5403 #define ICL_MAX_DST_H 4096
5404 #define SKL_MIN_YUV_420_SRC_W 16
5405 #define SKL_MIN_YUV_420_SRC_H 16
5406
5407 static int
5408 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5409 unsigned int scaler_user, int *scaler_id,
5410 int src_w, int src_h, int dst_w, int dst_h,
5411 const struct drm_format_info *format, bool need_scaler)
5412 {
5413 struct intel_crtc_scaler_state *scaler_state =
5414 &crtc_state->scaler_state;
5415 struct intel_crtc *intel_crtc =
5416 to_intel_crtc(crtc_state->base.crtc);
5417 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5418 const struct drm_display_mode *adjusted_mode =
5419 &crtc_state->base.adjusted_mode;
5420
5421 /*
5422 * Src coordinates are already rotated by 270 degrees for
5423 * the 90/270 degree plane rotation cases (to match the
5424 * GTT mapping), hence no need to account for rotation here.
5425 */
5426 if (src_w != dst_w || src_h != dst_h)
5427 need_scaler = true;
5428
5429 /*
5430 * Scaling/fitting not supported in IF-ID mode in GEN9+
5431 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5432 * Once NV12 is enabled, handle it here while allocating scaler
5433 * for NV12.
5434 */
5435 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5436 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5437 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5438 return -EINVAL;
5439 }
5440
5441 /*
5442 * if the plane is being disabled, the scaler is no longer required,
5443 * or a forced detach was requested:
5444 * - free the scaler bound to this plane/crtc
5445 * - in order to do this, update crtc->scaler_usage
5446 *
5447 * Here the scaler state in crtc_state is set free so that the
5448 * scaler can be assigned to another user. The actual register
5449 * update to free the scaler is done in plane/panel-fit programming.
5450 */
5451 if (force_detach || !need_scaler) {
5452 if (*scaler_id >= 0) {
5453 scaler_state->scaler_users &= ~(1 << scaler_user);
5454 scaler_state->scalers[*scaler_id].in_use = 0;
5455
5456 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5457 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5458 intel_crtc->pipe, scaler_user, *scaler_id,
5459 scaler_state->scaler_users);
5460 *scaler_id = -1;
5461 }
5462 return 0;
5463 }
5464
5465 if (format && is_planar_yuv_format(format->format) &&
5466 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5467 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5468 return -EINVAL;
5469 }
5470
5471 /* range checks */
5472 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5473 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5474 (INTEL_GEN(dev_priv) >= 11 &&
5475 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5476 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5477 (INTEL_GEN(dev_priv) < 11 &&
5478 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5479 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5480 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5481 "size is out of scaler range\n",
5482 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5483 return -EINVAL;
5484 }
5485
5486 /* mark this plane as a scaler user in crtc_state */
5487 scaler_state->scaler_users |= (1 << scaler_user);
5488 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5489 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5490 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5491 scaler_state->scaler_users);
5492
5493 return 0;
5494 }
5495
5496 /**
5497 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5498 *
5499 * @state: crtc's scaler state
5500 *
5501 * Return
5502 * 0 - scaler_usage updated successfully
5503 * error - requested scaling cannot be supported or other error condition
5504 */
5505 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5506 {
5507 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5508 bool need_scaler = false;
5509
5510 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5511 need_scaler = true;
5512
5513 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5514 &state->scaler_state.scaler_id,
5515 state->pipe_src_w, state->pipe_src_h,
5516 adjusted_mode->crtc_hdisplay,
5517 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5518 }
5519
5520 /**
5521 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5522 * @crtc_state: crtc's scaler state
5523 * @plane_state: atomic plane state to update
5524 *
5525 * Return
5526 * 0 - scaler_usage updated successfully
5527 * error - requested scaling cannot be supported or other error condition
5528 */
5529 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5530 struct intel_plane_state *plane_state)
5531 {
5532 struct intel_plane *intel_plane =
5533 to_intel_plane(plane_state->base.plane);
5534 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5535 struct drm_framebuffer *fb = plane_state->base.fb;
5536 int ret;
5537 bool force_detach = !fb || !plane_state->base.visible;
5538 bool need_scaler = false;
5539
5540 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5541 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5542 fb && is_planar_yuv_format(fb->format->format))
5543 need_scaler = true;
5544
5545 ret = skl_update_scaler(crtc_state, force_detach,
5546 drm_plane_index(&intel_plane->base),
5547 &plane_state->scaler_id,
5548 drm_rect_width(&plane_state->base.src) >> 16,
5549 drm_rect_height(&plane_state->base.src) >> 16,
5550 drm_rect_width(&plane_state->base.dst),
5551 drm_rect_height(&plane_state->base.dst),
5552 fb ? fb->format : NULL, need_scaler);
5553
5554 if (ret || plane_state->scaler_id < 0)
5555 return ret;
5556
5557 /* check colorkey */
5558 if (plane_state->ckey.flags) {
5559 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5560 intel_plane->base.base.id,
5561 intel_plane->base.name);
5562 return -EINVAL;
5563 }
5564
5565 /* Check src format */
5566 switch (fb->format->format) {
5567 case DRM_FORMAT_RGB565:
5568 case DRM_FORMAT_XBGR8888:
5569 case DRM_FORMAT_XRGB8888:
5570 case DRM_FORMAT_ABGR8888:
5571 case DRM_FORMAT_ARGB8888:
5572 case DRM_FORMAT_XRGB2101010:
5573 case DRM_FORMAT_XBGR2101010:
5574 case DRM_FORMAT_XBGR16161616F:
5575 case DRM_FORMAT_ABGR16161616F:
5576 case DRM_FORMAT_XRGB16161616F:
5577 case DRM_FORMAT_ARGB16161616F:
5578 case DRM_FORMAT_YUYV:
5579 case DRM_FORMAT_YVYU:
5580 case DRM_FORMAT_UYVY:
5581 case DRM_FORMAT_VYUY:
5582 case DRM_FORMAT_NV12:
5583 case DRM_FORMAT_P010:
5584 case DRM_FORMAT_P012:
5585 case DRM_FORMAT_P016:
5586 case DRM_FORMAT_Y210:
5587 case DRM_FORMAT_Y212:
5588 case DRM_FORMAT_Y216:
5589 case DRM_FORMAT_XVYU2101010:
5590 case DRM_FORMAT_XVYU12_16161616:
5591 case DRM_FORMAT_XVYU16161616:
5592 break;
5593 default:
5594 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5595 intel_plane->base.base.id, intel_plane->base.name,
5596 fb->base.id, fb->format->format);
5597 return -EINVAL;
5598 }
5599
5600 return 0;
5601 }
5602
5603 static void skylake_scaler_disable(struct intel_crtc *crtc)
5604 {
5605 int i;
5606
5607 for (i = 0; i < crtc->num_scalers; i++)
5608 skl_detach_scaler(crtc, i);
5609 }
5610
5611 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5612 {
5613 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5614 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5615 enum pipe pipe = crtc->pipe;
5616 const struct intel_crtc_scaler_state *scaler_state =
5617 &crtc_state->scaler_state;
5618
5619 if (crtc_state->pch_pfit.enabled) {
5620 u16 uv_rgb_hphase, uv_rgb_vphase;
5621 int pfit_w, pfit_h, hscale, vscale;
5622 int id;
5623
5624 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5625 return;
5626
5627 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5628 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5629
5630 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5631 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5632
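/*
 * hscale/vscale are src/dst ratios in 16.16 fixed point; e.g. a
 * 1920-wide source fit into a 1280-wide pfit window gives
 * hscale = (1920 << 16) / 1280 = 0x18000 (1.5). These scales feed
 * the initial-phase calculation documented above.
 */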
5633 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5634 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5635
5636 id = scaler_state->scaler_id;
5637 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5638 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5639 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5640 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5641 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5642 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5643 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5644 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5645 }
5646 }
5647
5648 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5649 {
5650 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5651 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5652 int pipe = crtc->pipe;
5653
5654 if (crtc_state->pch_pfit.enabled) {
5655 /* Force use of hard-coded filter coefficients
5656 * as some pre-programmed values are broken,
5657 * e.g. x201.
5658 */
5659 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5660 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5661 PF_PIPE_SEL_IVB(pipe));
5662 else
5663 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5664 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5665 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5666 }
5667 }
5668
5669 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5670 {
5671 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5672 struct drm_device *dev = crtc->base.dev;
5673 struct drm_i915_private *dev_priv = to_i915(dev);
5674
5675 if (!crtc_state->ips_enabled)
5676 return;
5677
5678 /*
5679 * We can only enable IPS after we enable a plane and wait for a vblank.
5680 * This function is called from post_plane_update, which is run after
5681 * a vblank wait.
5682 */
5683 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5684
5685 if (IS_BROADWELL(dev_priv)) {
5686 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5687 IPS_ENABLE | IPS_PCODE_CONTROL));
5688 /* Quoting Art Runyan: "it's not safe to expect any particular
5689 * value in IPS_CTL bit 31 after enabling IPS through the
5690 * mailbox." Moreover, the mailbox may return a bogus state,
5691 * so we need to just enable it and continue on.
5692 */
5693 } else {
5694 I915_WRITE(IPS_CTL, IPS_ENABLE);
5695 /* The bit only becomes 1 in the next vblank, so this wait here
5696 * is essentially intel_wait_for_vblank. If we don't have this
5697 * and don't wait for vblanks until the end of crtc_enable, then
5698 * the HW state readout code will complain that the expected
5699 * IPS_CTL value is not the one we read. */
5700 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
5701 DRM_ERROR("Timed out waiting for IPS enable\n");
5702 }
5703 }
5704
5705 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5706 {
5707 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5708 struct drm_device *dev = crtc->base.dev;
5709 struct drm_i915_private *dev_priv = to_i915(dev);
5710
5711 if (!crtc_state->ips_enabled)
5712 return;
5713
5714 if (IS_BROADWELL(dev_priv)) {
5715 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5716 /*
5717 * Wait for PCODE to finish disabling IPS. The BSpec-specified
5718 * 42ms timeout value leads to occasional timeouts, so use 100ms
5719 * instead.
5720 */
5721 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
5722 DRM_ERROR("Timed out waiting for IPS disable\n");
5723 } else {
5724 I915_WRITE(IPS_CTL, 0);
5725 POSTING_READ(IPS_CTL);
5726 }
5727
5728 /* We need to wait for a vblank before we can disable the plane. */
5729 intel_wait_for_vblank(dev_priv, crtc->pipe);
5730 }
5731
5732 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5733 {
5734 if (intel_crtc->overlay) {
5735 struct drm_device *dev = intel_crtc->base.dev;
5736
5737 mutex_lock(&dev->struct_mutex);
5738 (void) intel_overlay_switch_off(intel_crtc->overlay);
5739 mutex_unlock(&dev->struct_mutex);
5740 }
5741
5742 /* Let userspace switch the overlay on again. In most cases userspace
5743 * has to recompute where to put it anyway.
5744 */
5745 }
5746
5747 /**
5748 * intel_post_enable_primary - Perform operations after enabling primary plane
5749 * @crtc: the CRTC whose primary plane was just enabled
5750 * @new_crtc_state: the enabling state
5751 *
5752 * Performs potentially sleeping operations that must be done after the primary
5753 * plane is enabled, such as updating FBC and IPS. Note that this may be
5754 * called due to an explicit primary plane update, or due to an implicit
5755 * re-enable that is caused when a sprite plane is updated to no longer
5756 * completely hide the primary plane.
5757 */
5758 static void
5759 intel_post_enable_primary(struct drm_crtc *crtc,
5760 const struct intel_crtc_state *new_crtc_state)
5761 {
5762 struct drm_device *dev = crtc->dev;
5763 struct drm_i915_private *dev_priv = to_i915(dev);
5764 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5765 int pipe = intel_crtc->pipe;
5766
5767 /*
5768 * Gen2 reports pipe underruns whenever all planes are disabled.
5769 * So don't enable underrun reporting before at least some planes
5770 * are enabled.
5771 * FIXME: Need to fix the logic to work when we turn off all planes
5772 * but leave the pipe running.
5773 */
5774 if (IS_GEN(dev_priv, 2))
5775 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5776
5777 /* Underruns don't always raise interrupts, so check manually. */
5778 intel_check_cpu_fifo_underruns(dev_priv);
5779 intel_check_pch_fifo_underruns(dev_priv);
5780 }
5781
5782 /* FIXME get rid of this and use pre_plane_update */
5783 static void
5784 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5785 {
5786 struct drm_device *dev = crtc->dev;
5787 struct drm_i915_private *dev_priv = to_i915(dev);
5788 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5789 int pipe = intel_crtc->pipe;
5790
5791 /*
5792 * Gen2 reports pipe underruns whenever all planes are disabled.
5793 * So disable underrun reporting before all the planes get disabled.
5794 */
5795 if (IS_GEN(dev_priv, 2))
5796 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5797
5798 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5799
5800 /*
5801 * Vblank time updates from the shadow to live plane control register
5802 * are blocked if the memory self-refresh mode is active at that
5803 * moment. So to make sure the plane gets truly disabled, disable
5804 * first the self-refresh mode. The self-refresh enable bit in turn
5805 * will be checked/applied by the HW only at the next frame start
5806 * event which is after the vblank start event, so we need to have a
5807 * wait-for-vblank between disabling the plane and the pipe.
5808 */
5809 if (HAS_GMCH(dev_priv) &&
5810 intel_set_memory_cxsr(dev_priv, false))
5811 intel_wait_for_vblank(dev_priv, pipe);
5812 }
5813
5814 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5815 const struct intel_crtc_state *new_crtc_state)
5816 {
5817 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5818 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5819
5820 if (!old_crtc_state->ips_enabled)
5821 return false;
5822
5823 if (needs_modeset(new_crtc_state))
5824 return true;
5825
5826 /*
5827 * Workaround: Do not read or write the pipe palette/gamma data while
5828 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5829 *
5830 * Disable IPS before we program the LUT.
5831 */
5832 if (IS_HASWELL(dev_priv) &&
5833 (new_crtc_state->base.color_mgmt_changed ||
5834 new_crtc_state->update_pipe) &&
5835 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5836 return true;
5837
5838 return !new_crtc_state->ips_enabled;
5839 }
5840
5841 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5842 const struct intel_crtc_state *new_crtc_state)
5843 {
5844 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5845 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5846
5847 if (!new_crtc_state->ips_enabled)
5848 return false;
5849
5850 if (needs_modeset(new_crtc_state))
5851 return true;
5852
5853 /*
5854 * Workaround: Do not read or write the pipe palette/gamma data while
5855 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5856 *
5857 * Re-enable IPS after the LUT has been programmed.
5858 */
5859 if (IS_HASWELL(dev_priv) &&
5860 (new_crtc_state->base.color_mgmt_changed ||
5861 new_crtc_state->update_pipe) &&
5862 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5863 return true;
5864
5865 /*
5866 * We can't read out IPS on broadwell, so assume the worst and
5867 * forcibly enable IPS on the first fastset.
5868 */
5869 if (new_crtc_state->update_pipe &&
5870 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5871 return true;
5872
5873 return !old_crtc_state->ips_enabled;
5874 }
5875
5876 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5877 const struct intel_crtc_state *crtc_state)
5878 {
5879 if (!crtc_state->nv12_planes)
5880 return false;
5881
5882 /* WA Display #0827: Gen9:all */
5883 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5884 return true;
5885
5886 return false;
5887 }
5888
5889 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5890 const struct intel_crtc_state *crtc_state)
5891 {
5892 /* Wa_2006604312:icl */
5893 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5894 return true;
5895
5896 return false;
5897 }
5898
5899 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5900 {
5901 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5902 struct drm_device *dev = crtc->base.dev;
5903 struct drm_i915_private *dev_priv = to_i915(dev);
5904 struct drm_atomic_state *state = old_crtc_state->base.state;
5905 struct intel_crtc_state *pipe_config =
5906 intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
5907 crtc);
5908 struct drm_plane *primary = crtc->base.primary;
5909 struct drm_plane_state *old_primary_state =
5910 drm_atomic_get_old_plane_state(state, primary);
5911
5912 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5913
5914 if (pipe_config->update_wm_post && pipe_config->base.active)
5915 intel_update_watermarks(crtc);
5916
5917 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5918 hsw_enable_ips(pipe_config);
5919
5920 if (old_primary_state) {
5921 struct drm_plane_state *new_primary_state =
5922 drm_atomic_get_new_plane_state(state, primary);
5923
5924 intel_fbc_post_update(crtc);
5925
5926 if (new_primary_state->visible &&
5927 (needs_modeset(pipe_config) ||
5928 !old_primary_state->visible))
5929 intel_post_enable_primary(&crtc->base, pipe_config);
5930 }
5931
5932 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5933 !needs_nv12_wa(dev_priv, pipe_config))
5934 skl_wa_827(dev_priv, crtc->pipe, false);
5935
5936 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5937 !needs_scalerclk_wa(dev_priv, pipe_config))
5938 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
5939 }
5940
5941 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5942 struct intel_crtc_state *pipe_config)
5943 {
5944 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5945 struct drm_device *dev = crtc->base.dev;
5946 struct drm_i915_private *dev_priv = to_i915(dev);
5947 struct drm_atomic_state *state = old_crtc_state->base.state;
5948 struct drm_plane *primary = crtc->base.primary;
5949 struct drm_plane_state *old_primary_state =
5950 drm_atomic_get_old_plane_state(state, primary);
5951 bool modeset = needs_modeset(pipe_config);
5952 struct intel_atomic_state *intel_state =
5953 to_intel_atomic_state(state);
5954
5955 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5956 hsw_disable_ips(old_crtc_state);
5957
5958 if (old_primary_state) {
5959 struct intel_plane_state *new_primary_state =
5960 intel_atomic_get_new_plane_state(intel_state,
5961 to_intel_plane(primary));
5962
5963 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5964 /*
5965 * Gen2 reports pipe underruns whenever all planes are disabled.
5966 * So disable underrun reporting before all the planes get disabled.
5967 */
5968 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5969 (modeset || !new_primary_state->base.visible))
5970 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5971 }
5972
5973 /* Display WA 827 */
5974 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5975 needs_nv12_wa(dev_priv, pipe_config))
5976 skl_wa_827(dev_priv, crtc->pipe, true);
5977
5978 /* Wa_2006604312:icl */
5979 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5980 needs_scalerclk_wa(dev_priv, pipe_config))
5981 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
5982
5983 /*
5984 * Vblank time updates from the shadow to live plane control register
5985 * are blocked if the memory self-refresh mode is active at that
5986 * moment. So to make sure the plane gets truly disabled, disable
5987 * first the self-refresh mode. The self-refresh enable bit in turn
5988 * will be checked/applied by the HW only at the next frame start
5989 * event which is after the vblank start event, so we need to have a
5990 * wait-for-vblank between disabling the plane and the pipe.
5991 */
5992 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5993 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5994 intel_wait_for_vblank(dev_priv, crtc->pipe);
5995
5996 /*
5997 * IVB workaround: must disable low power watermarks for at least
5998 * one frame before enabling scaling. LP watermarks can be re-enabled
5999 * when scaling is disabled.
6000 *
6001 * WaCxSRDisabledForSpriteScaling:ivb
6002 */
6003 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
6004 old_crtc_state->base.active)
6005 intel_wait_for_vblank(dev_priv, crtc->pipe);
6006
6007 /*
6008 * If we're doing a modeset, we're done. No need to do any pre-vblank
6009 * watermark programming here.
6010 */
6011 if (needs_modeset(pipe_config))
6012 return;
6013
6014 /*
6015 * For platforms that support atomic watermarks, program the
6016 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
6017 * will be the intermediate values that are safe for both pre- and
6018 * post- vblank; when vblank happens, the 'active' values will be set
6019 * to the final 'target' values and we'll do this again to get the
6020 * optimal watermarks. For gen9+ platforms, the values we program here
6021 * will be the final target values which will get automatically latched
6022 * at vblank time; no further programming will be necessary.
6023 *
6024 * If a platform hasn't been transitioned to atomic watermarks yet,
6025 * we'll continue to update watermarks the old way, if flags tell
6026 * us to.
6027 */
6028 if (dev_priv->display.initial_watermarks != NULL)
6029 dev_priv->display.initial_watermarks(intel_state,
6030 pipe_config);
6031 else if (pipe_config->update_wm_pre)
6032 intel_update_watermarks(crtc);
6033 }
6034
6035 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6036 struct intel_crtc *crtc)
6037 {
6038 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6039 const struct intel_crtc_state *new_crtc_state =
6040 intel_atomic_get_new_crtc_state(state, crtc);
6041 unsigned int update_mask = new_crtc_state->update_planes;
6042 const struct intel_plane_state *old_plane_state;
6043 struct intel_plane *plane;
6044 unsigned fb_bits = 0;
6045 int i;
6046
6047 intel_crtc_dpms_overlay_disable(crtc);
6048
6049 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6050 if (crtc->pipe != plane->pipe ||
6051 !(update_mask & BIT(plane->id)))
6052 continue;
6053
6054 intel_disable_plane(plane, new_crtc_state);
6055
6056 if (old_plane_state->base.visible)
6057 fb_bits |= plane->frontbuffer_bit;
6058 }
6059
6060 intel_frontbuffer_flip(dev_priv, fb_bits);
6061 }
6062
6063 /*
6064 * intel_connector_primary_encoder - get the primary encoder for a connector
6065 * @connector: connector for which to return the encoder
6066 *
6067 * Returns the primary encoder for a connector. There is a 1:1 mapping from
6068 * all connectors to their encoder, except for DP-MST connectors which have
6069 * both a virtual and a primary encoder. These DP-MST primary encoders can be
6070 * pointed to by as many DP-MST connectors as there are pipes.
6071 */
6072 static struct intel_encoder *
6073 intel_connector_primary_encoder(struct intel_connector *connector)
6074 {
6075 struct intel_encoder *encoder;
6076
6077 if (connector->mst_port)
6078 return &dp_to_dig_port(connector->mst_port)->base;
6079
6080 encoder = intel_attached_encoder(&connector->base);
6081 WARN_ON(!encoder);
6082
6083 return encoder;
6084 }
6085
6086 static bool
6087 intel_connector_needs_modeset(struct intel_atomic_state *state,
6088 const struct drm_connector_state *old_conn_state,
6089 const struct drm_connector_state *new_conn_state)
6090 {
6091 struct intel_crtc *old_crtc = old_conn_state->crtc ?
6092 to_intel_crtc(old_conn_state->crtc) : NULL;
6093 struct intel_crtc *new_crtc = new_conn_state->crtc ?
6094 to_intel_crtc(new_conn_state->crtc) : NULL;
6095
6096 return new_crtc != old_crtc ||
6097 (new_crtc &&
6098 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6099 }
6100
6101 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6102 {
6103 struct drm_connector_state *old_conn_state;
6104 struct drm_connector_state *new_conn_state;
6105 struct drm_connector *conn;
6106 int i;
6107
6108 for_each_oldnew_connector_in_state(&state->base, conn,
6109 old_conn_state, new_conn_state, i) {
6110 struct intel_encoder *encoder;
6111 struct intel_crtc *crtc;
6112
6113 if (!intel_connector_needs_modeset(state,
6114 old_conn_state,
6115 new_conn_state))
6116 continue;
6117
6118 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6119 if (!encoder->update_prepare)
6120 continue;
6121
6122 crtc = new_conn_state->crtc ?
6123 to_intel_crtc(new_conn_state->crtc) : NULL;
6124 encoder->update_prepare(state, encoder, crtc);
6125 }
6126 }
6127
6128 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6129 {
6130 struct drm_connector_state *old_conn_state;
6131 struct drm_connector_state *new_conn_state;
6132 struct drm_connector *conn;
6133 int i;
6134
6135 for_each_oldnew_connector_in_state(&state->base, conn,
6136 old_conn_state, new_conn_state, i) {
6137 struct intel_encoder *encoder;
6138 struct intel_crtc *crtc;
6139
6140 if (!intel_connector_needs_modeset(state,
6141 old_conn_state,
6142 new_conn_state))
6143 continue;
6144
6145 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6146 if (!encoder->update_complete)
6147 continue;
6148
6149 crtc = new_conn_state->crtc ?
6150 to_intel_crtc(new_conn_state->crtc) : NULL;
6151 encoder->update_complete(state, encoder, crtc);
6152 }
6153 }
6154
6155 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
6156 struct intel_crtc_state *crtc_state,
6157 struct intel_atomic_state *state)
6158 {
6159 struct drm_connector_state *conn_state;
6160 struct drm_connector *conn;
6161 int i;
6162
6163 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6164 struct intel_encoder *encoder =
6165 to_intel_encoder(conn_state->best_encoder);
6166
6167 if (conn_state->crtc != &crtc->base)
6168 continue;
6169
6170 if (encoder->pre_pll_enable)
6171 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6172 }
6173 }
6174
6175 static void intel_encoders_pre_enable(struct intel_crtc *crtc,
6176 struct intel_crtc_state *crtc_state,
6177 struct intel_atomic_state *state)
6178 {
6179 struct drm_connector_state *conn_state;
6180 struct drm_connector *conn;
6181 int i;
6182
6183 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6184 struct intel_encoder *encoder =
6185 to_intel_encoder(conn_state->best_encoder);
6186
6187 if (conn_state->crtc != &crtc->base)
6188 continue;
6189
6190 if (encoder->pre_enable)
6191 encoder->pre_enable(encoder, crtc_state, conn_state);
6192 }
6193 }
6194
6195 static void intel_encoders_enable(struct intel_crtc *crtc,
6196 struct intel_crtc_state *crtc_state,
6197 struct intel_atomic_state *state)
6198 {
6199 struct drm_connector_state *conn_state;
6200 struct drm_connector *conn;
6201 int i;
6202
6203 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6204 struct intel_encoder *encoder =
6205 to_intel_encoder(conn_state->best_encoder);
6206
6207 if (conn_state->crtc != &crtc->base)
6208 continue;
6209
6210 if (encoder->enable)
6211 encoder->enable(encoder, crtc_state, conn_state);
6212 intel_opregion_notify_encoder(encoder, true);
6213 }
6214 }
6215
6216 static void intel_encoders_disable(struct intel_crtc *crtc,
6217 struct intel_crtc_state *old_crtc_state,
6218 struct intel_atomic_state *state)
6219 {
6220 struct drm_connector_state *old_conn_state;
6221 struct drm_connector *conn;
6222 int i;
6223
6224 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6225 struct intel_encoder *encoder =
6226 to_intel_encoder(old_conn_state->best_encoder);
6227
6228 if (old_conn_state->crtc != &crtc->base)
6229 continue;
6230
6231 intel_opregion_notify_encoder(encoder, false);
6232 if (encoder->disable)
6233 encoder->disable(encoder, old_crtc_state, old_conn_state);
6234 }
6235 }
6236
6237 static void intel_encoders_post_disable(struct intel_crtc *crtc,
6238 struct intel_crtc_state *old_crtc_state,
6239 struct intel_atomic_state *state)
6240 {
6241 struct drm_connector_state *old_conn_state;
6242 struct drm_connector *conn;
6243 int i;
6244
6245 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6246 struct intel_encoder *encoder =
6247 to_intel_encoder(old_conn_state->best_encoder);
6248
6249 if (old_conn_state->crtc != &crtc->base)
6250 continue;
6251
6252 if (encoder->post_disable)
6253 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6254 }
6255 }
6256
6257 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
6258 struct intel_crtc_state *old_crtc_state,
6259 struct intel_atomic_state *state)
6260 {
6261 struct drm_connector_state *old_conn_state;
6262 struct drm_connector *conn;
6263 int i;
6264
6265 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6266 struct intel_encoder *encoder =
6267 to_intel_encoder(old_conn_state->best_encoder);
6268
6269 if (old_conn_state->crtc != &crtc->base)
6270 continue;
6271
6272 if (encoder->post_pll_disable)
6273 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6274 }
6275 }
6276
6277 static void intel_encoders_update_pipe(struct intel_crtc *crtc,
6278 struct intel_crtc_state *crtc_state,
6279 struct intel_atomic_state *state)
6280 {
6281 struct drm_connector_state *conn_state;
6282 struct drm_connector *conn;
6283 int i;
6284
6285 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6286 struct intel_encoder *encoder =
6287 to_intel_encoder(conn_state->best_encoder);
6288
6289 if (conn_state->crtc != &crtc->base)
6290 continue;
6291
6292 if (encoder->update_pipe)
6293 encoder->update_pipe(encoder, crtc_state, conn_state);
6294 }
6295 }
6296
6297 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6298 {
6299 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6300 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6301
6302 plane->disable_plane(plane, crtc_state);
6303 }
6304
6305 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6306 struct intel_atomic_state *state)
6307 {
6308 struct drm_crtc *crtc = pipe_config->base.crtc;
6309 struct drm_device *dev = crtc->dev;
6310 struct drm_i915_private *dev_priv = to_i915(dev);
6311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6312 int pipe = intel_crtc->pipe;
6313
6314 if (WARN_ON(intel_crtc->active))
6315 return;
6316
6317 /*
6318 * Sometimes spurious CPU pipe underruns happen during FDI
6319 * training, at least with VGA+HDMI cloning. Suppress them.
6320 *
6321 * On ILK we get an occasional spurious CPU pipe underruns
6322 * between eDP port A enable and vdd enable. Also PCH port
6323 * enable seems to result in the occasional CPU pipe underrun.
6324 *
6325 * Spurious PCH underruns also occur during PCH enabling.
6326 */
6327 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6328 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6329
6330 if (pipe_config->has_pch_encoder)
6331 intel_prepare_shared_dpll(pipe_config);
6332
6333 if (intel_crtc_has_dp_encoder(pipe_config))
6334 intel_dp_set_m_n(pipe_config, M1_N1);
6335
6336 intel_set_pipe_timings(pipe_config);
6337 intel_set_pipe_src_size(pipe_config);
6338
6339 if (pipe_config->has_pch_encoder) {
6340 intel_cpu_transcoder_set_m_n(pipe_config,
6341 &pipe_config->fdi_m_n, NULL);
6342 }
6343
6344 ironlake_set_pipeconf(pipe_config);
6345
6346 intel_crtc->active = true;
6347
6348 intel_encoders_pre_enable(intel_crtc, pipe_config, state);
6349
6350 if (pipe_config->has_pch_encoder) {
6351 /* Note: FDI PLL enabling _must_ be done before we enable the
6352 * cpu pipes, hence this is separate from all the other fdi/pch
6353 * enabling. */
6354 ironlake_fdi_pll_enable(pipe_config);
6355 } else {
6356 assert_fdi_tx_disabled(dev_priv, pipe);
6357 assert_fdi_rx_disabled(dev_priv, pipe);
6358 }
6359
6360 ironlake_pfit_enable(pipe_config);
6361
6362 /*
6363 * On ILK+ LUT must be loaded before the pipe is running but with
6364 * clocks enabled
6365 */
6366 intel_color_load_luts(pipe_config);
6367 intel_color_commit(pipe_config);
6368 /* update DSPCNTR to configure gamma for pipe bottom color */
6369 intel_disable_primary_plane(pipe_config);
6370
6371 if (dev_priv->display.initial_watermarks != NULL)
6372 dev_priv->display.initial_watermarks(state, pipe_config);
6373 intel_enable_pipe(pipe_config);
6374
6375 if (pipe_config->has_pch_encoder)
6376 ironlake_pch_enable(state, pipe_config);
6377
6378 assert_vblank_disabled(crtc);
6379 intel_crtc_vblank_on(pipe_config);
6380
6381 intel_encoders_enable(intel_crtc, pipe_config, state);
6382
6383 if (HAS_PCH_CPT(dev_priv))
6384 cpt_verify_modeset(dev, intel_crtc->pipe);
6385
6386 /*
6387 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6388 * And a second vblank wait is needed at least on ILK with
6389 * some interlaced HDMI modes. Let's do the double wait always
6390 * in case there are more corner cases we don't know about.
6391 */
6392 if (pipe_config->has_pch_encoder) {
6393 intel_wait_for_vblank(dev_priv, pipe);
6394 intel_wait_for_vblank(dev_priv, pipe);
6395 }
6396 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6397 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6398 }
6399
6400 /* IPS only exists on ULT machines and is tied to pipe A. */
6401 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6402 {
6403 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6404 }
6405
6406 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6407 enum pipe pipe, bool apply)
6408 {
6409 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6410 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6411
6412 if (apply)
6413 val |= mask;
6414 else
6415 val &= ~mask;
6416
6417 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6418 }
6419
6420 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6421 {
6422 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6423 enum pipe pipe = crtc->pipe;
6424 u32 val;
6425
6426 val = MBUS_DBOX_A_CREDIT(2);
6427
6428 if (INTEL_GEN(dev_priv) >= 12) {
6429 val |= MBUS_DBOX_BW_CREDIT(2);
6430 val |= MBUS_DBOX_B_CREDIT(12);
6431 } else {
6432 val |= MBUS_DBOX_BW_CREDIT(1);
6433 val |= MBUS_DBOX_B_CREDIT(8);
6434 }
6435
6436 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6437 }
6438
6439 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6440 struct intel_atomic_state *state)
6441 {
6442 struct drm_crtc *crtc = pipe_config->base.crtc;
6443 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6444 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6445 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
6446 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6447 bool psl_clkgate_wa;
6448
6449 if (WARN_ON(intel_crtc->active))
6450 return;
6451
6452 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
6453
6454 if (pipe_config->shared_dpll)
6455 intel_enable_shared_dpll(pipe_config);
6456
6457 intel_encoders_pre_enable(intel_crtc, pipe_config, state);
6458
6459 if (intel_crtc_has_dp_encoder(pipe_config))
6460 intel_dp_set_m_n(pipe_config, M1_N1);
6461
6462 if (!transcoder_is_dsi(cpu_transcoder))
6463 intel_set_pipe_timings(pipe_config);
6464
6465 intel_set_pipe_src_size(pipe_config);
6466
6467 if (cpu_transcoder != TRANSCODER_EDP &&
6468 !transcoder_is_dsi(cpu_transcoder)) {
6469 I915_WRITE(PIPE_MULT(cpu_transcoder),
6470 pipe_config->pixel_multiplier - 1);
6471 }
6472
6473 if (pipe_config->has_pch_encoder) {
6474 intel_cpu_transcoder_set_m_n(pipe_config,
6475 &pipe_config->fdi_m_n, NULL);
6476 }
6477
6478 if (!transcoder_is_dsi(cpu_transcoder))
6479 haswell_set_pipeconf(pipe_config);
6480
6481 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6482 bdw_set_pipemisc(pipe_config);
6483
6484 intel_crtc->active = true;
6485
6486 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6487 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6488 pipe_config->pch_pfit.enabled;
6489 if (psl_clkgate_wa)
6490 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6491
6492 if (INTEL_GEN(dev_priv) >= 9)
6493 skylake_pfit_enable(pipe_config);
6494 else
6495 ironlake_pfit_enable(pipe_config);
6496
6497 /*
6498 * On ILK+ LUT must be loaded before the pipe is running but with
6499 * clocks enabled
6500 */
6501 intel_color_load_luts(pipe_config);
6502 intel_color_commit(pipe_config);
6503 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6504 if (INTEL_GEN(dev_priv) < 9)
6505 intel_disable_primary_plane(pipe_config);
6506
6507 if (INTEL_GEN(dev_priv) >= 11)
6508 icl_set_pipe_chicken(intel_crtc);
6509
6510 intel_ddi_set_pipe_settings(pipe_config);
6511 if (!transcoder_is_dsi(cpu_transcoder))
6512 intel_ddi_enable_transcoder_func(pipe_config);
6513
6514 if (dev_priv->display.initial_watermarks != NULL)
6515 dev_priv->display.initial_watermarks(state, pipe_config);
6516
6517 if (INTEL_GEN(dev_priv) >= 11)
6518 icl_pipe_mbus_enable(intel_crtc);
6519
6520 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6521 if (!transcoder_is_dsi(cpu_transcoder))
6522 intel_enable_pipe(pipe_config);
6523
6524 if (pipe_config->has_pch_encoder)
6525 lpt_pch_enable(state, pipe_config);
6526
6527 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6528 intel_ddi_set_vc_payload_alloc(pipe_config, true);
6529
6530 assert_vblank_disabled(crtc);
6531 intel_crtc_vblank_on(pipe_config);
6532
6533 intel_encoders_enable(intel_crtc, pipe_config, state);
6534
6535 if (psl_clkgate_wa) {
6536 intel_wait_for_vblank(dev_priv, pipe);
6537 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6538 }
6539
6540 /* If we change the relative order between pipe/planes enabling, we need
6541 * to change the workaround. */
6542 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6543 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6544 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6545 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6546 }
6547 }
6548
6549 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6550 {
6551 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6552 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6553 enum pipe pipe = crtc->pipe;
6554
6555 /* To avoid upsetting the power well on haswell, only disable the pfit if
6556 * it's in use. The hw state code will make sure we get this right. */
6557 if (old_crtc_state->pch_pfit.enabled) {
6558 I915_WRITE(PF_CTL(pipe), 0);
6559 I915_WRITE(PF_WIN_POS(pipe), 0);
6560 I915_WRITE(PF_WIN_SZ(pipe), 0);
6561 }
6562 }
6563
6564 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6565 struct intel_atomic_state *state)
6566 {
6567 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6568 struct drm_device *dev = crtc->dev;
6569 struct drm_i915_private *dev_priv = to_i915(dev);
6570 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6571 int pipe = intel_crtc->pipe;
6572
6573 /*
6574 * Sometimes spurious CPU pipe underruns happen when the
6575 * pipe is already disabled, but FDI RX/TX is still enabled.
6576 * Happens at least with VGA+HDMI cloning. Suppress them.
6577 */
6578 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6579 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6580
6581 intel_encoders_disable(intel_crtc, old_crtc_state, state);
6582
6583 drm_crtc_vblank_off(crtc);
6584 assert_vblank_disabled(crtc);
6585
6586 intel_disable_pipe(old_crtc_state);
6587
6588 ironlake_pfit_disable(old_crtc_state);
6589
6590 if (old_crtc_state->has_pch_encoder)
6591 ironlake_fdi_disable(crtc);
6592
6593 intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
6594
6595 if (old_crtc_state->has_pch_encoder) {
6596 ironlake_disable_pch_transcoder(dev_priv, pipe);
6597
6598 if (HAS_PCH_CPT(dev_priv)) {
6599 i915_reg_t reg;
6600 u32 temp;
6601
6602 /* disable TRANS_DP_CTL */
6603 reg = TRANS_DP_CTL(pipe);
6604 temp = I915_READ(reg);
6605 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6606 TRANS_DP_PORT_SEL_MASK);
6607 temp |= TRANS_DP_PORT_SEL_NONE;
6608 I915_WRITE(reg, temp);
6609
6610 /* disable DPLL_SEL */
6611 temp = I915_READ(PCH_DPLL_SEL);
6612 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6613 I915_WRITE(PCH_DPLL_SEL, temp);
6614 }
6615
6616 ironlake_fdi_pll_disable(intel_crtc);
6617 }
6618
6619 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6620 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6621 }
6622
6623 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6624 struct intel_atomic_state *state)
6625 {
6626 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6627 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6628 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6629 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6630
6631 intel_encoders_disable(intel_crtc, old_crtc_state, state);
6632
6633 drm_crtc_vblank_off(crtc);
6634 assert_vblank_disabled(crtc);
6635
6636 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6637 if (!transcoder_is_dsi(cpu_transcoder))
6638 intel_disable_pipe(old_crtc_state);
6639
6640 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6641 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6642
6643 if (!transcoder_is_dsi(cpu_transcoder))
6644 intel_ddi_disable_transcoder_func(old_crtc_state);
6645
6646 intel_dsc_disable(old_crtc_state);
6647
6648 if (INTEL_GEN(dev_priv) >= 9)
6649 skylake_scaler_disable(intel_crtc);
6650 else
6651 ironlake_pfit_disable(old_crtc_state);
6652
6653 intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
6654
6655 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
6656 }
6657
6658 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6659 {
6660 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6661 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6662
6663 if (!crtc_state->gmch_pfit.control)
6664 return;
6665
6666 /*
6667 * The panel fitter should only be adjusted whilst the pipe is disabled,
6668 * according to the register description and PRM.
6669 */
6670 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6671 assert_pipe_disabled(dev_priv, crtc->pipe);
6672
6673 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6674 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6675
6676 /* Border color in case we don't scale up to the full screen. Black by
6677 * default, change to something else for debugging. */
6678 I915_WRITE(BCLRPAT(crtc->pipe), 0);
6679 }
6680
6681 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6682 {
6683 if (phy == PHY_NONE)
6684 return false;
6685
6686 if (IS_ELKHARTLAKE(dev_priv))
6687 return phy <= PHY_C;
6688
6689 if (INTEL_GEN(dev_priv) >= 11)
6690 return phy <= PHY_B;
6691
6692 return false;
6693 }
6694
6695 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6696 {
6697 if (INTEL_GEN(dev_priv) >= 12)
6698 return phy >= PHY_D && phy <= PHY_I;
6699
6700 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6701 return phy >= PHY_C && phy <= PHY_F;
6702
6703 return false;
6704 }
6705
6706 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6707 {
6708 if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6709 return PHY_A;
6710
6711 return (enum phy)port;
6712 }
6713
6714 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6715 {
6716 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6717 return PORT_TC_NONE;
6718
6719 if (INTEL_GEN(dev_priv) >= 12)
6720 return port - PORT_D;
6721
6722 return port - PORT_C;
6723 }
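
/*
 * A quick sketch of what the two helpers above yield (illustrative;
 * derived from the arithmetic, not from a spec table): on gen12 the
 * TC ports are numbered from PORT_D, on gen11 (non-EHL) from PORT_C,
 * so e.g. intel_port_to_tc(dev_priv, PORT_E) returns 1 on gen12 and
 * 2 on gen11.
 */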
6724
6725 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6726 {
6727 switch (port) {
6728 case PORT_A:
6729 return POWER_DOMAIN_PORT_DDI_A_LANES;
6730 case PORT_B:
6731 return POWER_DOMAIN_PORT_DDI_B_LANES;
6732 case PORT_C:
6733 return POWER_DOMAIN_PORT_DDI_C_LANES;
6734 case PORT_D:
6735 return POWER_DOMAIN_PORT_DDI_D_LANES;
6736 case PORT_E:
6737 return POWER_DOMAIN_PORT_DDI_E_LANES;
6738 case PORT_F:
6739 return POWER_DOMAIN_PORT_DDI_F_LANES;
6740 default:
6741 MISSING_CASE(port);
6742 return POWER_DOMAIN_PORT_OTHER;
6743 }
6744 }
6745
6746 enum intel_display_power_domain
6747 intel_aux_power_domain(struct intel_digital_port *dig_port)
6748 {
6749 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
6750 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
6751
6752 if (intel_phy_is_tc(dev_priv, phy) &&
6753 dig_port->tc_mode == TC_PORT_TBT_ALT) {
6754 switch (dig_port->aux_ch) {
6755 case AUX_CH_C:
6756 return POWER_DOMAIN_AUX_TBT1;
6757 case AUX_CH_D:
6758 return POWER_DOMAIN_AUX_TBT2;
6759 case AUX_CH_E:
6760 return POWER_DOMAIN_AUX_TBT3;
6761 case AUX_CH_F:
6762 return POWER_DOMAIN_AUX_TBT4;
6763 default:
6764 MISSING_CASE(dig_port->aux_ch);
6765 return POWER_DOMAIN_AUX_TBT1;
6766 }
6767 }
6768
6769 switch (dig_port->aux_ch) {
6770 case AUX_CH_A:
6771 return POWER_DOMAIN_AUX_A;
6772 case AUX_CH_B:
6773 return POWER_DOMAIN_AUX_B;
6774 case AUX_CH_C:
6775 return POWER_DOMAIN_AUX_C;
6776 case AUX_CH_D:
6777 return POWER_DOMAIN_AUX_D;
6778 case AUX_CH_E:
6779 return POWER_DOMAIN_AUX_E;
6780 case AUX_CH_F:
6781 return POWER_DOMAIN_AUX_F;
6782 default:
6783 MISSING_CASE(dig_port->aux_ch);
6784 return POWER_DOMAIN_AUX_A;
6785 }
6786 }
6787
6788 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6789 {
6790 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6791 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6792 struct drm_encoder *encoder;
6793 enum pipe pipe = crtc->pipe;
6794 u64 mask;
6795 enum transcoder transcoder = crtc_state->cpu_transcoder;
6796
6797 if (!crtc_state->base.active)
6798 return 0;
6799
6800 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6801 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6802 if (crtc_state->pch_pfit.enabled ||
6803 crtc_state->pch_pfit.force_thru)
6804 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6805
6806 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
6807 crtc_state->base.encoder_mask) {
6808 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6809
6810 mask |= BIT_ULL(intel_encoder->power_domain);
6811 }
6812
6813 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6814 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6815
6816 if (crtc_state->shared_dpll)
6817 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6818
6819 return mask;
6820 }
6821
6822 static u64
6823 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6824 {
6825 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6826 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6827 enum intel_display_power_domain domain;
6828 u64 domains, new_domains, old_domains;
6829
6830 old_domains = crtc->enabled_power_domains;
6831 crtc->enabled_power_domains = new_domains =
6832 get_crtc_power_domains(crtc_state);
6833
6834 domains = new_domains & ~old_domains;
6835
6836 for_each_power_domain(domain, domains)
6837 intel_display_power_get(dev_priv, domain);
6838
6839 return old_domains & ~new_domains;
6840 }
6841
6842 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6843 u64 domains)
6844 {
6845 enum intel_display_power_domain domain;
6846
6847 for_each_power_domain(domain, domains)
6848 intel_display_power_put_unchecked(dev_priv, domain);
6849 }
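
/*
 * A minimal usage sketch of the get/put pairing above (illustrative;
 * callers in the atomic commit path follow this shape):
 *
 *	u64 put_domains = modeset_get_crtc_power_domains(crtc_state);
 *	... program the hardware for the new state ...
 *	modeset_put_power_domains(dev_priv, put_domains);
 *
 * References on the new domains are taken up front and the stale ones
 * are dropped only afterwards, so no domain powers down mid-modeset.
 */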
6850
6851 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6852 struct intel_atomic_state *state)
6853 {
6854 struct drm_crtc *crtc = pipe_config->base.crtc;
6855 struct drm_device *dev = crtc->dev;
6856 struct drm_i915_private *dev_priv = to_i915(dev);
6857 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6858 int pipe = intel_crtc->pipe;
6859
6860 if (WARN_ON(intel_crtc->active))
6861 return;
6862
6863 if (intel_crtc_has_dp_encoder(pipe_config))
6864 intel_dp_set_m_n(pipe_config, M1_N1);
6865
6866 intel_set_pipe_timings(pipe_config);
6867 intel_set_pipe_src_size(pipe_config);
6868
6869 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6870 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6871 I915_WRITE(CHV_CANVAS(pipe), 0);
6872 }
6873
6874 i9xx_set_pipeconf(pipe_config);
6875
6876 intel_crtc->active = true;
6877
6878 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6879
6880 intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
6881
6882 if (IS_CHERRYVIEW(dev_priv)) {
6883 chv_prepare_pll(intel_crtc, pipe_config);
6884 chv_enable_pll(intel_crtc, pipe_config);
6885 } else {
6886 vlv_prepare_pll(intel_crtc, pipe_config);
6887 vlv_enable_pll(intel_crtc, pipe_config);
6888 }
6889
6890 intel_encoders_pre_enable(intel_crtc, pipe_config, state);
6891
6892 i9xx_pfit_enable(pipe_config);
6893
6894 intel_color_load_luts(pipe_config);
6895 intel_color_commit(pipe_config);
6896 /* update DSPCNTR to configure gamma for pipe bottom color */
6897 intel_disable_primary_plane(pipe_config);
6898
6899 dev_priv->display.initial_watermarks(state, pipe_config);
6900 intel_enable_pipe(pipe_config);
6901
6902 assert_vblank_disabled(crtc);
6903 intel_crtc_vblank_on(pipe_config);
6904
6905 intel_encoders_enable(intel_crtc, pipe_config, state);
6906 }
6907
6908 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6909 {
6910 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6911 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6912
6913 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6914 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6915 }
6916
6917 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6918 struct intel_atomic_state *state)
6919 {
6920 struct drm_crtc *crtc = pipe_config->base.crtc;
6921 struct drm_device *dev = crtc->dev;
6922 struct drm_i915_private *dev_priv = to_i915(dev);
6923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6924 enum pipe pipe = intel_crtc->pipe;
6925
6926 if (WARN_ON(intel_crtc->active))
6927 return;
6928
6929 i9xx_set_pll_dividers(pipe_config);
6930
6931 if (intel_crtc_has_dp_encoder(pipe_config))
6932 intel_dp_set_m_n(pipe_config, M1_N1);
6933
6934 intel_set_pipe_timings(pipe_config);
6935 intel_set_pipe_src_size(pipe_config);
6936
6937 i9xx_set_pipeconf(pipe_config);
6938
6939 intel_crtc->active = true;
6940
6941 if (!IS_GEN(dev_priv, 2))
6942 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6943
6944 intel_encoders_pre_enable(intel_crtc, pipe_config, state);
6945
6946 i9xx_enable_pll(intel_crtc, pipe_config);
6947
6948 i9xx_pfit_enable(pipe_config);
6949
6950 intel_color_load_luts(pipe_config);
6951 intel_color_commit(pipe_config);
6952 /* update DSPCNTR to configure gamma for pipe bottom color */
6953 intel_disable_primary_plane(pipe_config);
6954
6955 if (dev_priv->display.initial_watermarks != NULL)
6956 dev_priv->display.initial_watermarks(state,
6957 pipe_config);
6958 else
6959 intel_update_watermarks(intel_crtc);
6960 intel_enable_pipe(pipe_config);
6961
6962 assert_vblank_disabled(crtc);
6963 intel_crtc_vblank_on(pipe_config);
6964
6965 intel_encoders_enable(intel_crtc, pipe_config, state);
6966 }
6967
6968 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6969 {
6970 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6972
6973 if (!old_crtc_state->gmch_pfit.control)
6974 return;
6975
6976 assert_pipe_disabled(dev_priv, crtc->pipe);
6977
6978 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6979 I915_READ(PFIT_CONTROL));
6980 I915_WRITE(PFIT_CONTROL, 0);
6981 }
6982
6983 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6984 struct intel_atomic_state *state)
6985 {
6986 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6987 struct drm_device *dev = crtc->dev;
6988 struct drm_i915_private *dev_priv = to_i915(dev);
6989 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6990 int pipe = intel_crtc->pipe;
6991
6992 /*
6993 * On gen2 planes are double buffered but the pipe isn't, so we must
6994 * wait for planes to fully turn off before disabling the pipe.
6995 */
6996 if (IS_GEN(dev_priv, 2))
6997 intel_wait_for_vblank(dev_priv, pipe);
6998
6999 intel_encoders_disable(intel_crtc, old_crtc_state, state);
7000
7001 drm_crtc_vblank_off(crtc);
7002 assert_vblank_disabled(crtc);
7003
7004 intel_disable_pipe(old_crtc_state);
7005
7006 i9xx_pfit_disable(old_crtc_state);
7007
7008 intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
7009
7010 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
7011 if (IS_CHERRYVIEW(dev_priv))
7012 chv_disable_pll(dev_priv, pipe);
7013 else if (IS_VALLEYVIEW(dev_priv))
7014 vlv_disable_pll(dev_priv, pipe);
7015 else
7016 i9xx_disable_pll(old_crtc_state);
7017 }
7018
7019 intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
7020
7021 if (!IS_GEN(dev_priv, 2))
7022 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
7023
7024 if (!dev_priv->display.initial_watermarks)
7025 intel_update_watermarks(intel_crtc);
7026
7027 /* clock the pipe down to 640x480@60 to potentially save power */
7028 if (IS_I830(dev_priv))
7029 i830_enable_pipe(dev_priv, pipe);
7030 }
7031
7032 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
7033 struct drm_modeset_acquire_ctx *ctx)
7034 {
7035 struct intel_encoder *encoder;
7036 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7037 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
7038 struct intel_bw_state *bw_state =
7039 to_intel_bw_state(dev_priv->bw_obj.state);
7040 enum intel_display_power_domain domain;
7041 struct intel_plane *plane;
7042 u64 domains;
7043 struct drm_atomic_state *state;
7044 struct intel_crtc_state *crtc_state;
7045 int ret;
7046
7047 if (!intel_crtc->active)
7048 return;
7049
7050 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
7051 const struct intel_plane_state *plane_state =
7052 to_intel_plane_state(plane->base.state);
7053
7054 if (plane_state->base.visible)
7055 intel_plane_disable_noatomic(intel_crtc, plane);
7056 }
7057
7058 state = drm_atomic_state_alloc(crtc->dev);
7059 if (!state) {
7060 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
7061 crtc->base.id, crtc->name);
7062 return;
7063 }
7064
7065 state->acquire_ctx = ctx;
7066
7067 /* Everything's already locked, -EDEADLK can't happen. */
7068 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
7069 ret = drm_atomic_add_affected_connectors(state, crtc);
7070
7071 WARN_ON(IS_ERR(crtc_state) || ret);
7072
7073 dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
7074
7075 drm_atomic_state_put(state);
7076
7077 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
7078 crtc->base.id, crtc->name);
7079
7080 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
7081 crtc->state->active = false;
7082 intel_crtc->active = false;
7083 crtc->enabled = false;
7084 crtc->state->connector_mask = 0;
7085 crtc->state->encoder_mask = 0;
7086
7087 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
7088 encoder->base.crtc = NULL;
7089
7090 intel_fbc_disable(intel_crtc);
7091 intel_update_watermarks(intel_crtc);
7092 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
7093
7094 domains = intel_crtc->enabled_power_domains;
7095 for_each_power_domain(domain, domains)
7096 intel_display_power_put_unchecked(dev_priv, domain);
7097 intel_crtc->enabled_power_domains = 0;
7098
7099 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
7100 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
7101 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
7102
7103 bw_state->data_rate[intel_crtc->pipe] = 0;
7104 bw_state->num_active_planes[intel_crtc->pipe] = 0;
7105 }
7106
7107 /*
7108 * turn all CRTCs off, but do not adjust state
7109 * This has to be paired with a call to intel_modeset_setup_hw_state.
7110 */
7111 int intel_display_suspend(struct drm_device *dev)
7112 {
7113 struct drm_i915_private *dev_priv = to_i915(dev);
7114 struct drm_atomic_state *state;
7115 int ret;
7116
7117 state = drm_atomic_helper_suspend(dev);
7118 ret = PTR_ERR_OR_ZERO(state);
7119 if (ret)
7120 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7121 else
7122 dev_priv->modeset_restore_state = state;
7123 return ret;
7124 }
7125
7126 void intel_encoder_destroy(struct drm_encoder *encoder)
7127 {
7128 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7129
7130 drm_encoder_cleanup(encoder);
7131 kfree(intel_encoder);
7132 }
7133
7134 /* Cross check the actual hw state with our own modeset state tracking (and its
7135 * internal consistency). */
7136 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
7137 struct drm_connector_state *conn_state)
7138 {
7139 struct intel_connector *connector = to_intel_connector(conn_state->connector);
7140
7141 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
7142 connector->base.base.id,
7143 connector->base.name);
7144
7145 if (connector->get_hw_state(connector)) {
7146 struct intel_encoder *encoder = connector->encoder;
7147
7148 I915_STATE_WARN(!crtc_state,
7149 "connector enabled without attached crtc\n");
7150
7151 if (!crtc_state)
7152 return;
7153
7154 I915_STATE_WARN(!crtc_state->base.active,
7155 "connector is active, but attached crtc isn't\n");
7156
7157 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7158 return;
7159
7160 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7161 "atomic encoder doesn't match attached encoder\n");
7162
7163 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7164 "attached encoder crtc differs from connector crtc\n");
7165 } else {
7166 I915_STATE_WARN(crtc_state && crtc_state->base.active,
7167 "attached crtc is active, but connector isn't\n");
7168 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7169 "best encoder set without crtc!\n");
7170 }
7171 }
7172
7173 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7174 {
7175 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7176 return crtc_state->fdi_lanes;
7177
7178 return 0;
7179 }
7180
7181 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7182 struct intel_crtc_state *pipe_config)
7183 {
7184 struct drm_i915_private *dev_priv = to_i915(dev);
7185 struct drm_atomic_state *state = pipe_config->base.state;
7186 struct intel_crtc *other_crtc;
7187 struct intel_crtc_state *other_crtc_state;
7188
7189 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7190 pipe_name(pipe), pipe_config->fdi_lanes);
7191 if (pipe_config->fdi_lanes > 4) {
7192 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7193 pipe_name(pipe), pipe_config->fdi_lanes);
7194 return -EINVAL;
7195 }
7196
7197 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7198 if (pipe_config->fdi_lanes > 2) {
7199 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7200 pipe_config->fdi_lanes);
7201 return -EINVAL;
7202 } else {
7203 return 0;
7204 }
7205 }
7206
7207 if (INTEL_INFO(dev_priv)->num_pipes == 2)
7208 return 0;
7209
7210 /* Ivybridge's three-pipe FDI sharing is really complicated */
7211 switch (pipe) {
7212 case PIPE_A:
7213 return 0;
7214 case PIPE_B:
7215 if (pipe_config->fdi_lanes <= 2)
7216 return 0;
7217
7218 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7219 other_crtc_state =
7220 intel_atomic_get_crtc_state(state, other_crtc);
7221 if (IS_ERR(other_crtc_state))
7222 return PTR_ERR(other_crtc_state);
7223
7224 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7225 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7226 pipe_name(pipe), pipe_config->fdi_lanes);
7227 return -EINVAL;
7228 }
7229 return 0;
7230 case PIPE_C:
7231 if (pipe_config->fdi_lanes > 2) {
7232 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7233 pipe_name(pipe), pipe_config->fdi_lanes);
7234 return -EINVAL;
7235 }
7236
7237 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7238 other_crtc_state =
7239 intel_atomic_get_crtc_state(state, other_crtc);
7240 if (IS_ERR(other_crtc_state))
7241 return PTR_ERR(other_crtc_state);
7242
7243 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7244 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
7245 return -EINVAL;
7246 }
7247 return 0;
7248 default:
7249 BUG();
7250 }
7251 }
7252
7253 #define RETRY 1
7254 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7255 struct intel_crtc_state *pipe_config)
7256 {
7257 struct drm_device *dev = intel_crtc->base.dev;
7258 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7259 int lane, link_bw, fdi_dotclock, ret;
7260 bool needs_recompute = false;
7261
7262 retry:
7263 /* FDI is a binary signal running at ~2.7GHz, encoding
7264 * each output octet as 10 bits. The actual frequency
7265 * is stored as a divider into a 100MHz clock, and the
7266 * mode pixel clock is stored in units of 1KHz.
7267 * Hence the bw of each lane in terms of the mode signal
7268 * is:
7269 */
7270 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7271
7272 fdi_dotclock = adjusted_mode->crtc_clock;
7273
7274 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7275 pipe_config->pipe_bpp);
7276
7277 pipe_config->fdi_lanes = lane;
7278
7279 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7280 link_bw, &pipe_config->fdi_m_n, false, false);
7281
7282 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7283 if (ret == -EDEADLK)
7284 return ret;
7285
7286 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
7287 pipe_config->pipe_bpp -= 2*3;
7288 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7289 pipe_config->pipe_bpp);
7290 needs_recompute = true;
7291 pipe_config->bw_constrained = true;
7292
7293 goto retry;
7294 }
7295
7296 if (needs_recompute)
7297 return RETRY;
7298
7299 return ret;
7300 }
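
/*
 * A sketch of the retry behaviour above (illustrative): a mode that
 * fails the lane check at 24 bpp is retried at 18 bpp (pipe_bpp drops
 * by 2*3 bits per pass until the 6*3 floor), and the RETRY return
 * value tells the caller the config changed and must be recomputed.
 */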
7301
7302 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7303 {
7304 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7305 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7306
7307 /* IPS only exists on ULT machines and is tied to pipe A. */
7308 if (!hsw_crtc_supports_ips(crtc))
7309 return false;
7310
7311 if (!i915_modparams.enable_ips)
7312 return false;
7313
7314 if (crtc_state->pipe_bpp > 24)
7315 return false;
7316
7317 /*
7318 * We compare against max which means we must take
7319 * the increased cdclk requirement into account when
7320 * calculating the new cdclk.
7321 *
7322 * Should measure whether using a lower cdclk without IPS would be a net win.
7323 */
7324 if (IS_BROADWELL(dev_priv) &&
7325 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7326 return false;
7327
7328 return true;
7329 }
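
/*
 * A worked example of the 95% rule above (numbers illustrative): with
 * max_cdclk_freq = 675000 kHz on BDW, IPS is refused for any pixel
 * rate above 675000 * 95 / 100 = 641250 kHz.
 */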
7330
7331 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7332 {
7333 struct drm_i915_private *dev_priv =
7334 to_i915(crtc_state->base.crtc->dev);
7335 struct intel_atomic_state *intel_state =
7336 to_intel_atomic_state(crtc_state->base.state);
7337
7338 if (!hsw_crtc_state_ips_capable(crtc_state))
7339 return false;
7340
7341 /*
7342 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7343 * enabled and disabled dynamically based on package C states,
7344 * user space can't make reliable use of the CRCs, so let's just
7345 * completely disable it.
7346 */
7347 if (crtc_state->crc_enabled)
7348 return false;
7349
7350 /* IPS should be fine as long as at least one plane is enabled. */
7351 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7352 return false;
7353
7354 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7355 if (IS_BROADWELL(dev_priv) &&
7356 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7357 return false;
7358
7359 return true;
7360 }
7361
7362 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7363 {
7364 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7365
7366 /* GDG double wide on either pipe, otherwise pipe A only */
7367 return INTEL_GEN(dev_priv) < 4 &&
7368 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7369 }
7370
7371 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7372 {
7373 u32 pixel_rate;
7374
7375 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7376
7377 /*
7378 * We only use IF-ID interlacing. If we ever use
7379 * PF-ID we'll need to adjust the pixel_rate here.
7380 */
7381
7382 if (pipe_config->pch_pfit.enabled) {
7383 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7384 u32 pfit_size = pipe_config->pch_pfit.size;
7385
7386 pipe_w = pipe_config->pipe_src_w;
7387 pipe_h = pipe_config->pipe_src_h;
7388
7389 pfit_w = (pfit_size >> 16) & 0xFFFF;
7390 pfit_h = pfit_size & 0xFFFF;
7391 if (pipe_w < pfit_w)
7392 pipe_w = pfit_w;
7393 if (pipe_h < pfit_h)
7394 pipe_h = pfit_h;
7395
7396 if (WARN_ON(!pfit_w || !pfit_h))
7397 return pixel_rate;
7398
7399 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7400 pfit_w * pfit_h);
7401 }
7402
7403 return pixel_rate;
7404 }
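
/*
 * A worked example of the pfit adjustment above (values illustrative):
 * a 3840x2160 pipe source downscaled to a 1920x1080 pfit window at a
 * 148500 kHz crtc_clock yields
 *
 *	148500 * (3840 * 2160) / (1920 * 1080) = 594000 kHz
 *
 * i.e. the pipe fetches four source pixels per output pixel.
 */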
7405
7406 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7407 {
7408 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7409
7410 if (HAS_GMCH(dev_priv))
7411 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7412 crtc_state->pixel_rate =
7413 crtc_state->base.adjusted_mode.crtc_clock;
7414 else
7415 crtc_state->pixel_rate =
7416 ilk_pipe_pixel_rate(crtc_state);
7417 }
7418
7419 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7420 struct intel_crtc_state *pipe_config)
7421 {
7422 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7423 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7424 int clock_limit = dev_priv->max_dotclk_freq;
7425
7426 if (INTEL_GEN(dev_priv) < 4) {
7427 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7428
7429 /*
7430 * Enable double wide mode when the dot clock
7431 * is > 90% of the (display) core speed.
7432 */
7433 if (intel_crtc_supports_double_wide(crtc) &&
7434 adjusted_mode->crtc_clock > clock_limit) {
7435 clock_limit = dev_priv->max_dotclk_freq;
7436 pipe_config->double_wide = true;
7437 }
7438 }
7439
7440 if (adjusted_mode->crtc_clock > clock_limit) {
7441 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7442 adjusted_mode->crtc_clock, clock_limit,
7443 yesno(pipe_config->double_wide));
7444 return -EINVAL;
7445 }
7446
7447 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7448 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7449 pipe_config->base.ctm) {
7450 /*
7451 * There is only one pipe CSC unit per pipe, and we need that
7452 * for output conversion from RGB->YCBCR. So if CTM is already
7453 * applied we can't support YCBCR420 output.
7454 */
7455 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7456 return -EINVAL;
7457 }
7458
7459 /*
7460 * Pipe horizontal size must be even in:
7461 * - DVO ganged mode
7462 * - LVDS dual channel mode
7463 * - Double wide pipe
7464 */
7465 if (pipe_config->pipe_src_w & 1) {
7466 if (pipe_config->double_wide) {
7467 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7468 return -EINVAL;
7469 }
7470
7471 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7472 intel_is_dual_link_lvds(dev_priv)) {
7473 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7474 return -EINVAL;
7475 }
7476 }
7477
7478 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
7479 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7480 */
7481 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7482 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7483 return -EINVAL;
7484
7485 intel_crtc_compute_pixel_rate(pipe_config);
7486
7487 if (pipe_config->has_pch_encoder)
7488 return ironlake_fdi_compute_config(crtc, pipe_config);
7489
7490 return 0;
7491 }
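
/*
 * A worked example of the double wide decision above (illustrative):
 * with max_cdclk_freq = 320000 kHz on a gen3 part, single wide tops
 * out at 320000 * 9 / 10 = 288000 kHz, so a 300000 kHz dot clock
 * enables double wide mode (where supported) and the limit becomes
 * max_dotclk_freq instead.
 */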
7492
7493 static void
7494 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7495 {
7496 while (*num > DATA_LINK_M_N_MASK ||
7497 *den > DATA_LINK_M_N_MASK) {
7498 *num >>= 1;
7499 *den >>= 1;
7500 }
7501 }
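
/*
 * For example (illustrative, with DATA_LINK_M_N_MASK as the 24-bit
 * field limit): num = 0x3000000, den = 0x1000000 halve twice to
 * 0xc00000/0x400000, preserving the 3:1 ratio while fitting both
 * values into their register fields.
 */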
7502
7503 static void compute_m_n(unsigned int m, unsigned int n,
7504 u32 *ret_m, u32 *ret_n,
7505 bool constant_n)
7506 {
7507 /*
7508 * Several DP dongles in particular seem to be fussy about
7509 * too large link M/N values. Use an N value of 0x8000, which
7510 * such devices should accept. 0x8000 is the specified fixed N
7511 * value for asynchronous clock mode, which the devices also
7512 * expect in synchronous clock mode.
7513 */
7514 if (constant_n)
7515 *ret_n = 0x8000;
7516 else
7517 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7518
7519 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7520 intel_reduce_m_n_ratio(ret_m, ret_n);
7521 }
7522
7523 void
7524 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7525 int pixel_clock, int link_clock,
7526 struct intel_link_m_n *m_n,
7527 bool constant_n, bool fec_enable)
7528 {
7529 u32 data_clock = bits_per_pixel * pixel_clock;
7530
7531 if (fec_enable)
7532 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7533
7534 m_n->tu = 64;
7535 compute_m_n(data_clock,
7536 link_clock * nlanes * 8,
7537 &m_n->gmch_m, &m_n->gmch_n,
7538 constant_n);
7539
7540 compute_m_n(pixel_clock, link_clock,
7541 &m_n->link_m, &m_n->link_n,
7542 constant_n);
7543 }
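
/*
 * A worked example (values illustrative): 24 bpp at a 148500 kHz pixel
 * clock over four HBR lanes (link_clock = 270000 kHz) gives
 *
 *	data M/N ratio: (24 * 148500) / (270000 * 4 * 8) = 3564000/8640000
 *	link M/N ratio: 148500/270000
 *
 * which compute_m_n() then rescales so N is 0x8000 (constant_n) or a
 * power of two, and reduces to fit the register fields.
 */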
7544
7545 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7546 {
7547 if (i915_modparams.panel_use_ssc >= 0)
7548 return i915_modparams.panel_use_ssc != 0;
7549 return dev_priv->vbt.lvds_use_ssc
7550 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7551 }
7552
7553 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7554 {
7555 return (1 << dpll->n) << 16 | dpll->m2;
7556 }
7557
7558 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7559 {
7560 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7561 }
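
/*
 * A packing example (illustrative): n = 2, m1 = 10, m2 = 5 gives
 * i9xx_dpll_compute_fp() = (2 << 16) | (10 << 8) | 5 = 0x20a05,
 * whereas the Pineview variant packs (1 << 2) << 16 | 5 = 0x40005.
 */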
7562
7563 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7564 struct intel_crtc_state *crtc_state,
7565 struct dpll *reduced_clock)
7566 {
7567 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7568 u32 fp, fp2 = 0;
7569
7570 if (IS_PINEVIEW(dev_priv)) {
7571 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7572 if (reduced_clock)
7573 fp2 = pnv_dpll_compute_fp(reduced_clock);
7574 } else {
7575 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7576 if (reduced_clock)
7577 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7578 }
7579
7580 crtc_state->dpll_hw_state.fp0 = fp;
7581
7582 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7583 reduced_clock) {
7584 crtc_state->dpll_hw_state.fp1 = fp2;
7585 } else {
7586 crtc_state->dpll_hw_state.fp1 = fp;
7587 }
7588 }
7589
7590 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7591 pipe)
7592 {
7593 u32 reg_val;
7594
7595 /*
7596 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7597 * and set it to a reasonable value instead.
7598 */
7599 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7600 reg_val &= 0xffffff00;
7601 reg_val |= 0x00000030;
7602 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7603
7604 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7605 reg_val &= 0x00ffffff;
7606 reg_val |= 0x8c000000;
7607 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7608
7609 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7610 reg_val &= 0xffffff00;
7611 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7612
7613 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7614 reg_val &= 0x00ffffff;
7615 reg_val |= 0xb0000000;
7616 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7617 }
7618
7619 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7620 const struct intel_link_m_n *m_n)
7621 {
7622 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7623 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7624 enum pipe pipe = crtc->pipe;
7625
7626 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7627 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7628 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7629 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7630 }
7631
7632 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7633 enum transcoder transcoder)
7634 {
7635 if (IS_HASWELL(dev_priv))
7636 return transcoder == TRANSCODER_EDP;
7637
7638 /*
7639 * Strictly speaking, some registers are available before
7640 * gen7, but we only support DRRS on gen7+
7641 */
7642 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7643 }
7644
7645 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7646 const struct intel_link_m_n *m_n,
7647 const struct intel_link_m_n *m2_n2)
7648 {
7649 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7650 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7651 enum pipe pipe = crtc->pipe;
7652 enum transcoder transcoder = crtc_state->cpu_transcoder;
7653
7654 if (INTEL_GEN(dev_priv) >= 5) {
7655 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7656 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7657 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7658 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7659 /*
7660 * M2_N2 registers are set only if DRRS is supported
7661 * (to make sure the registers are not unnecessarily accessed).
7662 */
7663 if (m2_n2 && crtc_state->has_drrs &&
7664 transcoder_has_m2_n2(dev_priv, transcoder)) {
7665 I915_WRITE(PIPE_DATA_M2(transcoder),
7666 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7667 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7668 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7669 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7670 }
7671 } else {
7672 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7673 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7674 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7675 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7676 }
7677 }
7678
7679 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7680 {
7681 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7682
7683 if (m_n == M1_N1) {
7684 dp_m_n = &crtc_state->dp_m_n;
7685 dp_m2_n2 = &crtc_state->dp_m2_n2;
7686 } else if (m_n == M2_N2) {
7687
7688 /*
7689 * M2_N2 registers are not supported. Hence m2_n2 divider value
7690 * needs to be programmed into M1_N1.
7691 */
7692 dp_m_n = &crtc_state->dp_m2_n2;
7693 } else {
7694 DRM_ERROR("Unsupported divider value\n");
7695 return;
7696 }
7697
7698 if (crtc_state->has_pch_encoder)
7699 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7700 else
7701 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7702 }
7703
7704 static void vlv_compute_dpll(struct intel_crtc *crtc,
7705 struct intel_crtc_state *pipe_config)
7706 {
7707 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7708 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7709 if (crtc->pipe != PIPE_A)
7710 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7711
7712 /* DPLL not used with DSI, but still need the rest set up */
7713 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7714 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7715 DPLL_EXT_BUFFER_ENABLE_VLV;
7716
7717 pipe_config->dpll_hw_state.dpll_md =
7718 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7719 }
7720
7721 static void chv_compute_dpll(struct intel_crtc *crtc,
7722 struct intel_crtc_state *pipe_config)
7723 {
7724 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7725 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7726 if (crtc->pipe != PIPE_A)
7727 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7728
7729 /* DPLL not used with DSI, but still need the rest set up */
7730 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7731 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7732
7733 pipe_config->dpll_hw_state.dpll_md =
7734 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7735 }
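
/*
 * Note on the pixel multiplier encoding used by both helpers above:
 * the field holds the multiplier minus one, so e.g. a multiplier of 2
 * is written as (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT and a
 * multiplier of 1 leaves the field at zero.
 */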
7736
7737 static void vlv_prepare_pll(struct intel_crtc *crtc,
7738 const struct intel_crtc_state *pipe_config)
7739 {
7740 struct drm_device *dev = crtc->base.dev;
7741 struct drm_i915_private *dev_priv = to_i915(dev);
7742 enum pipe pipe = crtc->pipe;
7743 u32 mdiv;
7744 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7745 u32 coreclk, reg_val;
7746
7747 /* Enable Refclk */
7748 I915_WRITE(DPLL(pipe),
7749 pipe_config->dpll_hw_state.dpll &
7750 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7751
7752 /* No need to actually set up the DPLL with DSI */
7753 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7754 return;
7755
7756 vlv_dpio_get(dev_priv);
7757
7758 bestn = pipe_config->dpll.n;
7759 bestm1 = pipe_config->dpll.m1;
7760 bestm2 = pipe_config->dpll.m2;
7761 bestp1 = pipe_config->dpll.p1;
7762 bestp2 = pipe_config->dpll.p2;
7763
7764 /* See eDP HDMI DPIO driver vbios notes doc */
7765
7766 /* PLL B needs special handling */
7767 if (pipe == PIPE_B)
7768 vlv_pllb_recal_opamp(dev_priv, pipe);
7769
7770 /* Set up Tx target for periodic Rcomp update */
7771 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7772
7773 /* Disable target IRef on PLL */
7774 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7775 reg_val &= 0x00ffffff;
7776 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7777
7778 /* Disable fast lock */
7779 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7780
7781 /* Set idtafcrecal before PLL is enabled */
7782 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7783 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7784 mdiv |= ((bestn << DPIO_N_SHIFT));
7785 mdiv |= (1 << DPIO_K_SHIFT);
7786
7787 /*
7788 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7789 * but we don't support that).
7790 * Note: don't use the DAC post divider as it seems unstable.
7791 */
7792 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7793 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7794
7795 mdiv |= DPIO_ENABLE_CALIBRATION;
7796 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7797
7798 /* Set HBR and RBR LPF coefficients */
7799 if (pipe_config->port_clock == 162000 ||
7800 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7801 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7802 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7803 0x009f0003);
7804 else
7805 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7806 0x00d0000f);
7807
7808 if (intel_crtc_has_dp_encoder(pipe_config)) {
7809 /* Use SSC source */
7810 if (pipe == PIPE_A)
7811 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7812 0x0df40000);
7813 else
7814 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7815 0x0df70000);
7816 } else { /* HDMI or VGA */
7817 /* Use bend source */
7818 if (pipe == PIPE_A)
7819 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7820 0x0df70000);
7821 else
7822 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7823 0x0df40000);
7824 }
7825
7826 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7827 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7828 if (intel_crtc_has_dp_encoder(pipe_config))
7829 coreclk |= 0x01000000;
7830 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7831
7832 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7833
7834 vlv_dpio_put(dev_priv);
7835 }
7836
7837 static void chv_prepare_pll(struct intel_crtc *crtc,
7838 const struct intel_crtc_state *pipe_config)
7839 {
7840 struct drm_device *dev = crtc->base.dev;
7841 struct drm_i915_private *dev_priv = to_i915(dev);
7842 enum pipe pipe = crtc->pipe;
7843 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7844 u32 loopfilter, tribuf_calcntr;
7845 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7846 u32 dpio_val;
7847 int vco;
7848
7849 /* Enable Refclk and SSC */
7850 I915_WRITE(DPLL(pipe),
7851 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7852
7853 /* No need to actually set up the DPLL with DSI */
7854 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7855 return;
7856
7857 bestn = pipe_config->dpll.n;
7858 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7859 bestm1 = pipe_config->dpll.m1;
7860 bestm2 = pipe_config->dpll.m2 >> 22;
7861 bestp1 = pipe_config->dpll.p1;
7862 bestp2 = pipe_config->dpll.p2;
7863 vco = pipe_config->dpll.vco;
7864 dpio_val = 0;
7865 loopfilter = 0;
7866
7867 vlv_dpio_get(dev_priv);
7868
7869 /* p1 and p2 divider */
7870 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7871 5 << DPIO_CHV_S1_DIV_SHIFT |
7872 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7873 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7874 1 << DPIO_CHV_K_DIV_SHIFT);
7875
7876 /* Feedback post-divider - m2 */
7877 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7878
7879 /* Feedback refclk divider - n and m1 */
7880 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7881 DPIO_CHV_M1_DIV_BY_2 |
7882 1 << DPIO_CHV_N_DIV_SHIFT);
7883
7884 /* M2 fraction division */
7885 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7886
7887 /* M2 fraction division enable */
7888 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7889 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7890 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7891 if (bestm2_frac)
7892 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7893 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7894
7895 /* Program digital lock detect threshold */
7896 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7897 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7898 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7899 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7900 if (!bestm2_frac)
7901 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7902 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7903
7904 /* Loop filter */
7905 if (vco == 5400000) {
7906 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7907 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7908 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7909 tribuf_calcntr = 0x9;
7910 } else if (vco <= 6200000) {
7911 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7912 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7913 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7914 tribuf_calcntr = 0x9;
7915 } else if (vco <= 6480000) {
7916 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7917 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7918 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7919 tribuf_calcntr = 0x8;
7920 } else {
7921 /* Not supported. Apply the same limits as in the max case */
7922 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7923 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7924 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7925 tribuf_calcntr = 0;
7926 }
7927 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7928
7929 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7930 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7931 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7932 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7933
7934 /* AFC Recal */
7935 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7936 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7937 DPIO_AFC_RECAL);
7938
7939 vlv_dpio_put(dev_priv);
7940 }
7941
7942 /**
7943 * vlv_force_pll_on - forcibly enable just the PLL
7944 * @dev_priv: i915 private structure
7945 * @pipe: pipe PLL to enable
7946 * @dpll: PLL configuration
7947 *
7948 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7949 * in cases where we need the PLL enabled even when @pipe is not going to
7950 * be enabled.
7951 */
7952 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7953 const struct dpll *dpll)
7954 {
7955 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7956 struct intel_crtc_state *pipe_config;
7957
7958 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7959 if (!pipe_config)
7960 return -ENOMEM;
7961
7962 pipe_config->base.crtc = &crtc->base;
7963 pipe_config->pixel_multiplier = 1;
7964 pipe_config->dpll = *dpll;
7965
7966 if (IS_CHERRYVIEW(dev_priv)) {
7967 chv_compute_dpll(crtc, pipe_config);
7968 chv_prepare_pll(crtc, pipe_config);
7969 chv_enable_pll(crtc, pipe_config);
7970 } else {
7971 vlv_compute_dpll(crtc, pipe_config);
7972 vlv_prepare_pll(crtc, pipe_config);
7973 vlv_enable_pll(crtc, pipe_config);
7974 }
7975
7976 kfree(pipe_config);
7977
7978 return 0;
7979 }
7980
7981 /**
7982 * vlv_force_pll_off - forcibly disable just the PLL
7983 * @dev_priv: i915 private structure
7984 * @pipe: pipe PLL to disable
7985 *
7986 * Disable the PLL for @pipe. To be used in cases where the PLL was
7987 * previously force-enabled even though @pipe was not going to be enabled.
7988 */
7989 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7990 {
7991 if (IS_CHERRYVIEW(dev_priv))
7992 chv_disable_pll(dev_priv, pipe);
7993 else
7994 vlv_disable_pll(dev_priv, pipe);
7995 }
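
/*
 * A minimal usage sketch of the force on/off pair above (illustrative;
 * the divider values are placeholders, not validated settings):
 *
 *	struct dpll clock = { .n = 1, .m1 = 2, .m2 = 91, .p1 = 2, .p2 = 2 };
 *	if (vlv_force_pll_on(dev_priv, PIPE_A, &clock) == 0)
 *		vlv_force_pll_off(dev_priv, PIPE_A);
 *
 * This matches the kernel-doc intent: the PLL runs even though PIPE_A
 * itself is never enabled.
 */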
7996
7997 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7998 struct intel_crtc_state *crtc_state,
7999 struct dpll *reduced_clock)
8000 {
8001 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8002 u32 dpll;
8003 struct dpll *clock = &crtc_state->dpll;
8004
8005 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8006
8007 dpll = DPLL_VGA_MODE_DIS;
8008
8009 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8010 dpll |= DPLLB_MODE_LVDS;
8011 else
8012 dpll |= DPLLB_MODE_DAC_SERIAL;
8013
8014 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8015 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8016 dpll |= (crtc_state->pixel_multiplier - 1)
8017 << SDVO_MULTIPLIER_SHIFT_HIRES;
8018 }
8019
8020 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8021 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8022 dpll |= DPLL_SDVO_HIGH_SPEED;
8023
8024 if (intel_crtc_has_dp_encoder(crtc_state))
8025 dpll |= DPLL_SDVO_HIGH_SPEED;
8026
8027 /* compute bitmask from p1 value */
8028 if (IS_PINEVIEW(dev_priv))
8029 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8030 else {
8031 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8032 if (IS_G4X(dev_priv) && reduced_clock)
8033 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8034 }
8035 switch (clock->p2) {
8036 case 5:
8037 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8038 break;
8039 case 7:
8040 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8041 break;
8042 case 10:
8043 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8044 break;
8045 case 14:
8046 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8047 break;
8048 }
8049 if (INTEL_GEN(dev_priv) >= 4)
8050 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
8051
8052 if (crtc_state->sdvo_tv_clock)
8053 dpll |= PLL_REF_INPUT_TVCLKINBC;
8054 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8055 intel_panel_use_ssc(dev_priv))
8056 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8057 else
8058 dpll |= PLL_REF_INPUT_DREFCLK;
8059
8060 dpll |= DPLL_VCO_ENABLE;
8061 crtc_state->dpll_hw_state.dpll = dpll;
8062
8063 if (INTEL_GEN(dev_priv) >= 4) {
8064 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8065 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8066 crtc_state->dpll_hw_state.dpll_md = dpll_md;
8067 }
8068 }
8069
8070 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8071 struct intel_crtc_state *crtc_state,
8072 struct dpll *reduced_clock)
8073 {
8074 struct drm_device *dev = crtc->base.dev;
8075 struct drm_i915_private *dev_priv = to_i915(dev);
8076 u32 dpll;
8077 struct dpll *clock = &crtc_state->dpll;
8078
8079 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8080
8081 dpll = DPLL_VGA_MODE_DIS;
8082
8083 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8084 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8085 } else {
8086 if (clock->p1 == 2)
8087 dpll |= PLL_P1_DIVIDE_BY_TWO;
8088 else
8089 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8090 if (clock->p2 == 4)
8091 dpll |= PLL_P2_DIVIDE_BY_4;
8092 }
8093
8094 /*
8095 * Bspec:
8096 * "[Almador Errata}: For the correct operation of the muxed DVO pins
8097 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8098 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8099 * Enable) must be set to “1” in both the DPLL A Control Register
8100 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8101 *
8102 * For simplicity, we keep both bits always enabled in
8103 * both DPLLs. The spec says we should disable the DVO 2X clock
8104 * when not needed, but this seems to work fine in practice.
8105 */
8106 if (IS_I830(dev_priv) ||
8107 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8108 dpll |= DPLL_DVO_2X_MODE;
8109
8110 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8111 intel_panel_use_ssc(dev_priv))
8112 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8113 else
8114 dpll |= PLL_REF_INPUT_DREFCLK;
8115
8116 dpll |= DPLL_VCO_ENABLE;
8117 crtc_state->dpll_hw_state.dpll = dpll;
8118 }
8119
8120 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
8121 {
8122 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8123 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8124 enum pipe pipe = crtc->pipe;
8125 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8126 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
8127 u32 crtc_vtotal, crtc_vblank_end;
8128 int vsyncshift = 0;
8129
8130 /* We need to be careful not to change the adjusted mode, for otherwise
8131 * the hw state checker will get angry at the mismatch. */
8132 crtc_vtotal = adjusted_mode->crtc_vtotal;
8133 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
8134
8135 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
8136 /* the chip adds 2 halflines automatically */
8137 crtc_vtotal -= 1;
8138 crtc_vblank_end -= 1;
8139
8140 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8141 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
8142 else
8143 vsyncshift = adjusted_mode->crtc_hsync_start -
8144 adjusted_mode->crtc_htotal / 2;
8145 if (vsyncshift < 0)
8146 vsyncshift += adjusted_mode->crtc_htotal;
8147 }
8148
8149 if (INTEL_GEN(dev_priv) > 3)
8150 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
8151
8152 I915_WRITE(HTOTAL(cpu_transcoder),
8153 (adjusted_mode->crtc_hdisplay - 1) |
8154 ((adjusted_mode->crtc_htotal - 1) << 16));
8155 I915_WRITE(HBLANK(cpu_transcoder),
8156 (adjusted_mode->crtc_hblank_start - 1) |
8157 ((adjusted_mode->crtc_hblank_end - 1) << 16));
8158 I915_WRITE(HSYNC(cpu_transcoder),
8159 (adjusted_mode->crtc_hsync_start - 1) |
8160 ((adjusted_mode->crtc_hsync_end - 1) << 16));
8161
8162 I915_WRITE(VTOTAL(cpu_transcoder),
8163 (adjusted_mode->crtc_vdisplay - 1) |
8164 ((crtc_vtotal - 1) << 16));
8165 I915_WRITE(VBLANK(cpu_transcoder),
8166 (adjusted_mode->crtc_vblank_start - 1) |
8167 ((crtc_vblank_end - 1) << 16));
8168 I915_WRITE(VSYNC(cpu_transcoder),
8169 (adjusted_mode->crtc_vsync_start - 1) |
8170 ((adjusted_mode->crtc_vsync_end - 1) << 16));
8171
8172 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8173 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8174 * documented on the DDI_FUNC_CTL register description, EDP Input Select
8175 * bits. */
8176 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8177 (pipe == PIPE_B || pipe == PIPE_C))
8178 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
8179
8180 }
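
/*
 * A packing example for the timing writes above (values illustrative):
 * a 1920x1080 mode with htotal 2200 and vtotal 1125 programs
 *
 *	HTOTAL = (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f
 *	VTOTAL = (1080 - 1) | ((1125 - 1) << 16) = 0x04640437
 *
 * i.e. every field is stored minus one, active in the low half and
 * total in the high half of each register.
 */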
8181
8182 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8183 {
8184 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8185 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8186 enum pipe pipe = crtc->pipe;
8187
8188 /* pipesrc controls the size that is scaled from, which should
8189 * always be the user's requested size.
8190 */
8191 I915_WRITE(PIPESRC(pipe),
8192 ((crtc_state->pipe_src_w - 1) << 16) |
8193 (crtc_state->pipe_src_h - 1));
8194 }
8195
8196 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8197 struct intel_crtc_state *pipe_config)
8198 {
8199 struct drm_device *dev = crtc->base.dev;
8200 struct drm_i915_private *dev_priv = to_i915(dev);
8201 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8202 u32 tmp;
8203
8204 tmp = I915_READ(HTOTAL(cpu_transcoder));
8205 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8206 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8207
8208 if (!transcoder_is_dsi(cpu_transcoder)) {
8209 tmp = I915_READ(HBLANK(cpu_transcoder));
8210 pipe_config->base.adjusted_mode.crtc_hblank_start =
8211 (tmp & 0xffff) + 1;
8212 pipe_config->base.adjusted_mode.crtc_hblank_end =
8213 ((tmp >> 16) & 0xffff) + 1;
8214 }
8215 tmp = I915_READ(HSYNC(cpu_transcoder));
8216 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8217 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8218
8219 tmp = I915_READ(VTOTAL(cpu_transcoder));
8220 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8221 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8222
8223 if (!transcoder_is_dsi(cpu_transcoder)) {
8224 tmp = I915_READ(VBLANK(cpu_transcoder));
8225 pipe_config->base.adjusted_mode.crtc_vblank_start =
8226 (tmp & 0xffff) + 1;
8227 pipe_config->base.adjusted_mode.crtc_vblank_end =
8228 ((tmp >> 16) & 0xffff) + 1;
8229 }
8230 tmp = I915_READ(VSYNC(cpu_transcoder));
8231 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8232 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8233
8234 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
8235 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8236 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8237 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8238 }
8239 }
8240
8241 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8242 struct intel_crtc_state *pipe_config)
8243 {
8244 struct drm_device *dev = crtc->base.dev;
8245 struct drm_i915_private *dev_priv = to_i915(dev);
8246 u32 tmp;
8247
8248 tmp = I915_READ(PIPESRC(crtc->pipe));
8249 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8250 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8251
8252 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8253 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8254 }
8255
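/*
 * Translate the hardware readout timings (crtc_*) in a pipe config back
 * into a plain drm_display_mode, e.g. so that a usable mode exists for a
 * pipe that firmware had already enabled before the driver took over.
 */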
8256 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8257 struct intel_crtc_state *pipe_config)
8258 {
8259 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8260 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8261 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8262 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8263
8264 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8265 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8266 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8267 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8268
8269 mode->flags = pipe_config->base.adjusted_mode.flags;
8270 mode->type = DRM_MODE_TYPE_DRIVER;
8271
8272 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8273
8274 mode->hsync = drm_mode_hsync(mode);
8275 mode->vrefresh = drm_mode_vrefresh(mode);
8276 drm_mode_set_name(mode);
8277 }
8278
8279 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8280 {
8281 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8282 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8283 u32 pipeconf;
8284
8285 pipeconf = 0;
8286
8287 /* we keep both pipes enabled on 830 */
8288 if (IS_I830(dev_priv))
8289 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8290
8291 if (crtc_state->double_wide)
8292 pipeconf |= PIPECONF_DOUBLE_WIDE;
8293
8294 /* only g4x and later have fancy bpc/dither controls */
8295 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8296 IS_CHERRYVIEW(dev_priv)) {
8297 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8298 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8299 pipeconf |= PIPECONF_DITHER_EN |
8300 PIPECONF_DITHER_TYPE_SP;
8301
8302 switch (crtc_state->pipe_bpp) {
8303 case 18:
8304 pipeconf |= PIPECONF_6BPC;
8305 break;
8306 case 24:
8307 pipeconf |= PIPECONF_8BPC;
8308 break;
8309 case 30:
8310 pipeconf |= PIPECONF_10BPC;
8311 break;
8312 default:
8313 /* Case prevented by intel_choose_pipe_bpp_dither. */
8314 BUG();
8315 }
8316 }
8317
8318 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8319 if (INTEL_GEN(dev_priv) < 4 ||
8320 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8321 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8322 else
8323 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8324 } else {
8325 pipeconf |= PIPECONF_PROGRESSIVE;
8326 }
8327
8328 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8329 crtc_state->limited_color_range)
8330 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8331
8332 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8333
8334 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8335 POSTING_READ(PIPECONF(crtc->pipe));
8336 }
8337
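/*
 * The i8xx/g4x/pnv/i9xx/chv/vlv compute_clock variants below all follow
 * the same pattern: pick a reference clock (the VBT SSC frequency for
 * LVDS panels using spread spectrum), select the PLL limits matching the
 * attached output type, ask *_find_best_dpll() for divider values, and
 * encode the result into crtc_state->dpll_hw_state.
 */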
8338 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8339 struct intel_crtc_state *crtc_state)
8340 {
8341 struct drm_device *dev = crtc->base.dev;
8342 struct drm_i915_private *dev_priv = to_i915(dev);
8343 const struct intel_limit *limit;
8344 int refclk = 48000;
8345
8346 memset(&crtc_state->dpll_hw_state, 0,
8347 sizeof(crtc_state->dpll_hw_state));
8348
8349 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8350 if (intel_panel_use_ssc(dev_priv)) {
8351 refclk = dev_priv->vbt.lvds_ssc_freq;
8352 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8353 }
8354
8355 limit = &intel_limits_i8xx_lvds;
8356 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8357 limit = &intel_limits_i8xx_dvo;
8358 } else {
8359 limit = &intel_limits_i8xx_dac;
8360 }
8361
8362 if (!crtc_state->clock_set &&
8363 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8364 refclk, NULL, &crtc_state->dpll)) {
8365 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8366 return -EINVAL;
8367 }
8368
8369 i8xx_compute_dpll(crtc, crtc_state, NULL);
8370
8371 return 0;
8372 }
8373
8374 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8375 struct intel_crtc_state *crtc_state)
8376 {
8377 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8378 const struct intel_limit *limit;
8379 int refclk = 96000;
8380
8381 memset(&crtc_state->dpll_hw_state, 0,
8382 sizeof(crtc_state->dpll_hw_state));
8383
8384 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8385 if (intel_panel_use_ssc(dev_priv)) {
8386 refclk = dev_priv->vbt.lvds_ssc_freq;
8387 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8388 }
8389
8390 if (intel_is_dual_link_lvds(dev_priv))
8391 limit = &intel_limits_g4x_dual_channel_lvds;
8392 else
8393 limit = &intel_limits_g4x_single_channel_lvds;
8394 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8395 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8396 limit = &intel_limits_g4x_hdmi;
8397 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8398 limit = &intel_limits_g4x_sdvo;
8399 } else {
8400 		/* Use the i9xx SDVO limits for any other output type */
8401 limit = &intel_limits_i9xx_sdvo;
8402 }
8403
8404 if (!crtc_state->clock_set &&
8405 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8406 refclk, NULL, &crtc_state->dpll)) {
8407 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8408 return -EINVAL;
8409 }
8410
8411 i9xx_compute_dpll(crtc, crtc_state, NULL);
8412
8413 return 0;
8414 }
8415
8416 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8417 struct intel_crtc_state *crtc_state)
8418 {
8419 struct drm_device *dev = crtc->base.dev;
8420 struct drm_i915_private *dev_priv = to_i915(dev);
8421 const struct intel_limit *limit;
8422 int refclk = 96000;
8423
8424 memset(&crtc_state->dpll_hw_state, 0,
8425 sizeof(crtc_state->dpll_hw_state));
8426
8427 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8428 if (intel_panel_use_ssc(dev_priv)) {
8429 refclk = dev_priv->vbt.lvds_ssc_freq;
8430 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8431 }
8432
8433 limit = &intel_limits_pineview_lvds;
8434 } else {
8435 limit = &intel_limits_pineview_sdvo;
8436 }
8437
8438 if (!crtc_state->clock_set &&
8439 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8440 refclk, NULL, &crtc_state->dpll)) {
8441 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8442 return -EINVAL;
8443 }
8444
8445 i9xx_compute_dpll(crtc, crtc_state, NULL);
8446
8447 return 0;
8448 }
8449
8450 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8451 struct intel_crtc_state *crtc_state)
8452 {
8453 struct drm_device *dev = crtc->base.dev;
8454 struct drm_i915_private *dev_priv = to_i915(dev);
8455 const struct intel_limit *limit;
8456 int refclk = 96000;
8457
8458 memset(&crtc_state->dpll_hw_state, 0,
8459 sizeof(crtc_state->dpll_hw_state));
8460
8461 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8462 if (intel_panel_use_ssc(dev_priv)) {
8463 refclk = dev_priv->vbt.lvds_ssc_freq;
8464 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8465 }
8466
8467 limit = &intel_limits_i9xx_lvds;
8468 } else {
8469 limit = &intel_limits_i9xx_sdvo;
8470 }
8471
8472 if (!crtc_state->clock_set &&
8473 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8474 refclk, NULL, &crtc_state->dpll)) {
8475 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8476 return -EINVAL;
8477 }
8478
8479 i9xx_compute_dpll(crtc, crtc_state, NULL);
8480
8481 return 0;
8482 }
8483
8484 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8485 struct intel_crtc_state *crtc_state)
8486 {
8487 int refclk = 100000;
8488 const struct intel_limit *limit = &intel_limits_chv;
8489
8490 memset(&crtc_state->dpll_hw_state, 0,
8491 sizeof(crtc_state->dpll_hw_state));
8492
8493 if (!crtc_state->clock_set &&
8494 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8495 refclk, NULL, &crtc_state->dpll)) {
8496 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8497 return -EINVAL;
8498 }
8499
8500 chv_compute_dpll(crtc, crtc_state);
8501
8502 return 0;
8503 }
8504
8505 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8506 struct intel_crtc_state *crtc_state)
8507 {
8508 int refclk = 100000;
8509 const struct intel_limit *limit = &intel_limits_vlv;
8510
8511 memset(&crtc_state->dpll_hw_state, 0,
8512 sizeof(crtc_state->dpll_hw_state));
8513
8514 if (!crtc_state->clock_set &&
8515 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8516 refclk, NULL, &crtc_state->dpll)) {
8517 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8518 return -EINVAL;
8519 }
8520
8521 vlv_compute_dpll(crtc, crtc_state);
8522
8523 return 0;
8524 }
8525
8526 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8527 {
8528 if (IS_I830(dev_priv))
8529 return false;
8530
8531 return INTEL_GEN(dev_priv) >= 4 ||
8532 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8533 }
8534
8535 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8536 struct intel_crtc_state *pipe_config)
8537 {
8538 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8539 u32 tmp;
8540
8541 if (!i9xx_has_pfit(dev_priv))
8542 return;
8543
8544 tmp = I915_READ(PFIT_CONTROL);
8545 if (!(tmp & PFIT_ENABLE))
8546 return;
8547
8548 /* Check whether the pfit is attached to our pipe. */
8549 if (INTEL_GEN(dev_priv) < 4) {
8550 if (crtc->pipe != PIPE_B)
8551 return;
8552 } else {
8553 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8554 return;
8555 }
8556
8557 pipe_config->gmch_pfit.control = tmp;
8558 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8559 }
8560
8561 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8562 struct intel_crtc_state *pipe_config)
8563 {
8564 struct drm_device *dev = crtc->base.dev;
8565 struct drm_i915_private *dev_priv = to_i915(dev);
8566 int pipe = pipe_config->cpu_transcoder;
8567 struct dpll clock;
8568 u32 mdiv;
8569 int refclk = 100000;
8570
8571 /* In case of DSI, DPLL will not be used */
8572 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8573 return;
8574
8575 vlv_dpio_get(dev_priv);
8576 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8577 vlv_dpio_put(dev_priv);
8578
8579 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8580 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8581 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8582 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8583 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8584
8585 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8586 }
8587
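/*
 * Read back the plane state that firmware left programmed so that the
 * boot framebuffer can be inherited; if the plane is disabled, no
 * initial plane config is reported.
 */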
8588 static void
8589 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8590 struct intel_initial_plane_config *plane_config)
8591 {
8592 struct drm_device *dev = crtc->base.dev;
8593 struct drm_i915_private *dev_priv = to_i915(dev);
8594 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8595 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8596 enum pipe pipe;
8597 u32 val, base, offset;
8598 int fourcc, pixel_format;
8599 unsigned int aligned_height;
8600 struct drm_framebuffer *fb;
8601 struct intel_framebuffer *intel_fb;
8602
8603 if (!plane->get_hw_state(plane, &pipe))
8604 return;
8605
8606 WARN_ON(pipe != crtc->pipe);
8607
8608 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8609 if (!intel_fb) {
8610 DRM_DEBUG_KMS("failed to alloc fb\n");
8611 return;
8612 }
8613
8614 fb = &intel_fb->base;
8615
8616 fb->dev = dev;
8617
8618 val = I915_READ(DSPCNTR(i9xx_plane));
8619
8620 if (INTEL_GEN(dev_priv) >= 4) {
8621 if (val & DISPPLANE_TILED) {
8622 plane_config->tiling = I915_TILING_X;
8623 fb->modifier = I915_FORMAT_MOD_X_TILED;
8624 }
8625
8626 if (val & DISPPLANE_ROTATE_180)
8627 plane_config->rotation = DRM_MODE_ROTATE_180;
8628 }
8629
8630 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8631 val & DISPPLANE_MIRROR)
8632 plane_config->rotation |= DRM_MODE_REFLECT_X;
8633
8634 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8635 fourcc = i9xx_format_to_fourcc(pixel_format);
8636 fb->format = drm_format_info(fourcc);
8637
8638 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8639 offset = I915_READ(DSPOFFSET(i9xx_plane));
8640 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8641 } else if (INTEL_GEN(dev_priv) >= 4) {
8642 if (plane_config->tiling)
8643 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8644 else
8645 offset = I915_READ(DSPLINOFF(i9xx_plane));
8646 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8647 } else {
8648 base = I915_READ(DSPADDR(i9xx_plane));
8649 }
8650 plane_config->base = base;
8651
8652 val = I915_READ(PIPESRC(pipe));
8653 fb->width = ((val >> 16) & 0xfff) + 1;
8654 fb->height = ((val >> 0) & 0xfff) + 1;
8655
8656 val = I915_READ(DSPSTRIDE(i9xx_plane));
8657 fb->pitches[0] = val & 0xffffffc0;
8658
8659 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8660
8661 plane_config->size = fb->pitches[0] * aligned_height;
8662
8663 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8664 crtc->base.name, plane->base.name, fb->width, fb->height,
8665 fb->format->cpp[0] * 8, base, fb->pitches[0],
8666 plane_config->size);
8667
8668 plane_config->fb = intel_fb;
8669 }
8670
8671 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8672 struct intel_crtc_state *pipe_config)
8673 {
8674 struct drm_device *dev = crtc->base.dev;
8675 struct drm_i915_private *dev_priv = to_i915(dev);
8676 int pipe = pipe_config->cpu_transcoder;
8677 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8678 struct dpll clock;
8679 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8680 int refclk = 100000;
8681
8682 /* In case of DSI, DPLL will not be used */
8683 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8684 return;
8685
8686 vlv_dpio_get(dev_priv);
8687 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8688 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8689 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8690 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8691 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8692 vlv_dpio_put(dev_priv);
8693
8694 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8695 clock.m2 = (pll_dw0 & 0xff) << 22;
8696 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8697 clock.m2 |= pll_dw2 & 0x3fffff;
8698 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8699 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8700 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8701
8702 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8703 }
8704
8705 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8706 struct intel_crtc_state *pipe_config)
8707 {
8708 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8709 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8710
8711 pipe_config->lspcon_downsampling = false;
8712
8713 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8714 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8715
8716 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8717 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8718 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8719
8720 if (ycbcr420_enabled) {
8721 /* We support 4:2:0 in full blend mode only */
8722 if (!blend)
8723 output = INTEL_OUTPUT_FORMAT_INVALID;
8724 else if (!(IS_GEMINILAKE(dev_priv) ||
8725 INTEL_GEN(dev_priv) >= 10))
8726 output = INTEL_OUTPUT_FORMAT_INVALID;
8727 else
8728 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8729 } else {
8730 /*
8731 * Currently there is no interface defined to
8732 * check user preference between RGB/YCBCR444
8733 * or YCBCR420. So the only possible case for
8734 * YCBCR444 usage is driving YCBCR420 output
8735 * with LSPCON, when pipe is configured for
8736 * YCBCR444 output and LSPCON takes care of
8737 * downsampling it.
8738 */
8739 pipe_config->lspcon_downsampling = true;
8740 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8741 }
8742 }
8743 }
8744
8745 pipe_config->output_format = output;
8746 }
8747
8748 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8749 {
8750 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8751 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8752 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8753 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8754 u32 tmp;
8755
8756 tmp = I915_READ(DSPCNTR(i9xx_plane));
8757
8758 if (tmp & DISPPLANE_GAMMA_ENABLE)
8759 crtc_state->gamma_enable = true;
8760
8761 if (!HAS_GMCH(dev_priv) &&
8762 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8763 crtc_state->csc_enable = true;
8764 }
8765
8766 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8767 struct intel_crtc_state *pipe_config)
8768 {
8769 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8770 enum intel_display_power_domain power_domain;
8771 intel_wakeref_t wakeref;
8772 u32 tmp;
8773 bool ret;
8774
8775 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8776 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8777 if (!wakeref)
8778 return false;
8779
8780 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8781 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8782 pipe_config->shared_dpll = NULL;
8783
8784 ret = false;
8785
8786 tmp = I915_READ(PIPECONF(crtc->pipe));
8787 if (!(tmp & PIPECONF_ENABLE))
8788 goto out;
8789
8790 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8791 IS_CHERRYVIEW(dev_priv)) {
8792 switch (tmp & PIPECONF_BPC_MASK) {
8793 case PIPECONF_6BPC:
8794 pipe_config->pipe_bpp = 18;
8795 break;
8796 case PIPECONF_8BPC:
8797 pipe_config->pipe_bpp = 24;
8798 break;
8799 case PIPECONF_10BPC:
8800 pipe_config->pipe_bpp = 30;
8801 break;
8802 default:
8803 break;
8804 }
8805 }
8806
8807 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8808 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8809 pipe_config->limited_color_range = true;
8810
8811 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8812 PIPECONF_GAMMA_MODE_SHIFT;
8813
8814 if (IS_CHERRYVIEW(dev_priv))
8815 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8816
8817 i9xx_get_pipe_color_config(pipe_config);
8818 intel_color_get_config(pipe_config);
8819
8820 if (INTEL_GEN(dev_priv) < 4)
8821 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8822
8823 intel_get_pipe_timings(crtc, pipe_config);
8824 intel_get_pipe_src_size(crtc, pipe_config);
8825
8826 i9xx_get_pfit_config(crtc, pipe_config);
8827
8828 if (INTEL_GEN(dev_priv) >= 4) {
8829 /* No way to read it out on pipes B and C */
8830 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8831 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8832 else
8833 tmp = I915_READ(DPLL_MD(crtc->pipe));
8834 pipe_config->pixel_multiplier =
8835 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8836 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8837 pipe_config->dpll_hw_state.dpll_md = tmp;
8838 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8839 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8840 tmp = I915_READ(DPLL(crtc->pipe));
8841 pipe_config->pixel_multiplier =
8842 ((tmp & SDVO_MULTIPLIER_MASK)
8843 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8844 } else {
8845 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8846 * port and will be fixed up in the encoder->get_config
8847 * function. */
8848 pipe_config->pixel_multiplier = 1;
8849 }
8850 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8851 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8852 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8853 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8854 } else {
8855 /* Mask out read-only status bits. */
8856 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8857 DPLL_PORTC_READY_MASK |
8858 DPLL_PORTB_READY_MASK);
8859 }
8860
8861 if (IS_CHERRYVIEW(dev_priv))
8862 chv_crtc_clock_get(crtc, pipe_config);
8863 else if (IS_VALLEYVIEW(dev_priv))
8864 vlv_crtc_clock_get(crtc, pipe_config);
8865 else
8866 i9xx_crtc_clock_get(crtc, pipe_config);
8867
8868 /*
8869 * Normally the dotclock is filled in by the encoder .get_config()
8870 * but in case the pipe is enabled w/o any ports we need a sane
8871 * default.
8872 */
8873 pipe_config->base.adjusted_mode.crtc_clock =
8874 pipe_config->port_clock / pipe_config->pixel_multiplier;
8875
8876 ret = true;
8877
8878 out:
8879 intel_display_power_put(dev_priv, power_domain, wakeref);
8880
8881 return ret;
8882 }
8883
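/*
 * ironlake_init_pch_refclk() first computes the PCH_DREF_CONTROL value
 * it wants ('final'), returns early if the register already matches, and
 * otherwise walks 'val' toward 'final' one clock source at a time with
 * 200 us settle delays; the BUG_ON at the end asserts that the two
 * converged.
 */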
8884 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8885 {
8886 struct intel_encoder *encoder;
8887 int i;
8888 u32 val, final;
8889 bool has_lvds = false;
8890 bool has_cpu_edp = false;
8891 bool has_panel = false;
8892 bool has_ck505 = false;
8893 bool can_ssc = false;
8894 bool using_ssc_source = false;
8895
8896 /* We need to take the global config into account */
8897 for_each_intel_encoder(&dev_priv->drm, encoder) {
8898 switch (encoder->type) {
8899 case INTEL_OUTPUT_LVDS:
8900 has_panel = true;
8901 has_lvds = true;
8902 break;
8903 case INTEL_OUTPUT_EDP:
8904 has_panel = true;
8905 if (encoder->port == PORT_A)
8906 has_cpu_edp = true;
8907 break;
8908 default:
8909 break;
8910 }
8911 }
8912
8913 if (HAS_PCH_IBX(dev_priv)) {
8914 has_ck505 = dev_priv->vbt.display_clock_mode;
8915 can_ssc = has_ck505;
8916 } else {
8917 has_ck505 = false;
8918 can_ssc = true;
8919 }
8920
8921 /* Check if any DPLLs are using the SSC source */
8922 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8923 u32 temp = I915_READ(PCH_DPLL(i));
8924
8925 if (!(temp & DPLL_VCO_ENABLE))
8926 continue;
8927
8928 if ((temp & PLL_REF_INPUT_MASK) ==
8929 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8930 using_ssc_source = true;
8931 break;
8932 }
8933 }
8934
8935 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8936 has_panel, has_lvds, has_ck505, using_ssc_source);
8937
8938 	/* Ironlake: try to set up the display ref clock before DPLL
8939 	 * enabling. This is only under the driver's control after
8940 	 * PCH B stepping; earlier chipset steppings should ignore
8941 	 * this setting.
8942 	 */
8943 val = I915_READ(PCH_DREF_CONTROL);
8944
8945 /* As we must carefully and slowly disable/enable each source in turn,
8946 * compute the final state we want first and check if we need to
8947 * make any changes at all.
8948 */
8949 final = val;
8950 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8951 if (has_ck505)
8952 final |= DREF_NONSPREAD_CK505_ENABLE;
8953 else
8954 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8955
8956 final &= ~DREF_SSC_SOURCE_MASK;
8957 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8958 final &= ~DREF_SSC1_ENABLE;
8959
8960 if (has_panel) {
8961 final |= DREF_SSC_SOURCE_ENABLE;
8962
8963 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8964 final |= DREF_SSC1_ENABLE;
8965
8966 if (has_cpu_edp) {
8967 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8968 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8969 else
8970 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8971 } else
8972 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8973 } else if (using_ssc_source) {
8974 final |= DREF_SSC_SOURCE_ENABLE;
8975 final |= DREF_SSC1_ENABLE;
8976 }
8977
8978 if (final == val)
8979 return;
8980
8981 /* Always enable nonspread source */
8982 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8983
8984 if (has_ck505)
8985 val |= DREF_NONSPREAD_CK505_ENABLE;
8986 else
8987 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8988
8989 if (has_panel) {
8990 val &= ~DREF_SSC_SOURCE_MASK;
8991 val |= DREF_SSC_SOURCE_ENABLE;
8992
8993 /* SSC must be turned on before enabling the CPU output */
8994 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8995 DRM_DEBUG_KMS("Using SSC on panel\n");
8996 val |= DREF_SSC1_ENABLE;
8997 } else
8998 val &= ~DREF_SSC1_ENABLE;
8999
9000 /* Get SSC going before enabling the outputs */
9001 I915_WRITE(PCH_DREF_CONTROL, val);
9002 POSTING_READ(PCH_DREF_CONTROL);
9003 udelay(200);
9004
9005 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9006
9007 /* Enable CPU source on CPU attached eDP */
9008 if (has_cpu_edp) {
9009 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9010 DRM_DEBUG_KMS("Using SSC on eDP\n");
9011 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9012 } else
9013 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9014 } else
9015 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9016
9017 I915_WRITE(PCH_DREF_CONTROL, val);
9018 POSTING_READ(PCH_DREF_CONTROL);
9019 udelay(200);
9020 } else {
9021 DRM_DEBUG_KMS("Disabling CPU source output\n");
9022
9023 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9024
9025 /* Turn off CPU output */
9026 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9027
9028 I915_WRITE(PCH_DREF_CONTROL, val);
9029 POSTING_READ(PCH_DREF_CONTROL);
9030 udelay(200);
9031
9032 if (!using_ssc_source) {
9033 DRM_DEBUG_KMS("Disabling SSC source\n");
9034
9035 /* Turn off the SSC source */
9036 val &= ~DREF_SSC_SOURCE_MASK;
9037 val |= DREF_SSC_SOURCE_DISABLE;
9038
9039 /* Turn off SSC1 */
9040 val &= ~DREF_SSC1_ENABLE;
9041
9042 I915_WRITE(PCH_DREF_CONTROL, val);
9043 POSTING_READ(PCH_DREF_CONTROL);
9044 udelay(200);
9045 }
9046 }
9047
9048 BUG_ON(val != final);
9049 }
9050
9051 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9052 {
9053 u32 tmp;
9054
9055 tmp = I915_READ(SOUTH_CHICKEN2);
9056 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9057 I915_WRITE(SOUTH_CHICKEN2, tmp);
9058
9059 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
9060 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9061 DRM_ERROR("FDI mPHY reset assert timeout\n");
9062
9063 tmp = I915_READ(SOUTH_CHICKEN2);
9064 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9065 I915_WRITE(SOUTH_CHICKEN2, tmp);
9066
9067 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
9068 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9069 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
9070 }
9071
9072 /* WaMPhyProgramming:hsw */
9073 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9074 {
9075 u32 tmp;
9076
9077 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9078 tmp &= ~(0xFF << 24);
9079 tmp |= (0x12 << 24);
9080 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9081
9082 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9083 tmp |= (1 << 11);
9084 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9085
9086 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9087 tmp |= (1 << 11);
9088 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9089
9090 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9091 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9092 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9093
9094 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9095 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9096 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9097
9098 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9099 tmp &= ~(7 << 13);
9100 tmp |= (5 << 13);
9101 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9102
9103 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9104 tmp &= ~(7 << 13);
9105 tmp |= (5 << 13);
9106 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9107
9108 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9109 tmp &= ~0xFF;
9110 tmp |= 0x1C;
9111 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9112
9113 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9114 tmp &= ~0xFF;
9115 tmp |= 0x1C;
9116 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9117
9118 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9119 tmp &= ~(0xFF << 16);
9120 tmp |= (0x1C << 16);
9121 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
9122
9123 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
9124 tmp &= ~(0xFF << 16);
9125 tmp |= (0x1C << 16);
9126 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
9127
9128 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
9129 tmp |= (1 << 27);
9130 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
9131
9132 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
9133 tmp |= (1 << 27);
9134 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
9135
9136 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
9137 tmp &= ~(0xF << 28);
9138 tmp |= (4 << 28);
9139 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
9140
9141 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
9142 tmp &= ~(0xF << 28);
9143 tmp |= (4 << 28);
9144 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
9145 }
9146
9147 /* Implements 3 different sequences from BSpec chapter "Display iCLK
9148 * Programming" based on the parameters passed:
9149 * - Sequence to enable CLKOUT_DP
9150 * - Sequence to enable CLKOUT_DP without spread
9151 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
9152 */
9153 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9154 bool with_spread, bool with_fdi)
9155 {
9156 u32 reg, tmp;
9157
9158 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9159 with_spread = true;
9160 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9161 with_fdi, "LP PCH doesn't have FDI\n"))
9162 with_fdi = false;
9163
9164 mutex_lock(&dev_priv->sb_lock);
9165
9166 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9167 tmp &= ~SBI_SSCCTL_DISABLE;
9168 tmp |= SBI_SSCCTL_PATHALT;
9169 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9170
9171 udelay(24);
9172
9173 if (with_spread) {
9174 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9175 tmp &= ~SBI_SSCCTL_PATHALT;
9176 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9177
9178 if (with_fdi) {
9179 lpt_reset_fdi_mphy(dev_priv);
9180 lpt_program_fdi_mphy(dev_priv);
9181 }
9182 }
9183
9184 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9185 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9186 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9187 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9188
9189 mutex_unlock(&dev_priv->sb_lock);
9190 }
9191
9192 /* Sequence to disable CLKOUT_DP */
9193 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9194 {
9195 u32 reg, tmp;
9196
9197 mutex_lock(&dev_priv->sb_lock);
9198
9199 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9200 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9201 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9202 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9203
9204 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9205 if (!(tmp & SBI_SSCCTL_DISABLE)) {
9206 if (!(tmp & SBI_SSCCTL_PATHALT)) {
9207 tmp |= SBI_SSCCTL_PATHALT;
9208 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9209 udelay(32);
9210 }
9211 tmp |= SBI_SSCCTL_DISABLE;
9212 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9213 }
9214
9215 mutex_unlock(&dev_priv->sb_lock);
9216 }
9217
9218 #define BEND_IDX(steps) ((50 + (steps)) / 5)
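/*
 * BEND_IDX() maps a bend amount in [-50, 50] (multiples of 5) onto an
 * index into sscdivintphase[]: BEND_IDX(-50) == 0, BEND_IDX(0) == 10 and
 * BEND_IDX(50) == 20.
 */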
9219
9220 static const u16 sscdivintphase[] = {
9221 [BEND_IDX( 50)] = 0x3B23,
9222 [BEND_IDX( 45)] = 0x3B23,
9223 [BEND_IDX( 40)] = 0x3C23,
9224 [BEND_IDX( 35)] = 0x3C23,
9225 [BEND_IDX( 30)] = 0x3D23,
9226 [BEND_IDX( 25)] = 0x3D23,
9227 [BEND_IDX( 20)] = 0x3E23,
9228 [BEND_IDX( 15)] = 0x3E23,
9229 [BEND_IDX( 10)] = 0x3F23,
9230 [BEND_IDX( 5)] = 0x3F23,
9231 [BEND_IDX( 0)] = 0x0025,
9232 [BEND_IDX( -5)] = 0x0025,
9233 [BEND_IDX(-10)] = 0x0125,
9234 [BEND_IDX(-15)] = 0x0125,
9235 [BEND_IDX(-20)] = 0x0225,
9236 [BEND_IDX(-25)] = 0x0225,
9237 [BEND_IDX(-30)] = 0x0325,
9238 [BEND_IDX(-35)] = 0x0325,
9239 [BEND_IDX(-40)] = 0x0425,
9240 [BEND_IDX(-45)] = 0x0425,
9241 [BEND_IDX(-50)] = 0x0525,
9242 };
9243
9244 /*
9245 * Bend CLKOUT_DP
9246 * steps -50 to 50 inclusive, in steps of 5
9247 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9248 * change in clock period = -(steps / 10) * 5.787 ps
9249 */
9250 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9251 {
9252 u32 tmp;
9253 int idx = BEND_IDX(steps);
9254
9255 if (WARN_ON(steps % 5 != 0))
9256 return;
9257
9258 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9259 return;
9260
9261 mutex_lock(&dev_priv->sb_lock);
9262
9263 if (steps % 10 != 0)
9264 tmp = 0xAAAAAAAB;
9265 else
9266 tmp = 0x00000000;
9267 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9268
9269 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9270 tmp &= 0xffff0000;
9271 tmp |= sscdivintphase[idx];
9272 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9273
9274 mutex_unlock(&dev_priv->sb_lock);
9275 }
9276
9277 #undef BEND_IDX
9278
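/*
 * The two helpers below detect whether firmware left the SPLL or a WRPLL
 * running off the PCH SSC reference, so that lpt_init_pch_refclk() knows
 * it must keep that reference enabled.
 */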
9279 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9280 {
9281 u32 fuse_strap = I915_READ(FUSE_STRAP);
9282 u32 ctl = I915_READ(SPLL_CTL);
9283
9284 if ((ctl & SPLL_PLL_ENABLE) == 0)
9285 return false;
9286
9287 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9288 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9289 return true;
9290
9291 if (IS_BROADWELL(dev_priv) &&
9292 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9293 return true;
9294
9295 return false;
9296 }
9297
9298 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9299 enum intel_dpll_id id)
9300 {
9301 u32 fuse_strap = I915_READ(FUSE_STRAP);
9302 u32 ctl = I915_READ(WRPLL_CTL(id));
9303
9304 if ((ctl & WRPLL_PLL_ENABLE) == 0)
9305 return false;
9306
9307 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9308 return true;
9309
9310 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9311 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9312 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9313 return true;
9314
9315 return false;
9316 }
9317
9318 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9319 {
9320 struct intel_encoder *encoder;
9321 bool has_fdi = false;
9322
9323 for_each_intel_encoder(&dev_priv->drm, encoder) {
9324 switch (encoder->type) {
9325 case INTEL_OUTPUT_ANALOG:
9326 has_fdi = true;
9327 break;
9328 default:
9329 break;
9330 }
9331 }
9332
9333 /*
9334 * The BIOS may have decided to use the PCH SSC
9335 * reference so we must not disable it until the
9336 * relevant PLLs have stopped relying on it. We'll
9337 * just leave the PCH SSC reference enabled in case
9338 * any active PLL is using it. It will get disabled
9339 * after runtime suspend if we don't have FDI.
9340 *
9341 * TODO: Move the whole reference clock handling
9342 * to the modeset sequence proper so that we can
9343 * actually enable/disable/reconfigure these things
9344 * safely. To do that we need to introduce a real
9345 * clock hierarchy. That would also allow us to do
9346 * clock bending finally.
9347 */
9348 dev_priv->pch_ssc_use = 0;
9349
9350 if (spll_uses_pch_ssc(dev_priv)) {
9351 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9352 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9353 }
9354
9355 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9356 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9357 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9358 }
9359
9360 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9361 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9362 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9363 }
9364
9365 if (dev_priv->pch_ssc_use)
9366 return;
9367
9368 if (has_fdi) {
9369 lpt_bend_clkout_dp(dev_priv, 0);
9370 lpt_enable_clkout_dp(dev_priv, true, true);
9371 } else {
9372 lpt_disable_clkout_dp(dev_priv);
9373 }
9374 }
9375
9376 /*
9377 * Initialize reference clocks when the driver loads
9378 */
9379 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9380 {
9381 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9382 ironlake_init_pch_refclk(dev_priv);
9383 else if (HAS_PCH_LPT(dev_priv))
9384 lpt_init_pch_refclk(dev_priv);
9385 }
9386
9387 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9388 {
9389 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9390 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9391 enum pipe pipe = crtc->pipe;
9392 u32 val;
9393
9394 val = 0;
9395
9396 switch (crtc_state->pipe_bpp) {
9397 case 18:
9398 val |= PIPECONF_6BPC;
9399 break;
9400 case 24:
9401 val |= PIPECONF_8BPC;
9402 break;
9403 case 30:
9404 val |= PIPECONF_10BPC;
9405 break;
9406 case 36:
9407 val |= PIPECONF_12BPC;
9408 break;
9409 default:
9410 /* Case prevented by intel_choose_pipe_bpp_dither. */
9411 BUG();
9412 }
9413
9414 if (crtc_state->dither)
9415 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9416
9417 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9418 val |= PIPECONF_INTERLACED_ILK;
9419 else
9420 val |= PIPECONF_PROGRESSIVE;
9421
9422 if (crtc_state->limited_color_range)
9423 val |= PIPECONF_COLOR_RANGE_SELECT;
9424
9425 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9426
9427 I915_WRITE(PIPECONF(pipe), val);
9428 POSTING_READ(PIPECONF(pipe));
9429 }
9430
9431 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9432 {
9433 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9434 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9435 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9436 u32 val = 0;
9437
9438 if (IS_HASWELL(dev_priv) && crtc_state->dither)
9439 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9440
9441 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9442 val |= PIPECONF_INTERLACED_ILK;
9443 else
9444 val |= PIPECONF_PROGRESSIVE;
9445
9446 I915_WRITE(PIPECONF(cpu_transcoder), val);
9447 POSTING_READ(PIPECONF(cpu_transcoder));
9448 }
9449
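/*
 * PIPEMISC carries the dithering depth/enable bits as well as the YUV
 * output bits that intel_get_crtc_ycbcr_config() reads back during state
 * readout.
 */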
9450 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9451 {
9452 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9453 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9454 u32 val = 0;
9455
9456 switch (crtc_state->pipe_bpp) {
9457 case 18:
9458 val |= PIPEMISC_DITHER_6_BPC;
9459 break;
9460 case 24:
9461 val |= PIPEMISC_DITHER_8_BPC;
9462 break;
9463 case 30:
9464 val |= PIPEMISC_DITHER_10_BPC;
9465 break;
9466 case 36:
9467 val |= PIPEMISC_DITHER_12_BPC;
9468 break;
9469 default:
9470 MISSING_CASE(crtc_state->pipe_bpp);
9471 break;
9472 }
9473
9474 if (crtc_state->dither)
9475 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9476
9477 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9478 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9479 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9480
9481 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9482 val |= PIPEMISC_YUV420_ENABLE |
9483 PIPEMISC_YUV420_MODE_FULL_BLEND;
9484
9485 if (INTEL_GEN(dev_priv) >= 11 &&
9486 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9487 BIT(PLANE_CURSOR))) == 0)
9488 val |= PIPEMISC_HDR_MODE_PRECISION;
9489
9490 I915_WRITE(PIPEMISC(crtc->pipe), val);
9491 }
9492
9493 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9494 {
9495 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9496 u32 tmp;
9497
9498 tmp = I915_READ(PIPEMISC(crtc->pipe));
9499
9500 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9501 case PIPEMISC_DITHER_6_BPC:
9502 return 18;
9503 case PIPEMISC_DITHER_8_BPC:
9504 return 24;
9505 case PIPEMISC_DITHER_10_BPC:
9506 return 30;
9507 case PIPEMISC_DITHER_12_BPC:
9508 return 36;
9509 default:
9510 MISSING_CASE(tmp);
9511 return 0;
9512 }
9513 }
9514
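/*
 * Worked example with illustrative numbers: a 270000 kHz target clock at
 * 24 bpp needs 270000 * 24 * 21 / 20 = 6804000 kbps; with a 270000 kHz
 * link each lane carries 8 * 270000 = 2160000 kbps, so
 * DIV_ROUND_UP(6804000, 2160000) = 4 lanes are required.
 */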
9515 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9516 {
9517 /*
9518 * Account for spread spectrum to avoid
9519 * oversubscribing the link. Max center spread
9520 * is 2.5%; use 5% for safety's sake.
9521 */
9522 u32 bps = target_clock * bpp * 21 / 20;
9523 return DIV_ROUND_UP(bps, link_bw * 8);
9524 }
9525
9526 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9527 {
9528 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9529 }
9530
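/*
 * Note the P1 post divider encoding used below: the divider is written
 * as a one-hot bitmask, (1 << (p1 - 1)), into both the FPA0 and FPA1
 * field positions of the DPLL register.
 */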
9531 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9532 struct intel_crtc_state *crtc_state,
9533 struct dpll *reduced_clock)
9534 {
9535 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9536 u32 dpll, fp, fp2;
9537 int factor;
9538
9539 /* Enable autotuning of the PLL clock (if permissible) */
9540 factor = 21;
9541 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9542 if ((intel_panel_use_ssc(dev_priv) &&
9543 dev_priv->vbt.lvds_ssc_freq == 100000) ||
9544 (HAS_PCH_IBX(dev_priv) &&
9545 intel_is_dual_link_lvds(dev_priv)))
9546 factor = 25;
9547 } else if (crtc_state->sdvo_tv_clock) {
9548 factor = 20;
9549 }
9550
9551 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9552
9553 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9554 fp |= FP_CB_TUNE;
9555
9556 if (reduced_clock) {
9557 fp2 = i9xx_dpll_compute_fp(reduced_clock);
9558
9559 if (reduced_clock->m < factor * reduced_clock->n)
9560 fp2 |= FP_CB_TUNE;
9561 } else {
9562 fp2 = fp;
9563 }
9564
9565 dpll = 0;
9566
9567 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9568 dpll |= DPLLB_MODE_LVDS;
9569 else
9570 dpll |= DPLLB_MODE_DAC_SERIAL;
9571
9572 dpll |= (crtc_state->pixel_multiplier - 1)
9573 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9574
9575 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9576 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9577 dpll |= DPLL_SDVO_HIGH_SPEED;
9578
9579 if (intel_crtc_has_dp_encoder(crtc_state))
9580 dpll |= DPLL_SDVO_HIGH_SPEED;
9581
9582 /*
9583 * The high speed IO clock is only really required for
9584 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9585 * possible to share the DPLL between CRT and HDMI. Enabling
9586 	 * the clock needlessly does no real harm, except potentially
9587 	 * using up a bit of power.
9588 *
9589 * We'll limit this to IVB with 3 pipes, since it has only two
9590 * DPLLs and so DPLL sharing is the only way to get three pipes
9591 * driving PCH ports at the same time. On SNB we could do this,
9592 * and potentially avoid enabling the second DPLL, but it's not
9593 	 * clear if it's a win or loss power wise. No point in doing
9594 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9595 */
9596 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
9597 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9598 dpll |= DPLL_SDVO_HIGH_SPEED;
9599
9600 /* compute bitmask from p1 value */
9601 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9602 /* also FPA1 */
9603 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9604
9605 switch (crtc_state->dpll.p2) {
9606 case 5:
9607 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9608 break;
9609 case 7:
9610 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9611 break;
9612 case 10:
9613 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9614 break;
9615 case 14:
9616 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9617 break;
9618 }
9619
9620 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9621 intel_panel_use_ssc(dev_priv))
9622 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9623 else
9624 dpll |= PLL_REF_INPUT_DREFCLK;
9625
9626 dpll |= DPLL_VCO_ENABLE;
9627
9628 crtc_state->dpll_hw_state.dpll = dpll;
9629 crtc_state->dpll_hw_state.fp0 = fp;
9630 crtc_state->dpll_hw_state.fp1 = fp2;
9631 }
9632
9633 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9634 struct intel_crtc_state *crtc_state)
9635 {
9636 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9637 struct intel_atomic_state *state =
9638 to_intel_atomic_state(crtc_state->base.state);
9639 const struct intel_limit *limit;
9640 int refclk = 120000;
9641
9642 memset(&crtc_state->dpll_hw_state, 0,
9643 sizeof(crtc_state->dpll_hw_state));
9644
9645 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9646 if (!crtc_state->has_pch_encoder)
9647 return 0;
9648
9649 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9650 if (intel_panel_use_ssc(dev_priv)) {
9651 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9652 dev_priv->vbt.lvds_ssc_freq);
9653 refclk = dev_priv->vbt.lvds_ssc_freq;
9654 }
9655
9656 if (intel_is_dual_link_lvds(dev_priv)) {
9657 if (refclk == 100000)
9658 limit = &intel_limits_ironlake_dual_lvds_100m;
9659 else
9660 limit = &intel_limits_ironlake_dual_lvds;
9661 } else {
9662 if (refclk == 100000)
9663 limit = &intel_limits_ironlake_single_lvds_100m;
9664 else
9665 limit = &intel_limits_ironlake_single_lvds;
9666 }
9667 } else {
9668 limit = &intel_limits_ironlake_dac;
9669 }
9670
9671 if (!crtc_state->clock_set &&
9672 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9673 refclk, NULL, &crtc_state->dpll)) {
9674 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9675 return -EINVAL;
9676 }
9677
9678 ironlake_compute_dpll(crtc, crtc_state, NULL);
9679
9680 if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
9681 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9682 pipe_name(crtc->pipe));
9683 return -EINVAL;
9684 }
9685
9686 return 0;
9687 }
9688
9689 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9690 struct intel_link_m_n *m_n)
9691 {
9692 struct drm_device *dev = crtc->base.dev;
9693 struct drm_i915_private *dev_priv = to_i915(dev);
9694 enum pipe pipe = crtc->pipe;
9695
9696 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9697 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9698 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9699 & ~TU_SIZE_MASK;
9700 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9701 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9702 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9703 }
9704
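/*
 * The data M1/M2 registers carry the transfer unit size in their top
 * bits, so the readouts below mask it off with ~TU_SIZE_MASK and decode
 * it separately into ->tu (stored by the hardware as TU size - 1).
 */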
9705 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9706 enum transcoder transcoder,
9707 struct intel_link_m_n *m_n,
9708 struct intel_link_m_n *m2_n2)
9709 {
9710 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9711 enum pipe pipe = crtc->pipe;
9712
9713 if (INTEL_GEN(dev_priv) >= 5) {
9714 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9715 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9716 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9717 & ~TU_SIZE_MASK;
9718 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9719 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9720 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9721
9722 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9723 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9724 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9725 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9726 & ~TU_SIZE_MASK;
9727 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9728 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9729 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9730 }
9731 } else {
9732 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9733 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9734 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9735 & ~TU_SIZE_MASK;
9736 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9737 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9738 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9739 }
9740 }
9741
9742 void intel_dp_get_m_n(struct intel_crtc *crtc,
9743 struct intel_crtc_state *pipe_config)
9744 {
9745 if (pipe_config->has_pch_encoder)
9746 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9747 else
9748 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9749 &pipe_config->dp_m_n,
9750 &pipe_config->dp_m2_n2);
9751 }
9752
9753 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9754 struct intel_crtc_state *pipe_config)
9755 {
9756 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9757 &pipe_config->fdi_m_n, NULL);
9758 }
9759
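/*
 * A pipe scaler (panel fitter) is one with PS_SCALER_EN set and no plane
 * selected in PS_PLANE_SEL; the loop below finds the first such scaler
 * on this pipe.
 */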
9760 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9761 struct intel_crtc_state *pipe_config)
9762 {
9763 struct drm_device *dev = crtc->base.dev;
9764 struct drm_i915_private *dev_priv = to_i915(dev);
9765 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9766 u32 ps_ctrl = 0;
9767 int id = -1;
9768 int i;
9769
9770 /* find scaler attached to this pipe */
9771 for (i = 0; i < crtc->num_scalers; i++) {
9772 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9773 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9774 id = i;
9775 pipe_config->pch_pfit.enabled = true;
9776 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9777 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9778 scaler_state->scalers[i].in_use = true;
9779 break;
9780 }
9781 }
9782
9783 scaler_state->scaler_id = id;
9784 if (id >= 0) {
9785 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9786 } else {
9787 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9788 }
9789 }
9790
9791 static void
9792 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9793 struct intel_initial_plane_config *plane_config)
9794 {
9795 struct drm_device *dev = crtc->base.dev;
9796 struct drm_i915_private *dev_priv = to_i915(dev);
9797 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9798 enum plane_id plane_id = plane->id;
9799 enum pipe pipe;
9800 u32 val, base, offset, stride_mult, tiling, alpha;
9801 int fourcc, pixel_format;
9802 unsigned int aligned_height;
9803 struct drm_framebuffer *fb;
9804 struct intel_framebuffer *intel_fb;
9805
9806 if (!plane->get_hw_state(plane, &pipe))
9807 return;
9808
9809 WARN_ON(pipe != crtc->pipe);
9810
9811 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9812 if (!intel_fb) {
9813 DRM_DEBUG_KMS("failed to alloc fb\n");
9814 return;
9815 }
9816
9817 fb = &intel_fb->base;
9818
9819 fb->dev = dev;
9820
9821 val = I915_READ(PLANE_CTL(pipe, plane_id));
9822
9823 if (INTEL_GEN(dev_priv) >= 11)
9824 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9825 else
9826 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9827
9828 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9829 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9830 alpha &= PLANE_COLOR_ALPHA_MASK;
9831 } else {
9832 alpha = val & PLANE_CTL_ALPHA_MASK;
9833 }
9834
9835 fourcc = skl_format_to_fourcc(pixel_format,
9836 val & PLANE_CTL_ORDER_RGBX, alpha);
9837 fb->format = drm_format_info(fourcc);
9838
9839 tiling = val & PLANE_CTL_TILED_MASK;
9840 switch (tiling) {
9841 case PLANE_CTL_TILED_LINEAR:
9842 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9843 break;
9844 case PLANE_CTL_TILED_X:
9845 plane_config->tiling = I915_TILING_X;
9846 fb->modifier = I915_FORMAT_MOD_X_TILED;
9847 break;
9848 case PLANE_CTL_TILED_Y:
9849 plane_config->tiling = I915_TILING_Y;
9850 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9851 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9852 else
9853 fb->modifier = I915_FORMAT_MOD_Y_TILED;
9854 break;
9855 case PLANE_CTL_TILED_YF:
9856 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9857 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9858 else
9859 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9860 break;
9861 default:
9862 MISSING_CASE(tiling);
9863 goto error;
9864 }
9865
9866 /*
9867 	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr,
9868 	 * while i915 HW rotation is clockwise; that's why the values are swapped here.
9869 */
9870 switch (val & PLANE_CTL_ROTATE_MASK) {
9871 case PLANE_CTL_ROTATE_0:
9872 plane_config->rotation = DRM_MODE_ROTATE_0;
9873 break;
9874 case PLANE_CTL_ROTATE_90:
9875 plane_config->rotation = DRM_MODE_ROTATE_270;
9876 break;
9877 case PLANE_CTL_ROTATE_180:
9878 plane_config->rotation = DRM_MODE_ROTATE_180;
9879 break;
9880 case PLANE_CTL_ROTATE_270:
9881 plane_config->rotation = DRM_MODE_ROTATE_90;
9882 break;
9883 }
9884
9885 if (INTEL_GEN(dev_priv) >= 10 &&
9886 val & PLANE_CTL_FLIP_HORIZONTAL)
9887 plane_config->rotation |= DRM_MODE_REFLECT_X;
9888
9889 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9890 plane_config->base = base;
9891
9892 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9893
9894 val = I915_READ(PLANE_SIZE(pipe, plane_id));
9895 fb->height = ((val >> 16) & 0xfff) + 1;
9896 fb->width = ((val >> 0) & 0x1fff) + 1;
9897
9898 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9899 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9900 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9901
9902 aligned_height = intel_fb_align_height(fb, 0, fb->height);
9903
9904 plane_config->size = fb->pitches[0] * aligned_height;
9905
9906 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9907 crtc->base.name, plane->base.name, fb->width, fb->height,
9908 fb->format->cpp[0] * 8, base, fb->pitches[0],
9909 plane_config->size);
9910
9911 plane_config->fb = intel_fb;
9912 return;
9913
9914 error:
9915 kfree(intel_fb);
9916 }
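/*
* A minimal sketch of the readout mapping above (illustrative only; the
* PLANE_CTL_ROTATE_* cases are assumed to decode to 0..3 in order). HW
* rotation is clockwise, DRM_MODE_ROTATE_* counter-clockwise, so the
* 90 and 270 degree entries trade places:
*
*	[0] PLANE_CTL_ROTATE_0   -> DRM_MODE_ROTATE_0
*	[1] PLANE_CTL_ROTATE_90  -> DRM_MODE_ROTATE_270
*	[2] PLANE_CTL_ROTATE_180 -> DRM_MODE_ROTATE_180
*	[3] PLANE_CTL_ROTATE_270 -> DRM_MODE_ROTATE_90
*/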
9917
9918 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9919 struct intel_crtc_state *pipe_config)
9920 {
9921 struct drm_device *dev = crtc->base.dev;
9922 struct drm_i915_private *dev_priv = to_i915(dev);
9923 u32 tmp;
9924
9925 tmp = I915_READ(PF_CTL(crtc->pipe));
9926
9927 if (tmp & PF_ENABLE) {
9928 pipe_config->pch_pfit.enabled = true;
9929 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9930 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9931
9932 /* We currently do not free assignments of panel fitters on
9933 * ivb/hsw (since we don't use the higher upscaling modes which
9934 * differentiate them), so just WARN about this case for now. */
9935 if (IS_GEN(dev_priv, 7)) {
9936 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9937 PF_PIPE_SEL_IVB(crtc->pipe));
9938 }
9939 }
9940 }
9941
9942 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9943 struct intel_crtc_state *pipe_config)
9944 {
9945 struct drm_device *dev = crtc->base.dev;
9946 struct drm_i915_private *dev_priv = to_i915(dev);
9947 enum intel_display_power_domain power_domain;
9948 intel_wakeref_t wakeref;
9949 u32 tmp;
9950 bool ret;
9951
9952 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9953 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9954 if (!wakeref)
9955 return false;
9956
9957 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9958 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9959 pipe_config->shared_dpll = NULL;
9960
9961 ret = false;
9962 tmp = I915_READ(PIPECONF(crtc->pipe));
9963 if (!(tmp & PIPECONF_ENABLE))
9964 goto out;
9965
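/*
* PIPECONF_BPC selects bits per color channel; pipe_bpp counts all
* three channels, e.g. 8 bpc -> 8 * 3 = 24 bpp, 10 bpc -> 30 bpp.
*/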
9966 switch (tmp & PIPECONF_BPC_MASK) {
9967 case PIPECONF_6BPC:
9968 pipe_config->pipe_bpp = 18;
9969 break;
9970 case PIPECONF_8BPC:
9971 pipe_config->pipe_bpp = 24;
9972 break;
9973 case PIPECONF_10BPC:
9974 pipe_config->pipe_bpp = 30;
9975 break;
9976 case PIPECONF_12BPC:
9977 pipe_config->pipe_bpp = 36;
9978 break;
9979 default:
9980 break;
9981 }
9982
9983 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9984 pipe_config->limited_color_range = true;
9985
9986 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9987 PIPECONF_GAMMA_MODE_SHIFT;
9988
9989 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9990
9991 i9xx_get_pipe_color_config(pipe_config);
9992 intel_color_get_config(pipe_config);
9993
9994 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9995 struct intel_shared_dpll *pll;
9996 enum intel_dpll_id pll_id;
9997
9998 pipe_config->has_pch_encoder = true;
9999
10000 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10001 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10002 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10003
10004 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10005
10006 if (HAS_PCH_IBX(dev_priv)) {
10007 /*
10008 * The pipe->pch transcoder and pch transcoder->pll
10009 * mapping is fixed.
10010 */
10011 pll_id = (enum intel_dpll_id) crtc->pipe;
10012 } else {
10013 tmp = I915_READ(PCH_DPLL_SEL);
10014 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10015 pll_id = DPLL_ID_PCH_PLL_B;
10016 else
10017 pll_id = DPLL_ID_PCH_PLL_A;
10018 }
10019
10020 pipe_config->shared_dpll =
10021 intel_get_shared_dpll_by_id(dev_priv, pll_id);
10022 pll = pipe_config->shared_dpll;
10023
10024 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10025 &pipe_config->dpll_hw_state));
10026
10027 tmp = pipe_config->dpll_hw_state.dpll;
10028 pipe_config->pixel_multiplier =
10029 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10030 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10031
10032 ironlake_pch_clock_get(crtc, pipe_config);
10033 } else {
10034 pipe_config->pixel_multiplier = 1;
10035 }
10036
10037 intel_get_pipe_timings(crtc, pipe_config);
10038 intel_get_pipe_src_size(crtc, pipe_config);
10039
10040 ironlake_get_pfit_config(crtc, pipe_config);
10041
10042 ret = true;
10043
10044 out:
10045 intel_display_power_put(dev_priv, power_domain, wakeref);
10046
10047 return ret;
10048 }
10049 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10050 struct intel_crtc_state *crtc_state)
10051 {
10052 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10053 struct intel_atomic_state *state =
10054 to_intel_atomic_state(crtc_state->base.state);
10055
10056 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10057 INTEL_GEN(dev_priv) >= 11) {
10058 struct intel_encoder *encoder =
10059 intel_get_crtc_new_encoder(state, crtc_state);
10060
10061 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10062 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10063 pipe_name(crtc->pipe));
10064 return -EINVAL;
10065 }
10066 }
10067
10068 return 0;
10069 }
10070
10071 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10072 enum port port,
10073 struct intel_crtc_state *pipe_config)
10074 {
10075 enum intel_dpll_id id;
10076 u32 temp;
10077
10078 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10079 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10080
10081 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10082 return;
10083
10084 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10085 }
10086
10087 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
10088 enum port port,
10089 struct intel_crtc_state *pipe_config)
10090 {
10091 enum phy phy = intel_port_to_phy(dev_priv, port);
10092 enum icl_port_dpll_id port_dpll_id;
10093 enum intel_dpll_id id;
10094 u32 temp;
10095
10096 if (intel_phy_is_combo(dev_priv, phy)) {
10097 temp = I915_READ(ICL_DPCLKA_CFGCR0) &
10098 ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
10099 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
10100 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10101 } else if (intel_phy_is_tc(dev_priv, phy)) {
10102 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
10103
10104 if (clk_sel == DDI_CLK_SEL_MG) {
10105 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
10106 port));
10107 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
10108 } else {
10109 WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
10110 id = DPLL_ID_ICL_TBTPLL;
10111 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10112 }
10113 } else {
10114 WARN(1, "Invalid port %x\n", port);
10115 return;
10116 }
10117
10118 pipe_config->icl_port_dplls[port_dpll_id].pll =
10119 intel_get_shared_dpll_by_id(dev_priv, id);
10120
10121 icl_set_active_port_dpll(pipe_config, port_dpll_id);
10122 }
10123
10124 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10125 enum port port,
10126 struct intel_crtc_state *pipe_config)
10127 {
10128 enum intel_dpll_id id;
10129
10130 switch (port) {
10131 case PORT_A:
10132 id = DPLL_ID_SKL_DPLL0;
10133 break;
10134 case PORT_B:
10135 id = DPLL_ID_SKL_DPLL1;
10136 break;
10137 case PORT_C:
10138 id = DPLL_ID_SKL_DPLL2;
10139 break;
10140 default:
10141 DRM_ERROR("Incorrect port type\n");
10142 return;
10143 }
10144
10145 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10146 }
10147
10148 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
10149 enum port port,
10150 struct intel_crtc_state *pipe_config)
10151 {
10152 enum intel_dpll_id id;
10153 u32 temp;
10154
10155 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10156 id = temp >> (port * 3 + 1);
10157
10158 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10159 return;
10160
10161 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10162 }
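/*
* A sketch of the DPLL_CTRL2 layout assumed by the decode above: each
* port owns a 3-bit group starting at bit (port * 3), with the select
* override in the low bit and the 2-bit DPLL select field above it,
* hence the (port * 3 + 1) shift. E.g. for PORT_B (port == 1) the
* select field sits at bits 5:4:
*
*	id = (tmp & (3 << 4)) >> 4;	values 0..3 -> DPLL0..DPLL3
*/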
10163
10164 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
10165 enum port port,
10166 struct intel_crtc_state *pipe_config)
10167 {
10168 enum intel_dpll_id id;
10169 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
10170
10171 switch (ddi_pll_sel) {
10172 case PORT_CLK_SEL_WRPLL1:
10173 id = DPLL_ID_WRPLL1;
10174 break;
10175 case PORT_CLK_SEL_WRPLL2:
10176 id = DPLL_ID_WRPLL2;
10177 break;
10178 case PORT_CLK_SEL_SPLL:
10179 id = DPLL_ID_SPLL;
10180 break;
10181 case PORT_CLK_SEL_LCPLL_810:
10182 id = DPLL_ID_LCPLL_810;
10183 break;
10184 case PORT_CLK_SEL_LCPLL_1350:
10185 id = DPLL_ID_LCPLL_1350;
10186 break;
10187 case PORT_CLK_SEL_LCPLL_2700:
10188 id = DPLL_ID_LCPLL_2700;
10189 break;
10190 default:
10191 MISSING_CASE(ddi_pll_sel);
10192 /* fall through */
10193 case PORT_CLK_SEL_NONE:
10194 return;
10195 }
10196
10197 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10198 }
10199
10200 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
10201 struct intel_crtc_state *pipe_config,
10202 u64 *power_domain_mask,
10203 intel_wakeref_t *wakerefs)
10204 {
10205 struct drm_device *dev = crtc->base.dev;
10206 struct drm_i915_private *dev_priv = to_i915(dev);
10207 enum intel_display_power_domain power_domain;
10208 unsigned long panel_transcoder_mask = 0;
10209 unsigned long enabled_panel_transcoders = 0;
10210 enum transcoder panel_transcoder;
10211 intel_wakeref_t wf;
10212 u32 tmp;
10213
10214 if (INTEL_GEN(dev_priv) >= 11)
10215 panel_transcoder_mask |=
10216 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
10217
10218 if (HAS_TRANSCODER_EDP(dev_priv))
10219 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
10220
10221 /*
10222 * The pipe->transcoder mapping is fixed with the exception of the eDP
10223 * and DSI transcoders handled below.
10224 */
10225 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10226
10227 /*
10228 * XXX: Do intel_display_power_get_if_enabled before reading this (for
10229 * consistency and less surprising code; the register is in the always-on power well).
10230 */
10231 for_each_set_bit(panel_transcoder,
10232 &panel_transcoder_mask,
10233 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
10234 bool force_thru = false;
10235 enum pipe trans_pipe;
10236
10237 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
10238 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
10239 continue;
10240
10241 /*
10242 * Log all enabled ones, but only use the first one: after OR-ing in the
10243 * current bit, the equality check below holds only for the first one found.
10244 * FIXME: This won't work for two separate DSI displays.
10245 */
10246 enabled_panel_transcoders |= BIT(panel_transcoder);
10247 if (enabled_panel_transcoders != BIT(panel_transcoder))
10248 continue;
10249
10250 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10251 default:
10252 WARN(1, "unknown pipe linked to transcoder %s\n",
10253 transcoder_name(panel_transcoder));
10254 /* fall through */
10255 case TRANS_DDI_EDP_INPUT_A_ONOFF:
10256 force_thru = true;
10257 /* fall through */
10258 case TRANS_DDI_EDP_INPUT_A_ON:
10259 trans_pipe = PIPE_A;
10260 break;
10261 case TRANS_DDI_EDP_INPUT_B_ONOFF:
10262 trans_pipe = PIPE_B;
10263 break;
10264 case TRANS_DDI_EDP_INPUT_C_ONOFF:
10265 trans_pipe = PIPE_C;
10266 break;
10267 }
10268
10269 if (trans_pipe == crtc->pipe) {
10270 pipe_config->cpu_transcoder = panel_transcoder;
10271 pipe_config->pch_pfit.force_thru = force_thru;
10272 }
10273 }
10274
10275 /*
10276 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10277 */
10278 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10279 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10280
10281 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10282 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10283
10284 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10285 if (!wf)
10286 return false;
10287
10288 wakerefs[power_domain] = wf;
10289 *power_domain_mask |= BIT_ULL(power_domain);
10290
10291 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10292
10293 return tmp & PIPECONF_ENABLE;
10294 }
10295
10296 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10297 struct intel_crtc_state *pipe_config,
10298 u64 *power_domain_mask,
10299 intel_wakeref_t *wakerefs)
10300 {
10301 struct drm_device *dev = crtc->base.dev;
10302 struct drm_i915_private *dev_priv = to_i915(dev);
10303 enum intel_display_power_domain power_domain;
10304 enum transcoder cpu_transcoder;
10305 intel_wakeref_t wf;
10306 enum port port;
10307 u32 tmp;
10308
10309 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10310 if (port == PORT_A)
10311 cpu_transcoder = TRANSCODER_DSI_A;
10312 else
10313 cpu_transcoder = TRANSCODER_DSI_C;
10314
10315 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10316 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10317
10318 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10319 if (!wf)
10320 continue;
10321
10322 wakerefs[power_domain] = wf;
10323 *power_domain_mask |= BIT_ULL(power_domain);
10324
10325 /*
10326 * The PLL needs to be enabled with a valid divider
10327 * configuration, otherwise accessing DSI registers will hang
10328 * the machine. See BSpec North Display Engine
10329 * registers/MIPI[BXT]. We can break out here early, since we
10330 * need the same DSI PLL to be enabled for both DSI ports.
10331 */
10332 if (!bxt_dsi_pll_is_enabled(dev_priv))
10333 break;
10334
10335 /* XXX: this works for video mode only */
10336 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10337 if (!(tmp & DPI_ENABLE))
10338 continue;
10339
10340 tmp = I915_READ(MIPI_CTRL(port));
10341 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10342 continue;
10343
10344 pipe_config->cpu_transcoder = cpu_transcoder;
10345 break;
10346 }
10347
10348 return transcoder_is_dsi(pipe_config->cpu_transcoder);
10349 }
10350
10351 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10352 struct intel_crtc_state *pipe_config)
10353 {
10354 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10355 struct intel_shared_dpll *pll;
10356 enum port port;
10357 u32 tmp;
10358
10359 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10360
10361 if (INTEL_GEN(dev_priv) >= 12)
10362 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
10363 else
10364 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
10365
10366 if (INTEL_GEN(dev_priv) >= 11)
10367 icelake_get_ddi_pll(dev_priv, port, pipe_config);
10368 else if (IS_CANNONLAKE(dev_priv))
10369 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10370 else if (IS_GEN9_BC(dev_priv))
10371 skylake_get_ddi_pll(dev_priv, port, pipe_config);
10372 else if (IS_GEN9_LP(dev_priv))
10373 bxt_get_ddi_pll(dev_priv, port, pipe_config);
10374 else
10375 haswell_get_ddi_pll(dev_priv, port, pipe_config);
10376
10377 pll = pipe_config->shared_dpll;
10378 if (pll) {
10379 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10380 &pipe_config->dpll_hw_state));
10381 }
10382
10383 /*
10384 * Haswell has only FDI/PCH transcoder A, which is connected to
10385 * DDI E. So just check whether this pipe is wired to DDI E and whether
10386 * the PCH transcoder is on.
10387 */
10388 if (INTEL_GEN(dev_priv) < 9 &&
10389 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10390 pipe_config->has_pch_encoder = true;
10391
10392 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10393 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10394 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10395
10396 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10397 }
10398 }
10399
10400 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10401 struct intel_crtc_state *pipe_config)
10402 {
10403 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10404 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10405 enum intel_display_power_domain power_domain;
10406 u64 power_domain_mask;
10407 bool active;
10408
10409 intel_crtc_init_scalers(crtc, pipe_config);
10410
10411 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10412 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10413 if (!wf)
10414 return false;
10415
10416 wakerefs[power_domain] = wf;
10417 power_domain_mask = BIT_ULL(power_domain);
10418
10419 pipe_config->shared_dpll = NULL;
10420
10421 active = hsw_get_transcoder_state(crtc, pipe_config,
10422 &power_domain_mask, wakerefs);
10423
10424 if (IS_GEN9_LP(dev_priv) &&
10425 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10426 &power_domain_mask, wakerefs)) {
10427 WARN_ON(active);
10428 active = true;
10429 }
10430
10431 if (!active)
10432 goto out;
10433
10434 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10435 INTEL_GEN(dev_priv) >= 11) {
10436 haswell_get_ddi_port_state(crtc, pipe_config);
10437 intel_get_pipe_timings(crtc, pipe_config);
10438 }
10439
10440 intel_get_pipe_src_size(crtc, pipe_config);
10441 intel_get_crtc_ycbcr_config(crtc, pipe_config);
10442
10443 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10444
10445 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10446
10447 if (INTEL_GEN(dev_priv) >= 9) {
10448 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10449
10450 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10451 pipe_config->gamma_enable = true;
10452
10453 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10454 pipe_config->csc_enable = true;
10455 } else {
10456 i9xx_get_pipe_color_config(pipe_config);
10457 }
10458
10459 intel_color_get_config(pipe_config);
10460
10461 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10462 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10463
10464 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10465 if (wf) {
10466 wakerefs[power_domain] = wf;
10467 power_domain_mask |= BIT_ULL(power_domain);
10468
10469 if (INTEL_GEN(dev_priv) >= 9)
10470 skylake_get_pfit_config(crtc, pipe_config);
10471 else
10472 ironlake_get_pfit_config(crtc, pipe_config);
10473 }
10474
10475 if (hsw_crtc_supports_ips(crtc)) {
10476 if (IS_HASWELL(dev_priv))
10477 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10478 else {
10479 /*
10480 * We cannot readout IPS state on broadwell, set to
10481 * true so we can set it to a defined state on first
10482 * commit.
10483 */
10484 pipe_config->ips_enabled = true;
10485 }
10486 }
10487
10488 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10489 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10490 pipe_config->pixel_multiplier =
10491 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10492 } else {
10493 pipe_config->pixel_multiplier = 1;
10494 }
10495
10496 out:
10497 for_each_power_domain(power_domain, power_domain_mask)
10498 intel_display_power_put(dev_priv,
10499 power_domain, wakerefs[power_domain]);
10500
10501 return active;
10502 }
10503
10504 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10505 {
10506 struct drm_i915_private *dev_priv =
10507 to_i915(plane_state->base.plane->dev);
10508 const struct drm_framebuffer *fb = plane_state->base.fb;
10509 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10510 u32 base;
10511
10512 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10513 base = obj->phys_handle->busaddr;
10514 else
10515 base = intel_plane_ggtt_offset(plane_state);
10516
10517 base += plane_state->color_plane[0].offset;
10518
10519 /* ILK+ do this automagically */
10520 if (HAS_GMCH(dev_priv) &&
10521 plane_state->base.rotation & DRM_MODE_ROTATE_180)
10522 base += (plane_state->base.crtc_h *
10523 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10524
10525 return base;
10526 }
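/*
* Worked example for the 180 degree case above, assuming a 64x64 ARGB
* cursor (cpp == 4): base += (64 * 64 - 1) * 4 = 16380, i.e. base ends
* up pointing at the last pixel, presumably so the GMCH hardware can
* walk the buffer backwards for the rotated image.
*/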
10527
10528 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10529 {
10530 int x = plane_state->base.crtc_x;
10531 int y = plane_state->base.crtc_y;
10532 u32 pos = 0;
10533
10534 if (x < 0) {
10535 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10536 x = -x;
10537 }
10538 pos |= x << CURSOR_X_SHIFT;
10539
10540 if (y < 0) {
10541 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10542 y = -y;
10543 }
10544 pos |= y << CURSOR_Y_SHIFT;
10545
10546 return pos;
10547 }
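/*
* Example of the sign-magnitude encoding above, with x = -16, y = 100:
*
*	pos = ((CURSOR_POS_SIGN | 16) << CURSOR_X_SHIFT) |
*	      (100 << CURSOR_Y_SHIFT);
*
* i.e. magnitude plus a per-axis sign bit, not two's complement.
*/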
10548
10549 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10550 {
10551 const struct drm_mode_config *config =
10552 &plane_state->base.plane->dev->mode_config;
10553 int width = plane_state->base.crtc_w;
10554 int height = plane_state->base.crtc_h;
10555
10556 return width > 0 && width <= config->cursor_width &&
10557 height > 0 && height <= config->cursor_height;
10558 }
10559
10560 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10561 {
10562 int src_x, src_y;
10563 u32 offset;
10564 int ret;
10565
10566 ret = intel_plane_compute_gtt(plane_state);
10567 if (ret)
10568 return ret;
10569
10570 if (!plane_state->base.visible)
10571 return 0;
10572
10573 src_x = plane_state->base.src_x >> 16;
10574 src_y = plane_state->base.src_y >> 16;
10575
10576 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10577 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10578 plane_state, 0);
10579
10580 if (src_x != 0 || src_y != 0) {
10581 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10582 return -EINVAL;
10583 }
10584
10585 plane_state->color_plane[0].offset = offset;
10586
10587 return 0;
10588 }
10589
10590 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10591 struct intel_plane_state *plane_state)
10592 {
10593 const struct drm_framebuffer *fb = plane_state->base.fb;
10594 int ret;
10595
10596 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10597 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10598 return -EINVAL;
10599 }
10600
10601 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10602 &crtc_state->base,
10603 DRM_PLANE_HELPER_NO_SCALING,
10604 DRM_PLANE_HELPER_NO_SCALING,
10605 true, true);
10606 if (ret)
10607 return ret;
10608
10609 ret = intel_cursor_check_surface(plane_state);
10610 if (ret)
10611 return ret;
10612
10613 if (!plane_state->base.visible)
10614 return 0;
10615
10616 ret = intel_plane_check_src_coordinates(plane_state);
10617 if (ret)
10618 return ret;
10619
10620 return 0;
10621 }
10622
10623 static unsigned int
10624 i845_cursor_max_stride(struct intel_plane *plane,
10625 u32 pixel_format, u64 modifier,
10626 unsigned int rotation)
10627 {
10628 return 2048;
10629 }
10630
10631 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10632 {
10633 u32 cntl = 0;
10634
10635 if (crtc_state->gamma_enable)
10636 cntl |= CURSOR_GAMMA_ENABLE;
10637
10638 return cntl;
10639 }
10640
10641 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10642 const struct intel_plane_state *plane_state)
10643 {
10644 return CURSOR_ENABLE |
10645 CURSOR_FORMAT_ARGB |
10646 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10647 }
10648
10649 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10650 {
10651 int width = plane_state->base.crtc_w;
10652
10653 /*
10654 * 845g/865g are only limited by the width of their cursors;
10655 * the height is arbitrary up to the precision of the register.
10656 */
10657 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10658 }
10659
10660 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10661 struct intel_plane_state *plane_state)
10662 {
10663 const struct drm_framebuffer *fb = plane_state->base.fb;
10664 int ret;
10665
10666 ret = intel_check_cursor(crtc_state, plane_state);
10667 if (ret)
10668 return ret;
10669
10670 /* if we want to turn off the cursor ignore width and height */
10671 if (!fb)
10672 return 0;
10673
10674 /* Check for which cursor types we support */
10675 if (!i845_cursor_size_ok(plane_state)) {
10676 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10677 plane_state->base.crtc_w,
10678 plane_state->base.crtc_h);
10679 return -EINVAL;
10680 }
10681
10682 WARN_ON(plane_state->base.visible &&
10683 plane_state->color_plane[0].stride != fb->pitches[0]);
10684
10685 switch (fb->pitches[0]) {
10686 case 256:
10687 case 512:
10688 case 1024:
10689 case 2048:
10690 break;
10691 default:
10692 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10693 fb->pitches[0]);
10694 return -EINVAL;
10695 }
10696
10697 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10698
10699 return 0;
10700 }
10701
10702 static void i845_update_cursor(struct intel_plane *plane,
10703 const struct intel_crtc_state *crtc_state,
10704 const struct intel_plane_state *plane_state)
10705 {
10706 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10707 u32 cntl = 0, base = 0, pos = 0, size = 0;
10708 unsigned long irqflags;
10709
10710 if (plane_state && plane_state->base.visible) {
10711 unsigned int width = plane_state->base.crtc_w;
10712 unsigned int height = plane_state->base.crtc_h;
10713
10714 cntl = plane_state->ctl |
10715 i845_cursor_ctl_crtc(crtc_state);
10716
10717 size = (height << 12) | width;
10718
10719 base = intel_cursor_base(plane_state);
10720 pos = intel_cursor_position(plane_state);
10721 }
10722
10723 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10724
10725 /* On these chipsets we can only modify the base/size/stride
10726 * whilst the cursor is disabled.
10727 */
10728 if (plane->cursor.base != base ||
10729 plane->cursor.size != size ||
10730 plane->cursor.cntl != cntl) {
10731 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10732 I915_WRITE_FW(CURBASE(PIPE_A), base);
10733 I915_WRITE_FW(CURSIZE, size);
10734 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10735 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10736
10737 plane->cursor.base = base;
10738 plane->cursor.size = size;
10739 plane->cursor.cntl = cntl;
10740 } else {
10741 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10742 }
10743
10744 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10745 }
10746
10747 static void i845_disable_cursor(struct intel_plane *plane,
10748 const struct intel_crtc_state *crtc_state)
10749 {
10750 i845_update_cursor(plane, crtc_state, NULL);
10751 }
10752
10753 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10754 enum pipe *pipe)
10755 {
10756 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10757 enum intel_display_power_domain power_domain;
10758 intel_wakeref_t wakeref;
10759 bool ret;
10760
10761 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10762 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10763 if (!wakeref)
10764 return false;
10765
10766 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10767
10768 *pipe = PIPE_A;
10769
10770 intel_display_power_put(dev_priv, power_domain, wakeref);
10771
10772 return ret;
10773 }
10774
10775 static unsigned int
10776 i9xx_cursor_max_stride(struct intel_plane *plane,
10777 u32 pixel_format, u64 modifier,
10778 unsigned int rotation)
10779 {
10780 return plane->base.dev->mode_config.cursor_width * 4;
10781 }
10782
10783 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10784 {
10785 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10786 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10787 u32 cntl = 0;
10788
10789 if (INTEL_GEN(dev_priv) >= 11)
10790 return cntl;
10791
10792 if (crtc_state->gamma_enable)
10793 cntl = MCURSOR_GAMMA_ENABLE;
10794
10795 if (crtc_state->csc_enable)
10796 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10797
10798 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10799 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10800
10801 return cntl;
10802 }
10803
10804 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10805 const struct intel_plane_state *plane_state)
10806 {
10807 struct drm_i915_private *dev_priv =
10808 to_i915(plane_state->base.plane->dev);
10809 u32 cntl = 0;
10810
10811 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10812 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10813
10814 switch (plane_state->base.crtc_w) {
10815 case 64:
10816 cntl |= MCURSOR_MODE_64_ARGB_AX;
10817 break;
10818 case 128:
10819 cntl |= MCURSOR_MODE_128_ARGB_AX;
10820 break;
10821 case 256:
10822 cntl |= MCURSOR_MODE_256_ARGB_AX;
10823 break;
10824 default:
10825 MISSING_CASE(plane_state->base.crtc_w);
10826 return 0;
10827 }
10828
10829 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10830 cntl |= MCURSOR_ROTATE_180;
10831
10832 return cntl;
10833 }
10834
10835 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10836 {
10837 struct drm_i915_private *dev_priv =
10838 to_i915(plane_state->base.plane->dev);
10839 int width = plane_state->base.crtc_w;
10840 int height = plane_state->base.crtc_h;
10841
10842 if (!intel_cursor_size_ok(plane_state))
10843 return false;
10844
10845 /* Cursor width is limited to a few power-of-two sizes */
10846 switch (width) {
10847 case 256:
10848 case 128:
10849 case 64:
10850 break;
10851 default:
10852 return false;
10853 }
10854
10855 /*
10856 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10857 * height from 8 lines up to the cursor width, when the
10858 * cursor is not rotated. Everything else requires square
10859 * cursors.
10860 */
10861 if (HAS_CUR_FBC(dev_priv) &&
10862 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10863 if (height < 8 || height > width)
10864 return false;
10865 } else {
10866 if (height != width)
10867 return false;
10868 }
10869
10870 return true;
10871 }
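/*
* Examples for the rules above: an unrotated 256x64 cursor is accepted
* on IVB+ (8 <= height <= width), while the same cursor rotated by 180
* degrees falls into the square-only branch and is rejected; it would
* have to be 256x256.
*/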
10872
10873 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10874 struct intel_plane_state *plane_state)
10875 {
10876 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10877 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10878 const struct drm_framebuffer *fb = plane_state->base.fb;
10879 enum pipe pipe = plane->pipe;
10880 int ret;
10881
10882 ret = intel_check_cursor(crtc_state, plane_state);
10883 if (ret)
10884 return ret;
10885
10886 /* if we want to turn off the cursor ignore width and height */
10887 if (!fb)
10888 return 0;
10889
10890 /* Check for which cursor types we support */
10891 if (!i9xx_cursor_size_ok(plane_state)) {
10892 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10893 plane_state->base.crtc_w,
10894 plane_state->base.crtc_h);
10895 return -EINVAL;
10896 }
10897
10898 WARN_ON(plane_state->base.visible &&
10899 plane_state->color_plane[0].stride != fb->pitches[0]);
10900
10901 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10902 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10903 fb->pitches[0], plane_state->base.crtc_w);
10904 return -EINVAL;
10905 }
10906
10907 /*
10908 * There's something wrong with the cursor on CHV pipe C.
10909 * If it straddles the left edge of the screen then
10910 * moving it away from the edge or disabling it often
10911 * results in a pipe underrun, and often that can lead to
10912 * dead pipe (constant underrun reported, and it scans
10913 * out just a solid color). To recover from that, the
10914 * display power well must be turned off and on again.
10915 * Refuse to put the cursor into that compromised position.
10916 */
10917 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10918 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10919 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10920 return -EINVAL;
10921 }
10922
10923 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10924
10925 return 0;
10926 }
10927
10928 static void i9xx_update_cursor(struct intel_plane *plane,
10929 const struct intel_crtc_state *crtc_state,
10930 const struct intel_plane_state *plane_state)
10931 {
10932 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10933 enum pipe pipe = plane->pipe;
10934 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10935 unsigned long irqflags;
10936
10937 if (plane_state && plane_state->base.visible) {
10938 cntl = plane_state->ctl |
10939 i9xx_cursor_ctl_crtc(crtc_state);
10940
10941 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10942 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10943
10944 base = intel_cursor_base(plane_state);
10945 pos = intel_cursor_position(plane_state);
10946 }
10947
10948 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10949
10950 /*
10951 * On some platforms writing CURCNTR first will also
10952 * cause CURPOS to be armed by the CURBASE write.
10953 * Without the CURCNTR write the CURPOS write would
10954 * arm itself. Thus we always update CURCNTR before
10955 * CURPOS.
10956 *
10957 * On other platforms CURPOS always requires the
10958 * CURBASE write to arm the update. Additionally,
10959 * a write to any of the cursor registers will cancel
10960 * an already armed cursor update. Thus leaving out
10961 * the CURBASE write after CURPOS could lead to a
10962 * cursor that doesn't appear to move, or even change
10963 * shape. Thus we always write CURBASE.
10964 *
10965 * The other registers are armed by the CURBASE write
10966 * except when the plane is getting enabled at which time
10967 * the CURCNTR write arms the update.
10968 */
10969
10970 if (INTEL_GEN(dev_priv) >= 9)
10971 skl_write_cursor_wm(plane, crtc_state);
10972
10973 if (plane->cursor.base != base ||
10974 plane->cursor.size != fbc_ctl ||
10975 plane->cursor.cntl != cntl) {
10976 if (HAS_CUR_FBC(dev_priv))
10977 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10978 I915_WRITE_FW(CURCNTR(pipe), cntl);
10979 I915_WRITE_FW(CURPOS(pipe), pos);
10980 I915_WRITE_FW(CURBASE(pipe), base);
10981
10982 plane->cursor.base = base;
10983 plane->cursor.size = fbc_ctl;
10984 plane->cursor.cntl = cntl;
10985 } else {
10986 I915_WRITE_FW(CURPOS(pipe), pos);
10987 I915_WRITE_FW(CURBASE(pipe), base);
10988 }
10989
10990 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10991 }
10992
10993 static void i9xx_disable_cursor(struct intel_plane *plane,
10994 const struct intel_crtc_state *crtc_state)
10995 {
10996 i9xx_update_cursor(plane, crtc_state, NULL);
10997 }
10998
10999 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11000 enum pipe *pipe)
11001 {
11002 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11003 enum intel_display_power_domain power_domain;
11004 intel_wakeref_t wakeref;
11005 bool ret;
11006 u32 val;
11007
11008 /*
11009 * Not 100% correct for planes that can move between pipes,
11010 * but that's only the case for gen2-3 which don't have any
11011 * display power wells.
11012 */
11013 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11014 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11015 if (!wakeref)
11016 return false;
11017
11018 val = I915_READ(CURCNTR(plane->pipe));
11019
11020 ret = val & MCURSOR_MODE;
11021
11022 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11023 *pipe = plane->pipe;
11024 else
11025 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11026 MCURSOR_PIPE_SELECT_SHIFT;
11027
11028 intel_display_power_put(dev_priv, power_domain, wakeref);
11029
11030 return ret;
11031 }
11032
11033 /* VESA 640x480x72Hz mode to set on the pipe */
11034 static const struct drm_display_mode load_detect_mode = {
11035 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
11036 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
11037 };
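/*
* Decoded, the DRM_MODE() arguments above are: pixel clock 31500 kHz,
* hdisplay/hsync_start/hsync_end/htotal = 640/664/704/832 and
* vdisplay/vsync_start/vsync_end/vtotal = 480/489/491/520, with
* negative sync polarity on both axes (31500000 / (832 * 520) ~= 72 Hz).
*/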
11038
11039 struct drm_framebuffer *
11040 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11041 struct drm_mode_fb_cmd2 *mode_cmd)
11042 {
11043 struct intel_framebuffer *intel_fb;
11044 int ret;
11045
11046 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11047 if (!intel_fb)
11048 return ERR_PTR(-ENOMEM);
11049
11050 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11051 if (ret)
11052 goto err;
11053
11054 return &intel_fb->base;
11055
11056 err:
11057 kfree(intel_fb);
11058 return ERR_PTR(ret);
11059 }
11060
11061 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11062 struct drm_crtc *crtc)
11063 {
11064 struct drm_plane *plane;
11065 struct drm_plane_state *plane_state;
11066 int ret, i;
11067
11068 ret = drm_atomic_add_affected_planes(state, crtc);
11069 if (ret)
11070 return ret;
11071
11072 for_each_new_plane_in_state(state, plane, plane_state, i) {
11073 if (plane_state->crtc != crtc)
11074 continue;
11075
11076 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11077 if (ret)
11078 return ret;
11079
11080 drm_atomic_set_fb_for_plane(plane_state, NULL);
11081 }
11082
11083 return 0;
11084 }
11085
11086 int intel_get_load_detect_pipe(struct drm_connector *connector,
11087 const struct drm_display_mode *mode,
11088 struct intel_load_detect_pipe *old,
11089 struct drm_modeset_acquire_ctx *ctx)
11090 {
11091 struct intel_crtc *intel_crtc;
11092 struct intel_encoder *intel_encoder =
11093 intel_attached_encoder(connector);
11094 struct drm_crtc *possible_crtc;
11095 struct drm_encoder *encoder = &intel_encoder->base;
11096 struct drm_crtc *crtc = NULL;
11097 struct drm_device *dev = encoder->dev;
11098 struct drm_i915_private *dev_priv = to_i915(dev);
11099 struct drm_mode_config *config = &dev->mode_config;
11100 struct drm_atomic_state *state = NULL, *restore_state = NULL;
11101 struct drm_connector_state *connector_state;
11102 struct intel_crtc_state *crtc_state;
11103 int ret, i = -1;
11104
11105 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11106 connector->base.id, connector->name,
11107 encoder->base.id, encoder->name);
11108
11109 old->restore_state = NULL;
11110
11111 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
11112
11113 /*
11114 * Algorithm gets a little messy:
11115 *
11116 * - if the connector already has an assigned crtc, use it (but make
11117 * sure it's on first)
11118 *
11119 * - try to find the first unused crtc that can drive this connector,
11120 * and use that if we find one
11121 */
11122
11123 /* See if we already have a CRTC for this connector */
11124 if (connector->state->crtc) {
11125 crtc = connector->state->crtc;
11126
11127 ret = drm_modeset_lock(&crtc->mutex, ctx);
11128 if (ret)
11129 goto fail;
11130
11131 /* Make sure the crtc and connector are running */
11132 goto found;
11133 }
11134
11135 /* Find an unused one (if possible) */
11136 for_each_crtc(dev, possible_crtc) {
11137 i++;
11138 if (!(encoder->possible_crtcs & (1 << i)))
11139 continue;
11140
11141 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
11142 if (ret)
11143 goto fail;
11144
11145 if (possible_crtc->state->enable) {
11146 drm_modeset_unlock(&possible_crtc->mutex);
11147 continue;
11148 }
11149
11150 crtc = possible_crtc;
11151 break;
11152 }
11153
11154 /*
11155 * If we didn't find an unused CRTC, don't use any.
11156 */
11157 if (!crtc) {
11158 DRM_DEBUG_KMS("no pipe available for load-detect\n");
11159 ret = -ENODEV;
11160 goto fail;
11161 }
11162
11163 found:
11164 intel_crtc = to_intel_crtc(crtc);
11165
11166 state = drm_atomic_state_alloc(dev);
11167 restore_state = drm_atomic_state_alloc(dev);
11168 if (!state || !restore_state) {
11169 ret = -ENOMEM;
11170 goto fail;
11171 }
11172
11173 state->acquire_ctx = ctx;
11174 restore_state->acquire_ctx = ctx;
11175
11176 connector_state = drm_atomic_get_connector_state(state, connector);
11177 if (IS_ERR(connector_state)) {
11178 ret = PTR_ERR(connector_state);
11179 goto fail;
11180 }
11181
11182 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
11183 if (ret)
11184 goto fail;
11185
11186 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
11187 if (IS_ERR(crtc_state)) {
11188 ret = PTR_ERR(crtc_state);
11189 goto fail;
11190 }
11191
11192 crtc_state->base.active = crtc_state->base.enable = true;
11193
11194 if (!mode)
11195 mode = &load_detect_mode;
11196
11197 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
11198 if (ret)
11199 goto fail;
11200
11201 ret = intel_modeset_disable_planes(state, crtc);
11202 if (ret)
11203 goto fail;
11204
11205 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
11206 if (!ret)
11207 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
11208 if (!ret)
11209 ret = drm_atomic_add_affected_planes(restore_state, crtc);
11210 if (ret) {
11211 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
11212 goto fail;
11213 }
11214
11215 ret = drm_atomic_commit(state);
11216 if (ret) {
11217 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
11218 goto fail;
11219 }
11220
11221 old->restore_state = restore_state;
11222 drm_atomic_state_put(state);
11223
11224 /* let the connector get through one full cycle before testing */
11225 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
11226 return true;
11227
11228 fail:
11229 if (state) {
11230 drm_atomic_state_put(state);
11231 state = NULL;
11232 }
11233 if (restore_state) {
11234 drm_atomic_state_put(restore_state);
11235 restore_state = NULL;
11236 }
11237
11238 if (ret == -EDEADLK)
11239 return ret;
11240
11241 return false;
11242 }
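/*
* Typical usage, as a sketch (hypothetical caller, error handling
* abbreviated; do_probe() is a placeholder for the actual detection
* step). The saved state must be handed back via
* intel_release_load_detect_pipe() once probing is done:
*
*	struct intel_load_detect_pipe tmp;
*
*	if (intel_get_load_detect_pipe(connector, NULL, &tmp, ctx) > 0) {
*		status = do_probe(connector);
*		intel_release_load_detect_pipe(connector, &tmp, ctx);
*	}
*/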
11243
11244 void intel_release_load_detect_pipe(struct drm_connector *connector,
11245 struct intel_load_detect_pipe *old,
11246 struct drm_modeset_acquire_ctx *ctx)
11247 {
11248 struct intel_encoder *intel_encoder =
11249 intel_attached_encoder(connector);
11250 struct drm_encoder *encoder = &intel_encoder->base;
11251 struct drm_atomic_state *state = old->restore_state;
11252 int ret;
11253
11254 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11255 connector->base.id, connector->name,
11256 encoder->base.id, encoder->name);
11257
11258 if (!state)
11259 return;
11260
11261 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11262 if (ret)
11263 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11264 drm_atomic_state_put(state);
11265 }
11266
11267 static int i9xx_pll_refclk(struct drm_device *dev,
11268 const struct intel_crtc_state *pipe_config)
11269 {
11270 struct drm_i915_private *dev_priv = to_i915(dev);
11271 u32 dpll = pipe_config->dpll_hw_state.dpll;
11272
11273 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11274 return dev_priv->vbt.lvds_ssc_freq;
11275 else if (HAS_PCH_SPLIT(dev_priv))
11276 return 120000;
11277 else if (!IS_GEN(dev_priv, 2))
11278 return 96000;
11279 else
11280 return 48000;
11281 }
11282
11283 /* Returns the clock of the currently programmed mode of the given pipe. */
11284 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11285 struct intel_crtc_state *pipe_config)
11286 {
11287 struct drm_device *dev = crtc->base.dev;
11288 struct drm_i915_private *dev_priv = to_i915(dev);
11289 int pipe = pipe_config->cpu_transcoder;
11290 u32 dpll = pipe_config->dpll_hw_state.dpll;
11291 u32 fp;
11292 struct dpll clock;
11293 int port_clock;
11294 int refclk = i9xx_pll_refclk(dev, pipe_config);
11295
11296 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11297 fp = pipe_config->dpll_hw_state.fp0;
11298 else
11299 fp = pipe_config->dpll_hw_state.fp1;
11300
11301 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
11302 if (IS_PINEVIEW(dev_priv)) {
11303 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11304 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11305 } else {
11306 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11307 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
11308 }
11309
11310 if (!IS_GEN(dev_priv, 2)) {
11311 if (IS_PINEVIEW(dev_priv))
11312 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11313 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11314 else
11315 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11316 DPLL_FPA01_P1_POST_DIV_SHIFT);
11317
11318 switch (dpll & DPLL_MODE_MASK) {
11319 case DPLLB_MODE_DAC_SERIAL:
11320 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11321 5 : 10;
11322 break;
11323 case DPLLB_MODE_LVDS:
11324 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11325 7 : 14;
11326 break;
11327 default:
11328 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
11329 "mode\n", (int)(dpll & DPLL_MODE_MASK));
11330 return;
11331 }
11332
11333 if (IS_PINEVIEW(dev_priv))
11334 port_clock = pnv_calc_dpll_params(refclk, &clock);
11335 else
11336 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11337 } else {
11338 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
11339 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
11340
11341 if (is_lvds) {
11342 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11343 DPLL_FPA01_P1_POST_DIV_SHIFT);
11344
11345 if (lvds & LVDS_CLKB_POWER_UP)
11346 clock.p2 = 7;
11347 else
11348 clock.p2 = 14;
11349 } else {
11350 if (dpll & PLL_P1_DIVIDE_BY_TWO)
11351 clock.p1 = 2;
11352 else {
11353 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11354 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11355 }
11356 if (dpll & PLL_P2_DIVIDE_BY_4)
11357 clock.p2 = 4;
11358 else
11359 clock.p2 = 2;
11360 }
11361
11362 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11363 }
11364
11365 /*
11366 * This value includes pixel_multiplier. We will use
11367 * port_clock to compute adjusted_mode.crtc_clock in the
11368 * encoder's get_config() function.
11369 */
11370 pipe_config->port_clock = port_clock;
11371 }
11372
11373 int intel_dotclock_calculate(int link_freq,
11374 const struct intel_link_m_n *m_n)
11375 {
11376 /*
11377 * The calculation for the data clock is:
11378 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11379 * But we want to avoid losing precision if possible, so:
11380 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11381 *
11382 * and with the link M/N values used here it is simpler:
11383 * pixel_clock = (link_m * link_freq) / link_n
11384 */
11385
11386 if (!m_n->link_n)
11387 return 0;
11388
11389 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11390 }
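/*
* Worked example with made-up M/N values: on a 2.7 GHz DP link
* (link_freq = 270000 kHz), link_m/link_n = 11/20 yields
*
*	dotclock = 11 * 270000 / 20 = 148500 kHz,
*
* a standard 1920x1080@60 pixel clock, since the link M/N pair encodes
* the pixel-clock-to-link-clock ratio.
*/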
11391
11392 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11393 struct intel_crtc_state *pipe_config)
11394 {
11395 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11396
11397 /* read out port_clock from the DPLL */
11398 i9xx_crtc_clock_get(crtc, pipe_config);
11399
11400 /*
11401 * In case there is an active pipe without active ports,
11402 * we may need some idea for the dotclock anyway.
11403 * Calculate one based on the FDI configuration.
11404 */
11405 pipe_config->base.adjusted_mode.crtc_clock =
11406 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11407 &pipe_config->fdi_m_n);
11408 }
11409
11410 /* Returns the currently programmed mode of the given encoder. */
11411 struct drm_display_mode *
11412 intel_encoder_current_mode(struct intel_encoder *encoder)
11413 {
11414 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11415 struct intel_crtc_state *crtc_state;
11416 struct drm_display_mode *mode;
11417 struct intel_crtc *crtc;
11418 enum pipe pipe;
11419
11420 if (!encoder->get_hw_state(encoder, &pipe))
11421 return NULL;
11422
11423 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11424
11425 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11426 if (!mode)
11427 return NULL;
11428
11429 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11430 if (!crtc_state) {
11431 kfree(mode);
11432 return NULL;
11433 }
11434
11435 crtc_state->base.crtc = &crtc->base;
11436
11437 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11438 kfree(crtc_state);
11439 kfree(mode);
11440 return NULL;
11441 }
11442
11443 encoder->get_config(encoder, crtc_state);
11444
11445 intel_mode_from_pipe_config(mode, crtc_state);
11446
11447 kfree(crtc_state);
11448
11449 return mode;
11450 }
11451
11452 static void intel_crtc_destroy(struct drm_crtc *crtc)
11453 {
11454 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11455
11456 drm_crtc_cleanup(crtc);
11457 kfree(intel_crtc);
11458 }
11459
11460 /**
11461 * intel_wm_need_update - Check whether watermarks need updating
11462 * @cur: current plane state
11463 * @new: new plane state
11464 *
11465 * Check current plane state versus the new one to determine whether
11466 * watermarks need to be recalculated.
11467 *
11468 * Returns true or false.
11469 */
11470 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11471 struct intel_plane_state *new)
11472 {
11473 /* Update watermarks on tiling or size changes. */
11474 if (new->base.visible != cur->base.visible)
11475 return true;
11476
11477 if (!cur->base.fb || !new->base.fb)
11478 return false;
11479
11480 if (cur->base.fb->modifier != new->base.fb->modifier ||
11481 cur->base.rotation != new->base.rotation ||
11482 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11483 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11484 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11485 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11486 return true;
11487
11488 return false;
11489 }
11490
11491 static bool needs_scaling(const struct intel_plane_state *state)
11492 {
11493 int src_w = drm_rect_width(&state->base.src) >> 16;
11494 int src_h = drm_rect_height(&state->base.src) >> 16;
11495 int dst_w = drm_rect_width(&state->base.dst);
11496 int dst_h = drm_rect_height(&state->base.dst);
11497
11498 return (src_w != dst_w || src_h != dst_h);
11499 }
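/*
* The src rectangle is in 16.16 fixed point while dst is in whole
* pixels, hence the >> 16 above; e.g. a 1920 pixel wide source is
* stored as 1920 << 16 == 0x7800000.
*/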
11500
11501 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11502 struct intel_crtc_state *crtc_state,
11503 const struct intel_plane_state *old_plane_state,
11504 struct intel_plane_state *plane_state)
11505 {
11506 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11507 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
11508 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11509 bool mode_changed = needs_modeset(crtc_state);
11510 bool was_crtc_enabled = old_crtc_state->base.active;
11511 bool is_crtc_enabled = crtc_state->base.active;
11512 bool turn_off, turn_on, visible, was_visible;
11513 struct drm_framebuffer *fb = plane_state->base.fb;
11514 int ret;
11515
11516 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11517 ret = skl_update_scaler_plane(crtc_state, plane_state);
11518 if (ret)
11519 return ret;
11520 }
11521
11522 was_visible = old_plane_state->base.visible;
11523 visible = plane_state->base.visible;
11524
11525 if (!was_crtc_enabled && WARN_ON(was_visible))
11526 was_visible = false;
11527
11528 /*
11529 * Visibility is calculated as if the crtc was on, but
11530 * after scaler setup everything depends on it being off
11531 * when the crtc isn't active.
11532 *
11533 * FIXME this is wrong for watermarks. Watermarks should also
11534 * be computed as if the pipe would be active. Perhaps move
11535 * per-plane wm computation to the .check_plane() hook, and
11536 * only combine the results from all planes in the current place?
11537 */
11538 if (!is_crtc_enabled) {
11539 plane_state->base.visible = visible = false;
11540 crtc_state->active_planes &= ~BIT(plane->id);
11541 crtc_state->data_rate[plane->id] = 0;
11542 }
11543
11544 if (!was_visible && !visible)
11545 return 0;
11546
11547 if (fb != old_plane_state->base.fb)
11548 crtc_state->fb_changed = true;
11549
11550 turn_off = was_visible && (!visible || mode_changed);
11551 turn_on = visible && (!was_visible || mode_changed);
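/*
* Truth table note: a modeset with the plane visible both before and
* after sets turn_off and turn_on simultaneously, i.e. the plane goes
* through a full disable/enable cycle across the modeset.
*/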
11552
11553 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11554 crtc->base.base.id, crtc->base.name,
11555 plane->base.base.id, plane->base.name,
11556 fb ? fb->base.id : -1);
11557
11558 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11559 plane->base.base.id, plane->base.name,
11560 was_visible, visible,
11561 turn_off, turn_on, mode_changed);
11562
11563 if (turn_on) {
11564 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11565 crtc_state->update_wm_pre = true;
11566
11567 /* must disable cxsr around plane enable/disable */
11568 if (plane->id != PLANE_CURSOR)
11569 crtc_state->disable_cxsr = true;
11570 } else if (turn_off) {
11571 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11572 crtc_state->update_wm_post = true;
11573
11574 /* must disable cxsr around plane enable/disable */
11575 if (plane->id != PLANE_CURSOR)
11576 crtc_state->disable_cxsr = true;
11577 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
11578 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11579 /* FIXME bollocks */
11580 crtc_state->update_wm_pre = true;
11581 crtc_state->update_wm_post = true;
11582 }
11583 }
11584
11585 if (visible || was_visible)
11586 crtc_state->fb_bits |= plane->frontbuffer_bit;
11587
11588 /*
11589 * ILK/SNB DVSACNTR/Sprite Enable
11590 * IVB SPR_CTL/Sprite Enable
11591 * "When in Self Refresh Big FIFO mode, a write to enable the
11592 * plane will be internally buffered and delayed while Big FIFO
11593 * mode is exiting."
11594 *
11595 * Which means that enabling the sprite can take an extra frame
11596 * when we start in big FIFO mode (LP1+). Thus we need to drop
11597 * down to LP0 and wait for vblank in order to make sure the
11598 * sprite gets enabled on the next vblank after the register write.
11599 * Doing otherwise would risk enabling the sprite one frame after
11600 * we've already signalled flip completion. We can resume LP1+
11601 * once the sprite has been enabled.
11602 *
11603 *
11604 * WaCxSRDisabledForSpriteScaling:ivb
11605 * IVB SPR_SCALE/Scaling Enable
11606 * "Low Power watermarks must be disabled for at least one
11607 * frame before enabling sprite scaling, and kept disabled
11608 * until sprite scaling is disabled."
11609 *
11610 * ILK/SNB DVSASCALE/Scaling Enable
11611 * "When in Self Refresh Big FIFO mode, scaling enable will be
11612 * masked off while Big FIFO mode is exiting."
11613 *
11614 * Despite the w/a only being listed for IVB we assume that
11615 * the ILK/SNB note has similar ramifications, hence we apply
11616 * the w/a on all three platforms.
11617 *
11618 	 * Experimental results suggest this is needed for the primary
11619 	 * plane as well, not only the sprite plane.
11620 */
11621 if (plane->id != PLANE_CURSOR &&
11622 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11623 IS_IVYBRIDGE(dev_priv)) &&
11624 (turn_on || (!needs_scaling(old_plane_state) &&
11625 needs_scaling(plane_state))))
11626 crtc_state->disable_lp_wm = true;
11627
11628 return 0;
11629 }
11630
11631 static bool encoders_cloneable(const struct intel_encoder *a,
11632 const struct intel_encoder *b)
11633 {
11634 /* masks could be asymmetric, so check both ways */
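/*
 * For example, with a->type == INTEL_OUTPUT_ANALOG and
 * b->type == INTEL_OUTPUT_DVO, this requires both
 * a->cloneable & (1 << INTEL_OUTPUT_DVO) and
 * b->cloneable & (1 << INTEL_OUTPUT_ANALOG) to be set.
 */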
11635 return a == b || (a->cloneable & (1 << b->type) &&
11636 b->cloneable & (1 << a->type));
11637 }
11638
11639 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11640 struct intel_crtc *crtc,
11641 struct intel_encoder *encoder)
11642 {
11643 struct intel_encoder *source_encoder;
11644 struct drm_connector *connector;
11645 struct drm_connector_state *connector_state;
11646 int i;
11647
11648 for_each_new_connector_in_state(state, connector, connector_state, i) {
11649 if (connector_state->crtc != &crtc->base)
11650 continue;
11651
11652 source_encoder =
11653 to_intel_encoder(connector_state->best_encoder);
11654 if (!encoders_cloneable(encoder, source_encoder))
11655 return false;
11656 }
11657
11658 return true;
11659 }
11660
11661 static int icl_add_linked_planes(struct intel_atomic_state *state)
11662 {
11663 struct intel_plane *plane, *linked;
11664 struct intel_plane_state *plane_state, *linked_plane_state;
11665 int i;
11666
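/*
 * Master and slave planes of a planar YUV pair must always be
 * programmed in the same commit, so pull the linked plane's state
 * into this atomic state and sanity check that the link is still
 * symmetric, with exactly one side marked as the slave.
 */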
11667 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11668 linked = plane_state->linked_plane;
11669
11670 if (!linked)
11671 continue;
11672
11673 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11674 if (IS_ERR(linked_plane_state))
11675 return PTR_ERR(linked_plane_state);
11676
11677 WARN_ON(linked_plane_state->linked_plane != plane);
11678 WARN_ON(linked_plane_state->slave == plane_state->slave);
11679 }
11680
11681 return 0;
11682 }
11683
11684 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11685 {
11686 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11687 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11688 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11689 struct intel_plane *plane, *linked;
11690 struct intel_plane_state *plane_state;
11691 int i;
11692
11693 if (INTEL_GEN(dev_priv) < 11)
11694 return 0;
11695
11696 /*
11697 * Destroy all old plane links and make the slave plane invisible
11698 * in the crtc_state->active_planes mask.
11699 */
11700 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11701 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11702 continue;
11703
11704 plane_state->linked_plane = NULL;
11705 if (plane_state->slave && !plane_state->base.visible) {
11706 crtc_state->active_planes &= ~BIT(plane->id);
11707 crtc_state->update_planes |= BIT(plane->id);
11708 }
11709
11710 plane_state->slave = false;
11711 }
11712
11713 if (!crtc_state->nv12_planes)
11714 return 0;
11715
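/*
 * Pair each plane that scans out an NV12 (planar YUV) framebuffer
 * with a currently unused Y plane on the same pipe: the linked plane
 * becomes the slave handling the Y surface while this plane stays
 * the master.
 */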
11716 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11717 struct intel_plane_state *linked_state = NULL;
11718
11719 if (plane->pipe != crtc->pipe ||
11720 !(crtc_state->nv12_planes & BIT(plane->id)))
11721 continue;
11722
11723 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11724 if (!icl_is_nv12_y_plane(linked->id))
11725 continue;
11726
11727 if (crtc_state->active_planes & BIT(linked->id))
11728 continue;
11729
11730 linked_state = intel_atomic_get_plane_state(state, linked);
11731 if (IS_ERR(linked_state))
11732 return PTR_ERR(linked_state);
11733
11734 break;
11735 }
11736
11737 if (!linked_state) {
11738 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11739 hweight8(crtc_state->nv12_planes));
11740
11741 return -EINVAL;
11742 }
11743
11744 plane_state->linked_plane = linked;
11745
11746 linked_state->slave = true;
11747 linked_state->linked_plane = plane;
11748 crtc_state->active_planes |= BIT(linked->id);
11749 crtc_state->update_planes |= BIT(linked->id);
11750 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11751 }
11752
11753 return 0;
11754 }
11755
11756 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11757 {
11758 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11759 struct intel_atomic_state *state =
11760 to_intel_atomic_state(new_crtc_state->base.state);
11761 const struct intel_crtc_state *old_crtc_state =
11762 intel_atomic_get_old_crtc_state(state, crtc);
11763
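/*
 * !x != !y is a boolean XOR: only report a change when C8 plane usage
 * toggles between none and some, not when the mask merely changes
 * which planes use C8.
 */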
11764 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11765 }
11766
11767 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11768 struct drm_crtc_state *crtc_state)
11769 {
11770 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11771 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11772 struct intel_crtc_state *pipe_config =
11773 to_intel_crtc_state(crtc_state);
11774 int ret;
11775 bool mode_changed = needs_modeset(pipe_config);
11776
11777 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11778 mode_changed && !crtc_state->active)
11779 pipe_config->update_wm_post = true;
11780
11781 if (mode_changed && crtc_state->enable &&
11782 dev_priv->display.crtc_compute_clock &&
11783 !WARN_ON(pipe_config->shared_dpll)) {
11784 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11785 pipe_config);
11786 if (ret)
11787 return ret;
11788 }
11789
11790 /*
11791 * May need to update pipe gamma enable bits
11792 * when C8 planes are getting enabled/disabled.
11793 */
11794 if (c8_planes_changed(pipe_config))
11795 crtc_state->color_mgmt_changed = true;
11796
11797 if (mode_changed || pipe_config->update_pipe ||
11798 crtc_state->color_mgmt_changed) {
11799 ret = intel_color_check(pipe_config);
11800 if (ret)
11801 return ret;
11802 }
11803
11804 ret = 0;
11805 if (dev_priv->display.compute_pipe_wm) {
11806 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11807 if (ret) {
11808 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11809 return ret;
11810 }
11811 }
11812
11813 if (dev_priv->display.compute_intermediate_wm) {
11814 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11815 return 0;
11816
11817 /*
11818 * Calculate 'intermediate' watermarks that satisfy both the
11819 * old state and the new state. We can program these
11820 * immediately.
11821 */
11822 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11823 if (ret) {
11824 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11825 return ret;
11826 }
11827 }
11828
11829 if (INTEL_GEN(dev_priv) >= 9) {
11830 if (mode_changed || pipe_config->update_pipe)
11831 ret = skl_update_scaler_crtc(pipe_config);
11832
11833 if (!ret)
11834 ret = icl_check_nv12_planes(pipe_config);
11835 if (!ret)
11836 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11837 pipe_config);
11838 if (!ret)
11839 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11840 pipe_config);
11841 }
11842
11843 if (HAS_IPS(dev_priv))
11844 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11845
11846 return ret;
11847 }
11848
11849 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11850 .atomic_check = intel_crtc_atomic_check,
11851 };
11852
11853 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11854 {
11855 struct intel_connector *connector;
11856 struct drm_connector_list_iter conn_iter;
11857
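/*
 * Sync the legacy connector->encoder/->crtc pointers into the atomic
 * connector state. The atomic state holds a reference on the
 * connector while it is bound to a crtc, hence the put/get dance as
 * the binding changes.
 */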
11858 drm_connector_list_iter_begin(dev, &conn_iter);
11859 for_each_intel_connector_iter(connector, &conn_iter) {
11860 if (connector->base.state->crtc)
11861 drm_connector_put(&connector->base);
11862
11863 if (connector->base.encoder) {
11864 connector->base.state->best_encoder =
11865 connector->base.encoder;
11866 connector->base.state->crtc =
11867 connector->base.encoder->crtc;
11868
11869 drm_connector_get(&connector->base);
11870 } else {
11871 connector->base.state->best_encoder = NULL;
11872 connector->base.state->crtc = NULL;
11873 }
11874 }
11875 drm_connector_list_iter_end(&conn_iter);
11876 }
11877
11878 static int
11879 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11880 struct intel_crtc_state *pipe_config)
11881 {
11882 struct drm_connector *connector = conn_state->connector;
11883 const struct drm_display_info *info = &connector->display_info;
11884 int bpp;
11885
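/*
 * The pipe operates on whole pixels of 3 components, so pipe_bpp is
 * always 3x the per-component depth, and only 6, 8, 10 and 12 bpc
 * are supported: round the connector's max_bpc down accordingly.
 */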
11886 switch (conn_state->max_bpc) {
11887 case 6 ... 7:
11888 bpp = 6 * 3;
11889 break;
11890 case 8 ... 9:
11891 bpp = 8 * 3;
11892 break;
11893 case 10 ... 11:
11894 bpp = 10 * 3;
11895 break;
11896 case 12:
11897 bpp = 12 * 3;
11898 break;
11899 default:
11900 return -EINVAL;
11901 }
11902
11903 if (bpp < pipe_config->pipe_bpp) {
11904 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11905 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11906 connector->base.id, connector->name,
11907 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11908 pipe_config->pipe_bpp);
11909
11910 pipe_config->pipe_bpp = bpp;
11911 }
11912
11913 return 0;
11914 }
11915
11916 static int
11917 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11918 struct intel_crtc_state *pipe_config)
11919 {
11920 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11921 struct drm_atomic_state *state = pipe_config->base.state;
11922 struct drm_connector *connector;
11923 struct drm_connector_state *connector_state;
11924 int bpp, i;
11925
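/*
 * Start from the platform maximum: 10 bpc (30 bpp) on G4X/VLV/CHV,
 * 12 bpc (36 bpp) on ILK and newer, 8 bpc (24 bpp) otherwise. The
 * loop below then clamps this down per connector.
 */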
11926 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11927 IS_CHERRYVIEW(dev_priv)))
11928 bpp = 10*3;
11929 else if (INTEL_GEN(dev_priv) >= 5)
11930 bpp = 12*3;
11931 else
11932 bpp = 8*3;
11933
11934 pipe_config->pipe_bpp = bpp;
11935
11936 /* Clamp display bpp to connector max bpp */
11937 for_each_new_connector_in_state(state, connector, connector_state, i) {
11938 int ret;
11939
11940 if (connector_state->crtc != &crtc->base)
11941 continue;
11942
11943 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11944 if (ret)
11945 return ret;
11946 }
11947
11948 return 0;
11949 }
11950
11951 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11952 {
11953 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11954 "type: 0x%x flags: 0x%x\n",
11955 mode->crtc_clock,
11956 mode->crtc_hdisplay, mode->crtc_hsync_start,
11957 mode->crtc_hsync_end, mode->crtc_htotal,
11958 mode->crtc_vdisplay, mode->crtc_vsync_start,
11959 mode->crtc_vsync_end, mode->crtc_vtotal,
11960 mode->type, mode->flags);
11961 }
11962
11963 static inline void
11964 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
11965 const char *id, unsigned int lane_count,
11966 const struct intel_link_m_n *m_n)
11967 {
11968 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11969 id, lane_count,
11970 m_n->gmch_m, m_n->gmch_n,
11971 m_n->link_m, m_n->link_n, m_n->tu);
11972 }
11973
11974 static void
11975 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11976 const union hdmi_infoframe *frame)
11977 {
11978 if ((drm_debug & DRM_UT_KMS) == 0)
11979 return;
11980
11981 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11982 }
11983
11984 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11985
11986 static const char * const output_type_str[] = {
11987 OUTPUT_TYPE(UNUSED),
11988 OUTPUT_TYPE(ANALOG),
11989 OUTPUT_TYPE(DVO),
11990 OUTPUT_TYPE(SDVO),
11991 OUTPUT_TYPE(LVDS),
11992 OUTPUT_TYPE(TVOUT),
11993 OUTPUT_TYPE(HDMI),
11994 OUTPUT_TYPE(DP),
11995 OUTPUT_TYPE(EDP),
11996 OUTPUT_TYPE(DSI),
11997 OUTPUT_TYPE(DDI),
11998 OUTPUT_TYPE(DP_MST),
11999 };
12000
12001 #undef OUTPUT_TYPE
12002
12003 static void snprintf_output_types(char *buf, size_t len,
12004 unsigned int output_types)
12005 {
12006 char *str = buf;
12007 int i;
12008
12009 str[0] = '\0';
12010
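/*
 * Build a comma separated list of the set output types, e.g.
 * "HDMI,DP". snprintf() returns the length that would have been
 * written, so r >= len detects truncation and ends the loop early.
 */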
12011 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12012 int r;
12013
12014 if ((output_types & BIT(i)) == 0)
12015 continue;
12016
12017 r = snprintf(str, len, "%s%s",
12018 str != buf ? "," : "", output_type_str[i]);
12019 if (r >= len)
12020 break;
12021 str += r;
12022 len -= r;
12023
12024 output_types &= ~BIT(i);
12025 }
12026
12027 WARN_ON_ONCE(output_types != 0);
12028 }
12029
12030 static const char * const output_format_str[] = {
12031 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
12032 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
12033 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
12034 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
12035 };
12036
12037 static const char *output_formats(enum intel_output_format format)
12038 {
12039 if (format >= ARRAY_SIZE(output_format_str))
12040 format = INTEL_OUTPUT_FORMAT_INVALID;
12041 return output_format_str[format];
12042 }
12043
12044 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
12045 {
12046 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
12047 const struct drm_framebuffer *fb = plane_state->base.fb;
12048 struct drm_format_name_buf format_name;
12049
12050 if (!fb) {
12051 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
12052 plane->base.base.id, plane->base.name,
12053 yesno(plane_state->base.visible));
12054 return;
12055 }
12056
12057 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
12058 plane->base.base.id, plane->base.name,
12059 fb->base.id, fb->width, fb->height,
12060 drm_get_format_name(fb->format->format, &format_name),
12061 yesno(plane_state->base.visible));
12062 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
12063 plane_state->base.rotation, plane_state->scaler_id);
12064 if (plane_state->base.visible)
12065 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
12066 DRM_RECT_FP_ARG(&plane_state->base.src),
12067 DRM_RECT_ARG(&plane_state->base.dst));
12068 }
12069
12070 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
12071 struct intel_atomic_state *state,
12072 const char *context)
12073 {
12074 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
12075 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12076 const struct intel_plane_state *plane_state;
12077 struct intel_plane *plane;
12078 char buf[64];
12079 int i;
12080
12081 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
12082 crtc->base.base.id, crtc->base.name,
12083 yesno(pipe_config->base.enable), context);
12084
12085 if (!pipe_config->base.enable)
12086 goto dump_planes;
12087
12088 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
12089 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
12090 yesno(pipe_config->base.active),
12091 buf, pipe_config->output_types,
12092 output_formats(pipe_config->output_format));
12093
12094 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
12095 transcoder_name(pipe_config->cpu_transcoder),
12096 pipe_config->pipe_bpp, pipe_config->dither);
12097
12098 if (pipe_config->has_pch_encoder)
12099 intel_dump_m_n_config(pipe_config, "fdi",
12100 pipe_config->fdi_lanes,
12101 &pipe_config->fdi_m_n);
12102
12103 if (intel_crtc_has_dp_encoder(pipe_config)) {
12104 intel_dump_m_n_config(pipe_config, "dp m_n",
12105 pipe_config->lane_count, &pipe_config->dp_m_n);
12106 if (pipe_config->has_drrs)
12107 intel_dump_m_n_config(pipe_config, "dp m2_n2",
12108 pipe_config->lane_count,
12109 &pipe_config->dp_m2_n2);
12110 }
12111
12112 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
12113 pipe_config->has_audio, pipe_config->has_infoframe,
12114 pipe_config->infoframes.enable);
12115
12116 if (pipe_config->infoframes.enable &
12117 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
12118 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
12119 if (pipe_config->infoframes.enable &
12120 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
12121 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
12122 if (pipe_config->infoframes.enable &
12123 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
12124 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
12125 if (pipe_config->infoframes.enable &
12126 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
12127 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
12128
12129 DRM_DEBUG_KMS("requested mode:\n");
12130 drm_mode_debug_printmodeline(&pipe_config->base.mode);
12131 DRM_DEBUG_KMS("adjusted mode:\n");
12132 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12133 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12134 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
12135 pipe_config->port_clock,
12136 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
12137 pipe_config->pixel_rate);
12138
12139 if (INTEL_GEN(dev_priv) >= 9)
12140 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12141 crtc->num_scalers,
12142 pipe_config->scaler_state.scaler_users,
12143 pipe_config->scaler_state.scaler_id);
12144
12145 if (HAS_GMCH(dev_priv))
12146 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12147 pipe_config->gmch_pfit.control,
12148 pipe_config->gmch_pfit.pgm_ratios,
12149 pipe_config->gmch_pfit.lvds_border_bits);
12150 else
12151 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
12152 pipe_config->pch_pfit.pos,
12153 pipe_config->pch_pfit.size,
12154 enableddisabled(pipe_config->pch_pfit.enabled),
12155 yesno(pipe_config->pch_pfit.force_thru));
12156
12157 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
12158 pipe_config->ips_enabled, pipe_config->double_wide);
12159
12160 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
12161
12162 dump_planes:
12163 if (!state)
12164 return;
12165
12166 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12167 if (plane->pipe == crtc->pipe)
12168 intel_dump_plane_state(plane_state);
12169 }
12170 }
12171
12172 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12173 {
12174 struct drm_device *dev = state->base.dev;
12175 struct drm_connector *connector;
12176 struct drm_connector_list_iter conn_iter;
12177 unsigned int used_ports = 0;
12178 unsigned int used_mst_ports = 0;
12179 bool ret = true;
12180
12181 /*
12182 * Walk the connector list instead of the encoder
12183 * list to detect the problem on ddi platforms
12184 * where there's just one encoder per digital port.
12185 */
12186 drm_connector_list_iter_begin(dev, &conn_iter);
12187 drm_for_each_connector_iter(connector, &conn_iter) {
12188 struct drm_connector_state *connector_state;
12189 struct intel_encoder *encoder;
12190
12191 connector_state =
12192 drm_atomic_get_new_connector_state(&state->base,
12193 connector);
12194 if (!connector_state)
12195 connector_state = connector->state;
12196
12197 if (!connector_state->best_encoder)
12198 continue;
12199
12200 encoder = to_intel_encoder(connector_state->best_encoder);
12201
12202 WARN_ON(!connector_state->crtc);
12203
12204 switch (encoder->type) {
12205 unsigned int port_mask;
12206 case INTEL_OUTPUT_DDI:
12207 if (WARN_ON(!HAS_DDI(to_i915(dev))))
12208 break;
12209 /* else, fall through */
12210 case INTEL_OUTPUT_DP:
12211 case INTEL_OUTPUT_HDMI:
12212 case INTEL_OUTPUT_EDP:
12213 port_mask = 1 << encoder->port;
12214
12215 /* the same port mustn't appear more than once */
12216 if (used_ports & port_mask)
12217 ret = false;
12218
12219 used_ports |= port_mask;
12220 break;
12221 case INTEL_OUTPUT_DP_MST:
12222 used_mst_ports |=
12223 1 << encoder->port;
12224 break;
12225 default:
12226 break;
12227 }
12228 }
12229 drm_connector_list_iter_end(&conn_iter);
12230
12231 /* can't mix MST and SST/HDMI on the same port */
12232 if (used_ports & used_mst_ports)
12233 return false;
12234
12235 return ret;
12236 }
12237
12238 static int
12239 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12240 {
12241 struct drm_i915_private *dev_priv =
12242 to_i915(crtc_state->base.crtc->dev);
12243 struct intel_crtc_state *saved_state;
12244
12245 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12246 if (!saved_state)
12247 return -ENOMEM;
12248
12249 /* FIXME: before the switch to atomic started, a new pipe_config was
12250 * kzalloc'd. Code that depends on any field being zero should be
12251 * fixed, so that the crtc_state can be safely duplicated. For now,
12252 	 * only fields that are known not to cause problems are preserved. */
12253
12254 saved_state->scaler_state = crtc_state->scaler_state;
12255 saved_state->shared_dpll = crtc_state->shared_dpll;
12256 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12257 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
12258 sizeof(saved_state->icl_port_dplls));
12259 saved_state->crc_enabled = crtc_state->crc_enabled;
12260 if (IS_G4X(dev_priv) ||
12261 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12262 saved_state->wm = crtc_state->wm;
12263
12264 /* Keep base drm_crtc_state intact, only clear our extended struct */
12265 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
12266 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
12267 sizeof(*crtc_state) - sizeof(crtc_state->base));
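/*
 * The BUILD_BUG_ON() above guarantees that base is the first member,
 * so "&crtc_state->base + 1" is the start of the i915 specific
 * fields. saved_state is zeroed apart from the handful of fields
 * copied above, so this memcpy() effectively clears the extended
 * state while preserving exactly those fields.
 */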
12268
12269 kfree(saved_state);
12270 return 0;
12271 }
12272
12273 static int
12274 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12275 {
12276 struct drm_crtc *crtc = pipe_config->base.crtc;
12277 struct drm_atomic_state *state = pipe_config->base.state;
12278 struct intel_encoder *encoder;
12279 struct drm_connector *connector;
12280 struct drm_connector_state *connector_state;
12281 int base_bpp, ret;
12282 int i;
12283 bool retry = true;
12284
12285 ret = clear_intel_crtc_state(pipe_config);
12286 if (ret)
12287 return ret;
12288
12289 pipe_config->cpu_transcoder =
12290 (enum transcoder) to_intel_crtc(crtc)->pipe;
12291
12292 /*
12293 * Sanitize sync polarity flags based on requested ones. If neither
12294 	 * positive nor negative polarity is requested, treat this as meaning
12295 * negative polarity.
12296 */
12297 if (!(pipe_config->base.adjusted_mode.flags &
12298 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12299 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12300
12301 if (!(pipe_config->base.adjusted_mode.flags &
12302 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12303 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12304
12305 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12306 pipe_config);
12307 if (ret)
12308 return ret;
12309
12310 base_bpp = pipe_config->pipe_bpp;
12311
12312 /*
12313 * Determine the real pipe dimensions. Note that stereo modes can
12314 * increase the actual pipe size due to the frame doubling and
12315 	 * insertion of additional space for blanks between the frames. This
12316 * is stored in the crtc timings. We use the requested mode to do this
12317 * computation to clearly distinguish it from the adjusted mode, which
12318 * can be changed by the connectors in the below retry loop.
12319 */
12320 drm_mode_get_hv_timing(&pipe_config->base.mode,
12321 &pipe_config->pipe_src_w,
12322 &pipe_config->pipe_src_h);
12323
12324 for_each_new_connector_in_state(state, connector, connector_state, i) {
12325 if (connector_state->crtc != crtc)
12326 continue;
12327
12328 encoder = to_intel_encoder(connector_state->best_encoder);
12329
12330 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12331 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12332 return -EINVAL;
12333 }
12334
12335 /*
12336 * Determine output_types before calling the .compute_config()
12337 * hooks so that the hooks can use this information safely.
12338 */
12339 if (encoder->compute_output_type)
12340 pipe_config->output_types |=
12341 BIT(encoder->compute_output_type(encoder, pipe_config,
12342 connector_state));
12343 else
12344 pipe_config->output_types |= BIT(encoder->type);
12345 }
12346
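/*
 * The encoder->compute_config() and intel_crtc_compute_config()
 * calls below may lower limits such as pipe_bpp; when the latter
 * reports RETRY ("CRTC bw constrained") the encoder loop is rerun
 * once with the reduced values, and the retry flag guards against
 * looping forever.
 */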
12347 encoder_retry:
12348 /* Ensure the port clock defaults are reset when retrying. */
12349 pipe_config->port_clock = 0;
12350 pipe_config->pixel_multiplier = 1;
12351
12352 /* Fill in default crtc timings, allow encoders to overwrite them. */
12353 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12354 CRTC_STEREO_DOUBLE);
12355
12356 /* Pass our mode to the connectors and the CRTC to give them a chance to
12357 * adjust it according to limitations or connector properties, and also
12358 * a chance to reject the mode entirely.
12359 */
12360 for_each_new_connector_in_state(state, connector, connector_state, i) {
12361 if (connector_state->crtc != crtc)
12362 continue;
12363
12364 encoder = to_intel_encoder(connector_state->best_encoder);
12365 ret = encoder->compute_config(encoder, pipe_config,
12366 connector_state);
12367 if (ret < 0) {
12368 if (ret != -EDEADLK)
12369 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12370 ret);
12371 return ret;
12372 }
12373 }
12374
12375 /* Set default port clock if not overwritten by the encoder. Needs to be
12376 * done afterwards in case the encoder adjusts the mode. */
12377 if (!pipe_config->port_clock)
12378 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12379 * pipe_config->pixel_multiplier;
12380
12381 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12382 if (ret == -EDEADLK)
12383 return ret;
12384 if (ret < 0) {
12385 DRM_DEBUG_KMS("CRTC fixup failed\n");
12386 return ret;
12387 }
12388
12389 if (ret == RETRY) {
12390 if (WARN(!retry, "loop in pipe configuration computation\n"))
12391 return -EINVAL;
12392
12393 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12394 retry = false;
12395 goto encoder_retry;
12396 }
12397
12398 	/* Dithering does not seem to pass bits through correctly when it
12399 	 * should, so only enable it on 6bpc panels, and only when it's not a
12400 	 * compliance test requesting a 6bpc video pattern.
12401 */
12402 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12403 !pipe_config->dither_force_disable;
12404 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12405 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12406
12407 return 0;
12408 }
12409
12410 bool intel_fuzzy_clock_check(int clock1, int clock2)
12411 {
12412 int diff;
12413
12414 if (clock1 == clock2)
12415 return true;
12416
12417 if (!clock1 || !clock2)
12418 return false;
12419
12420 diff = abs(clock1 - clock2);
12421
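/*
 * Treat the clocks as matching when they differ by less than 5% of
 * their sum: (diff + c1 + c2) * 100 / (c1 + c2) < 105 rearranges to
 * diff < 0.05 * (c1 + c2), i.e. roughly a 10% relative tolerance
 * between the two clocks.
 */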
12422 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
12423 return true;
12424
12425 return false;
12426 }
12427
12428 static bool
12429 intel_compare_m_n(unsigned int m, unsigned int n,
12430 unsigned int m2, unsigned int n2,
12431 bool exact)
12432 {
12433 if (m == m2 && n == n2)
12434 return true;
12435
12436 if (exact || !m || !n || !m2 || !n2)
12437 return false;
12438
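/*
 * Bring both ratios to a common denominator by doubling the pair
 * with the smaller n, then fuzzy-compare the numerators: e.g. for
 * 10/8 vs 5/4, 5/4 is doubled up to 10/8 and the two compare equal.
 */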
12439 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12440
12441 if (n > n2) {
12442 while (n > n2) {
12443 m2 <<= 1;
12444 n2 <<= 1;
12445 }
12446 } else if (n < n2) {
12447 while (n < n2) {
12448 m <<= 1;
12449 n <<= 1;
12450 }
12451 }
12452
12453 if (n != n2)
12454 return false;
12455
12456 return intel_fuzzy_clock_check(m, m2);
12457 }
12458
12459 static bool
12460 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12461 const struct intel_link_m_n *m2_n2,
12462 bool exact)
12463 {
12464 return m_n->tu == m2_n2->tu &&
12465 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12466 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12467 intel_compare_m_n(m_n->link_m, m_n->link_n,
12468 m2_n2->link_m, m2_n2->link_n, exact);
12469 }
12470
12471 static bool
12472 intel_compare_infoframe(const union hdmi_infoframe *a,
12473 const union hdmi_infoframe *b)
12474 {
12475 return memcmp(a, b, sizeof(*a)) == 0;
12476 }
12477
12478 static void
12479 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12480 bool fastset, const char *name,
12481 const union hdmi_infoframe *a,
12482 const union hdmi_infoframe *b)
12483 {
12484 if (fastset) {
12485 if ((drm_debug & DRM_UT_KMS) == 0)
12486 return;
12487
12488 drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
12489 drm_dbg(DRM_UT_KMS, "expected:");
12490 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12491 drm_dbg(DRM_UT_KMS, "found");
12492 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12493 } else {
12494 drm_err("mismatch in %s infoframe", name);
12495 drm_err("expected:");
12496 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12497 drm_err("found");
12498 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12499 }
12500 }
12501
12502 static void __printf(3, 4)
12503 pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
12504 {
12505 struct va_format vaf;
12506 va_list args;
12507
12508 va_start(args, format);
12509 vaf.fmt = format;
12510 vaf.va = &args;
12511
12512 if (fastset)
12513 drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
12514 else
12515 drm_err("mismatch in %s %pV", name, &vaf);
12516
12517 va_end(args);
12518 }
12519
12520 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12521 {
12522 if (i915_modparams.fastboot != -1)
12523 return i915_modparams.fastboot;
12524
12525 /* Enable fastboot by default on Skylake and newer */
12526 if (INTEL_GEN(dev_priv) >= 9)
12527 return true;
12528
12529 /* Enable fastboot by default on VLV and CHV */
12530 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12531 return true;
12532
12533 /* Disabled by default on all others */
12534 return false;
12535 }
12536
12537 static bool
12538 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
12539 const struct intel_crtc_state *pipe_config,
12540 bool fastset)
12541 {
12542 struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
12543 bool ret = true;
12544 bool fixup_inherited = fastset &&
12545 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12546 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12547
12548 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12549 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12550 ret = false;
12551 }
12552
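/*
 * The PIPE_CONF_CHECK_*() macros below compare a single field of the
 * two crtc states; on a mismatch they log it (as a debug message for
 * fastset, as an error otherwise) and clear ret rather than bailing
 * out, so that every mismatching field is reported in one pass.
 */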
12553 #define PIPE_CONF_CHECK_X(name) do { \
12554 if (current_config->name != pipe_config->name) { \
12555 pipe_config_mismatch(fastset, __stringify(name), \
12556 "(expected 0x%08x, found 0x%08x)\n", \
12557 current_config->name, \
12558 pipe_config->name); \
12559 ret = false; \
12560 } \
12561 } while (0)
12562
12563 #define PIPE_CONF_CHECK_I(name) do { \
12564 if (current_config->name != pipe_config->name) { \
12565 pipe_config_mismatch(fastset, __stringify(name), \
12566 "(expected %i, found %i)\n", \
12567 current_config->name, \
12568 pipe_config->name); \
12569 ret = false; \
12570 } \
12571 } while (0)
12572
12573 #define PIPE_CONF_CHECK_BOOL(name) do { \
12574 if (current_config->name != pipe_config->name) { \
12575 pipe_config_mismatch(fastset, __stringify(name), \
12576 "(expected %s, found %s)\n", \
12577 yesno(current_config->name), \
12578 yesno(pipe_config->name)); \
12579 ret = false; \
12580 } \
12581 } while (0)
12582
12583 /*
12584 * Checks state where we only read out the enabling, but not the entire
12585 * state itself (like full infoframes or ELD for audio). These states
12586 * require a full modeset on bootup to fix up.
12587 */
12588 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12589 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12590 PIPE_CONF_CHECK_BOOL(name); \
12591 } else { \
12592 pipe_config_mismatch(fastset, __stringify(name), \
12593 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12594 yesno(current_config->name), \
12595 yesno(pipe_config->name)); \
12596 ret = false; \
12597 } \
12598 } while (0)
12599
12600 #define PIPE_CONF_CHECK_P(name) do { \
12601 if (current_config->name != pipe_config->name) { \
12602 pipe_config_mismatch(fastset, __stringify(name), \
12603 "(expected %p, found %p)\n", \
12604 current_config->name, \
12605 pipe_config->name); \
12606 ret = false; \
12607 } \
12608 } while (0)
12609
12610 #define PIPE_CONF_CHECK_M_N(name) do { \
12611 	if (!intel_compare_link_m_n(&current_config->name, \
12612 &pipe_config->name,\
12613 !fastset)) { \
12614 pipe_config_mismatch(fastset, __stringify(name), \
12615 "(expected tu %i gmch %i/%i link %i/%i, " \
12616 "found tu %i, gmch %i/%i link %i/%i)\n", \
12617 current_config->name.tu, \
12618 current_config->name.gmch_m, \
12619 current_config->name.gmch_n, \
12620 current_config->name.link_m, \
12621 current_config->name.link_n, \
12622 pipe_config->name.tu, \
12623 pipe_config->name.gmch_m, \
12624 pipe_config->name.gmch_n, \
12625 pipe_config->name.link_m, \
12626 pipe_config->name.link_n); \
12627 ret = false; \
12628 } \
12629 } while (0)
12630
12631 /* This is required for BDW+ where there is only one set of registers for
12632 * switching between high and low RR.
12633 * This macro can be used whenever a comparison has to be made between one
12634 * hw state and multiple sw state variables.
12635 */
12636 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12637 	if (!intel_compare_link_m_n(&current_config->name, \
12638 &pipe_config->name, !fastset) && \
12639 	    !intel_compare_link_m_n(&current_config->alt_name, \
12640 &pipe_config->name, !fastset)) { \
12641 pipe_config_mismatch(fastset, __stringify(name), \
12642 "(expected tu %i gmch %i/%i link %i/%i, " \
12643 "or tu %i gmch %i/%i link %i/%i, " \
12644 "found tu %i, gmch %i/%i link %i/%i)\n", \
12645 current_config->name.tu, \
12646 current_config->name.gmch_m, \
12647 current_config->name.gmch_n, \
12648 current_config->name.link_m, \
12649 current_config->name.link_n, \
12650 current_config->alt_name.tu, \
12651 current_config->alt_name.gmch_m, \
12652 current_config->alt_name.gmch_n, \
12653 current_config->alt_name.link_m, \
12654 current_config->alt_name.link_n, \
12655 pipe_config->name.tu, \
12656 pipe_config->name.gmch_m, \
12657 pipe_config->name.gmch_n, \
12658 pipe_config->name.link_m, \
12659 pipe_config->name.link_n); \
12660 ret = false; \
12661 } \
12662 } while (0)
12663
12664 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12665 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12666 pipe_config_mismatch(fastset, __stringify(name), \
12667 "(%x) (expected %i, found %i)\n", \
12668 (mask), \
12669 current_config->name & (mask), \
12670 pipe_config->name & (mask)); \
12671 ret = false; \
12672 } \
12673 } while (0)
12674
12675 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12676 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12677 pipe_config_mismatch(fastset, __stringify(name), \
12678 "(expected %i, found %i)\n", \
12679 current_config->name, \
12680 pipe_config->name); \
12681 ret = false; \
12682 } \
12683 } while (0)
12684
12685 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12686 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
12687 &pipe_config->infoframes.name)) { \
12688 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
12689 					       &current_config->infoframes.name, \
12690 &pipe_config->infoframes.name); \
12691 ret = false; \
12692 } \
12693 } while (0)
12694
12695 #define PIPE_CONF_QUIRK(quirk) \
12696 ((current_config->quirks | pipe_config->quirks) & (quirk))
12697
12698 PIPE_CONF_CHECK_I(cpu_transcoder);
12699
12700 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12701 PIPE_CONF_CHECK_I(fdi_lanes);
12702 PIPE_CONF_CHECK_M_N(fdi_m_n);
12703
12704 PIPE_CONF_CHECK_I(lane_count);
12705 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12706
12707 if (INTEL_GEN(dev_priv) < 8) {
12708 PIPE_CONF_CHECK_M_N(dp_m_n);
12709
12710 if (current_config->has_drrs)
12711 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12712 } else
12713 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12714
12715 PIPE_CONF_CHECK_X(output_types);
12716
12717 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12718 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12719 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12720 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12721 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12722 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12723
12724 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12725 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12726 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12727 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12728 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12729 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12730
12731 PIPE_CONF_CHECK_I(pixel_multiplier);
12732 PIPE_CONF_CHECK_I(output_format);
12733 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12734 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12735 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12736 PIPE_CONF_CHECK_BOOL(limited_color_range);
12737
12738 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12739 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12740 PIPE_CONF_CHECK_BOOL(has_infoframe);
12741
12742 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12743
12744 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12745 DRM_MODE_FLAG_INTERLACE);
12746
12747 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12748 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12749 DRM_MODE_FLAG_PHSYNC);
12750 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12751 DRM_MODE_FLAG_NHSYNC);
12752 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12753 DRM_MODE_FLAG_PVSYNC);
12754 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12755 DRM_MODE_FLAG_NVSYNC);
12756 }
12757
12758 PIPE_CONF_CHECK_X(gmch_pfit.control);
12759 /* pfit ratios are autocomputed by the hw on gen4+ */
12760 if (INTEL_GEN(dev_priv) < 4)
12761 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12762 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12763
12764 /*
12765 * Changing the EDP transcoder input mux
12766 * (A_ONOFF vs. A_ON) requires a full modeset.
12767 */
12768 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
12769
12770 if (!fastset) {
12771 PIPE_CONF_CHECK_I(pipe_src_w);
12772 PIPE_CONF_CHECK_I(pipe_src_h);
12773
12774 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12775 if (current_config->pch_pfit.enabled) {
12776 PIPE_CONF_CHECK_X(pch_pfit.pos);
12777 PIPE_CONF_CHECK_X(pch_pfit.size);
12778 }
12779
12780 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12781 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12782
12783 PIPE_CONF_CHECK_X(gamma_mode);
12784 if (IS_CHERRYVIEW(dev_priv))
12785 PIPE_CONF_CHECK_X(cgm_mode);
12786 else
12787 PIPE_CONF_CHECK_X(csc_mode);
12788 PIPE_CONF_CHECK_BOOL(gamma_enable);
12789 PIPE_CONF_CHECK_BOOL(csc_enable);
12790 }
12791
12792 PIPE_CONF_CHECK_BOOL(double_wide);
12793
12794 PIPE_CONF_CHECK_P(shared_dpll);
12795 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12796 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12797 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12798 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12799 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12800 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12801 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12802 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12803 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12804 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12805 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12806 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12807 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12808 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12809 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12810 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12811 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12812 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12813 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12814 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12815 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12816 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12817 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12818 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12819 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12820 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12821 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12822 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12823 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12824 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12825 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12826
12827 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12828 PIPE_CONF_CHECK_X(dsi_pll.div);
12829
12830 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12831 PIPE_CONF_CHECK_I(pipe_bpp);
12832
12833 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12834 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12835
12836 PIPE_CONF_CHECK_I(min_voltage_level);
12837
12838 PIPE_CONF_CHECK_X(infoframes.enable);
12839 PIPE_CONF_CHECK_X(infoframes.gcp);
12840 PIPE_CONF_CHECK_INFOFRAME(avi);
12841 PIPE_CONF_CHECK_INFOFRAME(spd);
12842 PIPE_CONF_CHECK_INFOFRAME(hdmi);
12843 PIPE_CONF_CHECK_INFOFRAME(drm);
12844
12845 #undef PIPE_CONF_CHECK_X
12846 #undef PIPE_CONF_CHECK_I
12847 #undef PIPE_CONF_CHECK_BOOL
12848 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12849 #undef PIPE_CONF_CHECK_P
12850 #undef PIPE_CONF_CHECK_FLAGS
12851 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12852 #undef PIPE_CONF_QUIRK
12853
12854 return ret;
12855 }
12856
12857 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12858 const struct intel_crtc_state *pipe_config)
12859 {
12860 if (pipe_config->has_pch_encoder) {
12861 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12862 &pipe_config->fdi_m_n);
12863 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12864
12865 /*
12866 * FDI already provided one idea for the dotclock.
12867 * Yell if the encoder disagrees.
12868 */
12869 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12870 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12871 fdi_dotclock, dotclock);
12872 }
12873 }
12874
12875 static void verify_wm_state(struct intel_crtc *crtc,
12876 struct intel_crtc_state *new_crtc_state)
12877 {
12878 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12879 struct skl_hw_state {
12880 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12881 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12882 struct skl_ddb_allocation ddb;
12883 struct skl_pipe_wm wm;
12884 } *hw;
12885 struct skl_ddb_allocation *sw_ddb;
12886 struct skl_pipe_wm *sw_wm;
12887 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12888 const enum pipe pipe = crtc->pipe;
12889 int plane, level, max_level = ilk_wm_max_level(dev_priv);
12890
12891 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
12892 return;
12893
12894 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12895 if (!hw)
12896 return;
12897
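/*
 * Read the watermark and DDB allocation state back from the hardware
 * into hw, then compare it against the software state the atomic
 * commit believes it programmed; any mismatch below means the two
 * have diverged.
 */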
12898 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
12899 sw_wm = &new_crtc_state->wm.skl.optimal;
12900
12901 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
12902
12903 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
12904 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12905
12906 if (INTEL_GEN(dev_priv) >= 11 &&
12907 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12908 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12909 sw_ddb->enabled_slices,
12910 hw->ddb.enabled_slices);
12911
12912 /* planes */
12913 for_each_universal_plane(dev_priv, pipe, plane) {
12914 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12915
12916 hw_plane_wm = &hw->wm.planes[plane];
12917 sw_plane_wm = &sw_wm->planes[plane];
12918
12919 /* Watermarks */
12920 for (level = 0; level <= max_level; level++) {
12921 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12922 &sw_plane_wm->wm[level]))
12923 continue;
12924
12925 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12926 pipe_name(pipe), plane + 1, level,
12927 sw_plane_wm->wm[level].plane_en,
12928 sw_plane_wm->wm[level].plane_res_b,
12929 sw_plane_wm->wm[level].plane_res_l,
12930 hw_plane_wm->wm[level].plane_en,
12931 hw_plane_wm->wm[level].plane_res_b,
12932 hw_plane_wm->wm[level].plane_res_l);
12933 }
12934
12935 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12936 &sw_plane_wm->trans_wm)) {
12937 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12938 pipe_name(pipe), plane + 1,
12939 sw_plane_wm->trans_wm.plane_en,
12940 sw_plane_wm->trans_wm.plane_res_b,
12941 sw_plane_wm->trans_wm.plane_res_l,
12942 hw_plane_wm->trans_wm.plane_en,
12943 hw_plane_wm->trans_wm.plane_res_b,
12944 hw_plane_wm->trans_wm.plane_res_l);
12945 }
12946
12947 /* DDB */
12948 hw_ddb_entry = &hw->ddb_y[plane];
12949 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
12950
12951 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12952 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12953 pipe_name(pipe), plane + 1,
12954 sw_ddb_entry->start, sw_ddb_entry->end,
12955 hw_ddb_entry->start, hw_ddb_entry->end);
12956 }
12957 }
12958
12959 /*
12960 * cursor
12961 	 * If the cursor plane isn't active, we may not have updated its ddb
12962 	 * allocation. In that case, since the ddb allocation will be updated
12963 	 * once the plane becomes visible, we can skip this check.
12964 */
12965 if (1) {
12966 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12967
12968 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
12969 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12970
12971 /* Watermarks */
12972 for (level = 0; level <= max_level; level++) {
12973 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12974 &sw_plane_wm->wm[level]))
12975 continue;
12976
12977 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12978 pipe_name(pipe), level,
12979 sw_plane_wm->wm[level].plane_en,
12980 sw_plane_wm->wm[level].plane_res_b,
12981 sw_plane_wm->wm[level].plane_res_l,
12982 hw_plane_wm->wm[level].plane_en,
12983 hw_plane_wm->wm[level].plane_res_b,
12984 hw_plane_wm->wm[level].plane_res_l);
12985 }
12986
12987 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12988 &sw_plane_wm->trans_wm)) {
12989 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12990 pipe_name(pipe),
12991 sw_plane_wm->trans_wm.plane_en,
12992 sw_plane_wm->trans_wm.plane_res_b,
12993 sw_plane_wm->trans_wm.plane_res_l,
12994 hw_plane_wm->trans_wm.plane_en,
12995 hw_plane_wm->trans_wm.plane_res_b,
12996 hw_plane_wm->trans_wm.plane_res_l);
12997 }
12998
12999 /* DDB */
13000 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13001 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13002
13003 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13004 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13005 pipe_name(pipe),
13006 sw_ddb_entry->start, sw_ddb_entry->end,
13007 hw_ddb_entry->start, hw_ddb_entry->end);
13008 }
13009 }
13010
13011 kfree(hw);
13012 }
13013
13014 static void
13015 verify_connector_state(struct intel_atomic_state *state,
13016 struct intel_crtc *crtc)
13017 {
13018 struct drm_connector *connector;
13019 struct drm_connector_state *new_conn_state;
13020 int i;
13021
13022 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13023 struct drm_encoder *encoder = connector->encoder;
13024 struct intel_crtc_state *crtc_state = NULL;
13025
13026 if (new_conn_state->crtc != &crtc->base)
13027 continue;
13028
13029 if (crtc)
13030 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13031
13032 intel_connector_verify_state(crtc_state, new_conn_state);
13033
13034 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13035 "connector's atomic encoder doesn't match legacy encoder\n");
13036 }
13037 }
13038
13039 static void
13040 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
13041 {
13042 struct intel_encoder *encoder;
13043 struct drm_connector *connector;
13044 struct drm_connector_state *old_conn_state, *new_conn_state;
13045 int i;
13046
13047 for_each_intel_encoder(&dev_priv->drm, encoder) {
13048 bool enabled = false, found = false;
13049 enum pipe pipe;
13050
13051 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13052 encoder->base.base.id,
13053 encoder->base.name);
13054
13055 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
13056 new_conn_state, i) {
13057 if (old_conn_state->best_encoder == &encoder->base)
13058 found = true;
13059
13060 if (new_conn_state->best_encoder != &encoder->base)
13061 continue;
13062 found = enabled = true;
13063
13064 I915_STATE_WARN(new_conn_state->crtc !=
13065 encoder->base.crtc,
13066 "connector's crtc doesn't match encoder crtc\n");
13067 }
13068
13069 if (!found)
13070 continue;
13071
13072 I915_STATE_WARN(!!encoder->base.crtc != enabled,
13073 "encoder's enabled state mismatch "
13074 "(expected %i, found %i)\n",
13075 !!encoder->base.crtc, enabled);
13076
13077 if (!encoder->base.crtc) {
13078 bool active;
13079
13080 active = encoder->get_hw_state(encoder, &pipe);
13081 I915_STATE_WARN(active,
13082 "encoder detached but still enabled on pipe %c.\n",
13083 pipe_name(pipe));
13084 }
13085 }
13086 }
13087
13088 static void
13089 verify_crtc_state(struct intel_crtc *crtc,
13090 struct intel_crtc_state *old_crtc_state,
13091 struct intel_crtc_state *new_crtc_state)
13092 {
13093 struct drm_device *dev = crtc->base.dev;
13094 struct drm_i915_private *dev_priv = to_i915(dev);
13095 struct intel_encoder *encoder;
13096 struct intel_crtc_state *pipe_config;
13097 struct drm_atomic_state *state;
13098 bool active;
13099
13100 state = old_crtc_state->base.state;
13101 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
13102 pipe_config = old_crtc_state;
13103 memset(pipe_config, 0, sizeof(*pipe_config));
13104 pipe_config->base.crtc = &crtc->base;
13105 pipe_config->base.state = state;
13106
13107 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
13108
13109 active = dev_priv->display.get_pipe_config(crtc, pipe_config);
13110
13111 /* we keep both pipes enabled on 830 */
13112 if (IS_I830(dev_priv))
13113 active = new_crtc_state->base.active;
13114
13115 I915_STATE_WARN(new_crtc_state->base.active != active,
13116 "crtc active state doesn't match with hw state "
13117 "(expected %i, found %i)\n", new_crtc_state->base.active, active);
13118
13119 I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
13120 "transitional active state does not match atomic hw state "
13121 "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
13122
13123 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13124 enum pipe pipe;
13125
13126 active = encoder->get_hw_state(encoder, &pipe);
13127 I915_STATE_WARN(active != new_crtc_state->base.active,
13128 "[ENCODER:%i] active %i with crtc active %i\n",
13129 encoder->base.base.id, active, new_crtc_state->base.active);
13130
13131 I915_STATE_WARN(active && crtc->pipe != pipe,
13132 "Encoder connected to wrong pipe %c\n",
13133 pipe_name(pipe));
13134
13135 if (active)
13136 encoder->get_config(encoder, pipe_config);
13137 }
13138
13139 intel_crtc_compute_pixel_rate(pipe_config);
13140
13141 if (!new_crtc_state->base.active)
13142 return;
13143
13144 intel_pipe_config_sanity_check(dev_priv, pipe_config);
13145
13146 if (!intel_pipe_config_compare(new_crtc_state,
13147 pipe_config, false)) {
13148 I915_STATE_WARN(1, "pipe state doesn't match!\n");
13149 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
13150 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
13151 }
13152 }
13153
13154 static void
13155 intel_verify_planes(struct intel_atomic_state *state)
13156 {
13157 struct intel_plane *plane;
13158 const struct intel_plane_state *plane_state;
13159 int i;
13160
13161 for_each_new_intel_plane_in_state(state, plane,
13162 plane_state, i)
13163 assert_plane(plane, plane_state->slave ||
13164 plane_state->base.visible);
13165 }
13166
13167 static void
13168 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13169 struct intel_shared_dpll *pll,
13170 struct intel_crtc *crtc,
13171 struct intel_crtc_state *new_crtc_state)
13172 {
13173 struct intel_dpll_hw_state dpll_hw_state;
13174 unsigned int crtc_mask;
13175 bool active;
13176
13177 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13178
13179 DRM_DEBUG_KMS("%s\n", pll->info->name);
13180
13181 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
13182
13183 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13184 I915_STATE_WARN(!pll->on && pll->active_mask,
13185 "pll in active use but not on in sw tracking\n");
13186 I915_STATE_WARN(pll->on && !pll->active_mask,
13187 "pll is on but not used by any active crtc\n");
13188 I915_STATE_WARN(pll->on != active,
13189 "pll on state mismatch (expected %i, found %i)\n",
13190 pll->on, active);
13191 }
13192
13193 if (!crtc) {
13194 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13195 "more active pll users than references: %x vs %x\n",
13196 pll->active_mask, pll->state.crtc_mask);
13197
13198 return;
13199 }
13200
13201 crtc_mask = drm_crtc_mask(&crtc->base);
13202
13203 if (new_crtc_state->base.active)
13204 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13205 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13206 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13207 else
13208 I915_STATE_WARN(pll->active_mask & crtc_mask,
13209 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13210 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13211
13212 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13213 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13214 crtc_mask, pll->state.crtc_mask);
13215
13216 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13217 &dpll_hw_state,
13218 sizeof(dpll_hw_state)),
13219 "pll hw state mismatch\n");
13220 }
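
/*
 * Illustrative sketch, not driver code: the invariants checked above in
 * miniature. The software "on" flag must agree with the hardware readout,
 * a DPLL is on exactly when it has active users, and every active user
 * must also hold a reference. All names here are local to the example.
 */
static bool example_dpll_state_consistent(bool sw_on, bool hw_on,
					  unsigned int active_mask,
					  unsigned int crtc_mask)
{
	if (sw_on != hw_on)
		return false;
	if (sw_on != (active_mask != 0))
		return false;

	/* every active user must also be in the reference mask */
	return (active_mask & ~crtc_mask) == 0;
}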
13221
13222 static void
13223 verify_shared_dpll_state(struct intel_crtc *crtc,
13224 struct intel_crtc_state *old_crtc_state,
13225 struct intel_crtc_state *new_crtc_state)
13226 {
13227 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13228
13229 if (new_crtc_state->shared_dpll)
13230 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13231
13232 if (old_crtc_state->shared_dpll &&
13233 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13234 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13235 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13236
13237 I915_STATE_WARN(pll->active_mask & crtc_mask,
13238 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13239 pipe_name(drm_crtc_index(&crtc->base)));
13240 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13241 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13242 pipe_name(drm_crtc_index(&crtc->base)));
13243 }
13244 }
13245
13246 static void
13247 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13248 struct intel_atomic_state *state,
13249 struct intel_crtc_state *old_crtc_state,
13250 struct intel_crtc_state *new_crtc_state)
13251 {
13252 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13253 return;
13254
13255 verify_wm_state(crtc, new_crtc_state);
13256 verify_connector_state(state, crtc);
13257 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13258 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13259 }
13260
13261 static void
13262 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13263 {
13264 int i;
13265
13266 for (i = 0; i < dev_priv->num_shared_dpll; i++)
13267 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13268 }
13269
13270 static void
13271 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
13272 struct intel_atomic_state *state)
13273 {
13274 verify_encoder_state(dev_priv, state);
13275 verify_connector_state(state, NULL);
13276 verify_disabled_dpll_state(dev_priv);
13277 }
13278
13279 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
13280 {
13281 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13282 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13283
13284 /*
13285 * The scanline counter increments at the leading edge of hsync.
13286 *
13287 * On most platforms it starts counting from vtotal-1 on the
13288 * first active line. That means the scanline counter value is
13289 * always one less than what we would expect. I.e. just after
13290 * start of vblank, which also occurs at start of hsync (on the
13291 * last active line), the scanline counter will read vblank_start-1.
13292 *
13293 * On gen2 the scanline counter starts counting from 1 instead
13294 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13295 * to keep the value positive), instead of adding one.
13296 *
13297 * On HSW+ the behaviour of the scanline counter depends on the output
13298 * type. For DP ports it behaves like most other platforms, but on HDMI
13299 * there's an extra 1 line difference. So we need to add two instead of
13300 * one to the value.
13301 *
13302 * On VLV/CHV DSI the scanline counter would appear to increment
13303 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13304 * that means we can't tell whether we're in vblank or not while
13305 * we're on that particular line. We must still set scanline_offset
13306 * to 1 so that the vblank timestamps come out correct when we query
13307 * the scanline counter from within the vblank interrupt handler.
13308 * However if queried just before the start of vblank we'll get an
13309 * answer that's slightly in the future.
13310 */
13311 if (IS_GEN(dev_priv, 2)) {
13312 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
13313 int vtotal;
13314
13315 vtotal = adjusted_mode->crtc_vtotal;
13316 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13317 vtotal /= 2;
13318
13319 crtc->scanline_offset = vtotal - 1;
13320 } else if (HAS_DDI(dev_priv) &&
13321 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13322 crtc->scanline_offset = 2;
13323 } else
13324 crtc->scanline_offset = 1;
13325 }
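
/*
 * Illustrative sketch, not driver code: how scanline_offset is meant to
 * be applied. Adding the per-platform offset to the raw counter value,
 * modulo vtotal, yields a scanline that reads vblank_start exactly at
 * the start of vblank. The name and types are local to the example.
 */
static int example_logical_scanline(int hw_scanline, int scanline_offset,
				    int vtotal)
{
	return (hw_scanline + scanline_offset) % vtotal;
}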
13326
13327 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13328 {
13329 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13330 struct intel_crtc_state *new_crtc_state;
13331 struct intel_crtc *crtc;
13332 int i;
13333
13334 if (!dev_priv->display.crtc_compute_clock)
13335 return;
13336
13337 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13338 if (!needs_modeset(new_crtc_state))
13339 continue;
13340
13341 intel_release_shared_dplls(state, crtc);
13342 }
13343 }
13344
13345 /*
13346 * This implements the workaround described in the "notes" section of the mode
13347 * set sequence documentation. When going from no pipes or single pipe to
13348 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13349 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13350 */
13351 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13352 {
13353 struct intel_crtc_state *crtc_state;
13354 struct intel_crtc *crtc;
13355 struct intel_crtc_state *first_crtc_state = NULL;
13356 struct intel_crtc_state *other_crtc_state = NULL;
13357 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13358 int i;
13359
13360 /* look at all crtcs that are going to be enabled during the modeset */
13361 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13362 if (!crtc_state->base.active ||
13363 !needs_modeset(crtc_state))
13364 continue;
13365
13366 if (first_crtc_state) {
13367 other_crtc_state = crtc_state;
13368 break;
13369 } else {
13370 first_crtc_state = crtc_state;
13371 first_pipe = crtc->pipe;
13372 }
13373 }
13374
13375 /* No workaround needed? */
13376 if (!first_crtc_state)
13377 return 0;
13378
13379 /* w/a possibly needed, check how many crtcs are already enabled. */
13380 for_each_intel_crtc(state->base.dev, crtc) {
13381 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13382 if (IS_ERR(crtc_state))
13383 return PTR_ERR(crtc_state);
13384
13385 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13386
13387 if (!crtc_state->base.active ||
13388 needs_modeset(crtc_state))
13389 continue;
13390
13391 /* 2 or more enabled crtcs means no need for w/a */
13392 if (enabled_pipe != INVALID_PIPE)
13393 return 0;
13394
13395 enabled_pipe = crtc->pipe;
13396 }
13397
13398 if (enabled_pipe != INVALID_PIPE)
13399 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13400 else if (other_crtc_state)
13401 other_crtc_state->hsw_workaround_pipe = first_pipe;
13402
13403 return 0;
13404 }
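
/*
 * Illustrative sketch, not driver code, of the decision above: with one
 * pipe already running, the first freshly enabled pipe waits on it; when
 * going from zero to two pipes, the second freshly enabled pipe waits on
 * the first. A value of -1 stands for INVALID_PIPE; all identifiers are
 * local to the example.
 */
static void example_pick_workaround_pipes(int enabled_pipe,
					  int first_new_pipe,
					  int second_new_pipe,
					  int *first_waits_on,
					  int *second_waits_on)
{
	*first_waits_on = -1;
	*second_waits_on = -1;

	if (enabled_pipe != -1)
		*first_waits_on = enabled_pipe;
	else if (second_new_pipe != -1)
		*second_waits_on = first_new_pipe;
}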
13405
13406 static int intel_lock_all_pipes(struct intel_atomic_state *state)
13407 {
13408 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13409 struct intel_crtc *crtc;
13410
13411 /* Add all pipes to the state */
13412 for_each_intel_crtc(&dev_priv->drm, crtc) {
13413 struct intel_crtc_state *crtc_state;
13414
13415 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13416 if (IS_ERR(crtc_state))
13417 return PTR_ERR(crtc_state);
13418 }
13419
13420 return 0;
13421 }
13422
13423 static int intel_modeset_all_pipes(struct intel_atomic_state *state)
13424 {
13425 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13426 struct intel_crtc *crtc;
13427
13428 /*
13429 * Add all pipes to the state, and force
13430 * a modeset on all the active ones.
13431 */
13432 for_each_intel_crtc(&dev_priv->drm, crtc) {
13433 struct intel_crtc_state *crtc_state;
13434 int ret;
13435
13436 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13437 if (IS_ERR(crtc_state))
13438 return PTR_ERR(crtc_state);
13439
13440 if (!crtc_state->base.active || needs_modeset(crtc_state))
13441 continue;
13442
13443 crtc_state->base.mode_changed = true;
13444
13445 ret = drm_atomic_add_affected_connectors(&state->base,
13446 &crtc->base);
13447 if (ret)
13448 return ret;
13449
13450 ret = drm_atomic_add_affected_planes(&state->base,
13451 &crtc->base);
13452 if (ret)
13453 return ret;
13454 }
13455
13456 return 0;
13457 }
13458
13459 static int intel_modeset_checks(struct intel_atomic_state *state)
13460 {
13461 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13462 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13463 struct intel_crtc *crtc;
13464 int ret = 0, i;
13465
13466 if (!check_digital_port_conflicts(state)) {
13467 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13468 return -EINVAL;
13469 }
13470
13471 /* keep the current setting */
13472 if (!state->cdclk.force_min_cdclk_changed)
13473 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
13474
13475 state->modeset = true;
13476 state->active_crtcs = dev_priv->active_crtcs;
13477 state->cdclk.logical = dev_priv->cdclk.logical;
13478 state->cdclk.actual = dev_priv->cdclk.actual;
13479 state->cdclk.pipe = INVALID_PIPE;
13480
13481 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13482 new_crtc_state, i) {
13483 if (new_crtc_state->base.active)
13484 state->active_crtcs |= 1 << i;
13485 else
13486 state->active_crtcs &= ~(1 << i);
13487
13488 if (old_crtc_state->base.active != new_crtc_state->base.active)
13489 state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
13490 }
13491
13492 /*
13493 * See if the config requires any additional preparation, e.g.
13494 * to adjust global state with pipes off. We need to do this
13495 * here so we can get the modeset_pipe updated config for the new
13496 * mode set on this crtc. For other crtcs we need to use the
13497 * adjusted_mode bits in the crtc directly.
13498 */
13499 if (dev_priv->display.modeset_calc_cdclk) {
13500 enum pipe pipe;
13501
13502 ret = dev_priv->display.modeset_calc_cdclk(state);
13503 if (ret < 0)
13504 return ret;
13505
13506 /*
13507 * Writes to dev_priv->cdclk.logical must be protected by
13508 * holding all the crtc locks, even if we don't end up
13509 * touching the hardware
13510 */
13511 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
13512 &state->cdclk.logical)) {
13513 ret = intel_lock_all_pipes(state);
13514 if (ret < 0)
13515 return ret;
13516 }
13517
13518 if (is_power_of_2(state->active_crtcs)) {
13519 struct intel_crtc *crtc;
13520 struct intel_crtc_state *crtc_state;
13521
13522 pipe = ilog2(state->active_crtcs);
13523 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13524 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13525 if (crtc_state && needs_modeset(crtc_state))
13526 pipe = INVALID_PIPE;
13527 } else {
13528 pipe = INVALID_PIPE;
13529 }
13530
13531 /* All pipes must be switched off while we change the cdclk. */
13532 if (pipe != INVALID_PIPE &&
13533 intel_cdclk_needs_cd2x_update(dev_priv,
13534 &dev_priv->cdclk.actual,
13535 &state->cdclk.actual)) {
13536 ret = intel_lock_all_pipes(state);
13537 if (ret < 0)
13538 return ret;
13539
13540 state->cdclk.pipe = pipe;
13541 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
13542 &state->cdclk.actual)) {
13543 ret = intel_modeset_all_pipes(state);
13544 if (ret < 0)
13545 return ret;
13546
13547 state->cdclk.pipe = INVALID_PIPE;
13548 }
13549
13550 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
13551 state->cdclk.logical.cdclk,
13552 state->cdclk.actual.cdclk);
13553 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
13554 state->cdclk.logical.voltage_level,
13555 state->cdclk.actual.voltage_level);
13556 }
13557
13558 intel_modeset_clear_plls(state);
13559
13560 if (IS_HASWELL(dev_priv))
13561 return haswell_mode_set_planes_workaround(state);
13562
13563 return 0;
13564 }
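
/*
 * Illustrative sketch, not driver code: the is_power_of_2()/ilog2() trick
 * used above. A mask with exactly one bit set is a power of two, and the
 * index of that bit is the single active pipe. Returns -1 when zero or
 * several pipes are active; names are local to the example.
 */
static int example_single_active_pipe(unsigned int active_mask)
{
	int pipe = 0;

	/* power-of-two test: exactly one bit set */
	if (active_mask == 0 || (active_mask & (active_mask - 1)))
		return -1;

	/* open-coded ilog2() for a power of two */
	while (active_mask >>= 1)
		pipe++;

	return pipe;
}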
13565
13566 /*
13567 * Handle calculation of various watermark data at the end of the atomic check
13568 * phase. The code here should be run after the per-crtc and per-plane 'check'
13569 * handlers to ensure that all derived state has been updated.
13570 */
13571 static int calc_watermark_data(struct intel_atomic_state *state)
13572 {
13573 struct drm_device *dev = state->base.dev;
13574 struct drm_i915_private *dev_priv = to_i915(dev);
13575
13576 /* Is there platform-specific watermark information to calculate? */
13577 if (dev_priv->display.compute_global_watermarks)
13578 return dev_priv->display.compute_global_watermarks(state);
13579
13580 return 0;
13581 }
13582
13583 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13584 struct intel_crtc_state *new_crtc_state)
13585 {
13586 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
13587 return;
13588
13589 new_crtc_state->base.mode_changed = false;
13590 new_crtc_state->update_pipe = true;
13591
13592 /*
13593 * If we're not doing the full modeset we want to
13594 * keep the current M/N values as they may be
13595 * sufficiently different to the computed values
13596 * to cause problems.
13597 *
13598 * FIXME: should really copy more fuzzy state here
13599 */
13600 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
13601 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
13602 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
13603 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
13604 }
13605
13606 /**
13607 * intel_atomic_check - validate state object
13608 * @dev: drm device
13609 * @_state: state to validate
13610 */
13611 static int intel_atomic_check(struct drm_device *dev,
13612 struct drm_atomic_state *_state)
13613 {
13614 struct drm_i915_private *dev_priv = to_i915(dev);
13615 struct intel_atomic_state *state = to_intel_atomic_state(_state);
13616 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13617 struct intel_crtc *crtc;
13618 int ret, i;
13619 bool any_ms = state->cdclk.force_min_cdclk_changed;
13620
13621 /* Catch I915_MODE_FLAG_INHERITED */
13622 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13623 new_crtc_state, i) {
13624 if (new_crtc_state->base.mode.private_flags !=
13625 old_crtc_state->base.mode.private_flags)
13626 new_crtc_state->base.mode_changed = true;
13627 }
13628
13629 ret = drm_atomic_helper_check_modeset(dev, &state->base);
13630 if (ret)
13631 goto fail;
13632
13633 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13634 new_crtc_state, i) {
13635 if (!needs_modeset(new_crtc_state))
13636 continue;
13637
13638 if (!new_crtc_state->base.enable) {
13639 any_ms = true;
13640 continue;
13641 }
13642
13643 ret = intel_modeset_pipe_config(new_crtc_state);
13644 if (ret)
13645 goto fail;
13646
13647 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
13648
13649 if (needs_modeset(new_crtc_state))
13650 any_ms = true;
13651 }
13652
13653 ret = drm_dp_mst_atomic_check(&state->base);
13654 if (ret)
13655 goto fail;
13656
13657 if (any_ms) {
13658 ret = intel_modeset_checks(state);
13659 if (ret)
13660 goto fail;
13661 } else {
13662 state->cdclk.logical = dev_priv->cdclk.logical;
13663 }
13664
13665 ret = icl_add_linked_planes(state);
13666 if (ret)
13667 goto fail;
13668
13669 ret = drm_atomic_helper_check_planes(dev, &state->base);
13670 if (ret)
13671 goto fail;
13672
13673 intel_fbc_choose_crtc(dev_priv, state);
13674 ret = calc_watermark_data(state);
13675 if (ret)
13676 goto fail;
13677
13678 ret = intel_bw_atomic_check(state);
13679 if (ret)
13680 goto fail;
13681
13682 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13683 new_crtc_state, i) {
13684 if (!needs_modeset(new_crtc_state) &&
13685 !new_crtc_state->update_pipe)
13686 continue;
13687
13688 intel_dump_pipe_config(new_crtc_state, state,
13689 needs_modeset(new_crtc_state) ?
13690 "[modeset]" : "[fastset]");
13691 }
13692
13693 return 0;
13694
13695 fail:
13696 if (ret == -EDEADLK)
13697 return ret;
13698
13699 /*
13700 * FIXME would probably be nice to know which crtc specifically
13701 * caused the failure, in cases where we can pinpoint it.
13702 */
13703 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13704 new_crtc_state, i)
13705 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
13706
13707 return ret;
13708 }
13709
13710 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
13711 {
13712 return drm_atomic_helper_prepare_planes(state->base.dev,
13713 &state->base);
13714 }
13715
13716 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13717 {
13718 struct drm_device *dev = crtc->base.dev;
13719 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13720
13721 if (!vblank->max_vblank_count)
13722 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13723
13724 return crtc->base.funcs->get_vblank_counter(&crtc->base);
13725 }
13726
13727 static void intel_update_crtc(struct intel_crtc *crtc,
13728 struct intel_atomic_state *state,
13729 struct intel_crtc_state *old_crtc_state,
13730 struct intel_crtc_state *new_crtc_state)
13731 {
13732 struct drm_device *dev = state->base.dev;
13733 struct drm_i915_private *dev_priv = to_i915(dev);
13734 bool modeset = needs_modeset(new_crtc_state);
13735 struct intel_plane_state *new_plane_state =
13736 intel_atomic_get_new_plane_state(state,
13737 to_intel_plane(crtc->base.primary));
13738
13739 if (modeset) {
13740 update_scanline_offset(new_crtc_state);
13741 dev_priv->display.crtc_enable(new_crtc_state, state);
13742
13743 /* vblanks work again, re-enable pipe CRC. */
13744 intel_crtc_enable_pipe_crc(crtc);
13745 } else {
13746 if (new_crtc_state->preload_luts &&
13747 (new_crtc_state->base.color_mgmt_changed ||
13748 new_crtc_state->update_pipe))
13749 intel_color_load_luts(new_crtc_state);
13750
13751 intel_pre_plane_update(old_crtc_state, new_crtc_state);
13752
13753 if (new_crtc_state->update_pipe)
13754 intel_encoders_update_pipe(crtc, new_crtc_state, state);
13755 }
13756
13757 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
13758 intel_fbc_disable(crtc);
13759 else if (new_plane_state)
13760 intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
13761
13762 intel_begin_crtc_commit(state, crtc);
13763
13764 if (INTEL_GEN(dev_priv) >= 9)
13765 skl_update_planes_on_crtc(state, crtc);
13766 else
13767 i9xx_update_planes_on_crtc(state, crtc);
13768
13769 intel_finish_crtc_commit(state, crtc);
13770 }
13771
13772 static void intel_update_crtcs(struct intel_atomic_state *state)
13773 {
13774 struct intel_crtc *crtc;
13775 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13776 int i;
13777
13778 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13779 if (!new_crtc_state->base.active)
13780 continue;
13781
13782 intel_update_crtc(crtc, state, old_crtc_state,
13783 new_crtc_state);
13784 }
13785 }
13786
13787 static void skl_update_crtcs(struct intel_atomic_state *state)
13788 {
13789 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13790 struct intel_crtc *crtc;
13791 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13792 unsigned int updated = 0;
13793 bool progress;
13794 enum pipe pipe;
13795 int i;
13796 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13797 u8 required_slices = state->wm_results.ddb.enabled_slices;
13798 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13799
13800 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13801 /* ignore allocations for crtcs that have been turned off. */
13802 if (new_crtc_state->base.active)
13803 entries[i] = old_crtc_state->wm.skl.ddb;
13804
13805 /* If 2nd DBuf slice required, enable it here */
13806 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13807 icl_dbuf_slices_update(dev_priv, required_slices);
13808
13809 /*
13810 * Whenever the number of active pipes changes, we need to make sure we
13811 * update the pipes in the right order so that their ddb allocations
13812 * never overlap with each other in between CRTC updates. Otherwise we'll
13813 * cause pipe underruns and other bad stuff.
13814 */
13815 do {
13816 progress = false;
13817
13818 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13819 bool vbl_wait = false;
13820 unsigned int cmask = drm_crtc_mask(&crtc->base);
13821
13822 pipe = crtc->pipe;
13823
13824 if (updated & cmask || !new_crtc_state->base.active)
13825 continue;
13826
13827 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
13828 entries,
13829 INTEL_INFO(dev_priv)->num_pipes, i))
13830 continue;
13831
13832 updated |= cmask;
13833 entries[i] = new_crtc_state->wm.skl.ddb;
13834
13835 /*
13836 * If this is an already active pipe, its DDB changed,
13837 * and this isn't the last pipe that needs updating,
13838 * then we need to wait for a vblank to pass for the
13839 * new ddb allocation to take effect.
13840 */
13841 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
13842 &old_crtc_state->wm.skl.ddb) &&
13843 !new_crtc_state->base.active_changed &&
13844 state->wm_results.dirty_pipes != updated)
13845 vbl_wait = true;
13846
13847 intel_update_crtc(crtc, state, old_crtc_state,
13848 new_crtc_state);
13849
13850 if (vbl_wait)
13851 intel_wait_for_vblank(dev_priv, pipe);
13852
13853 progress = true;
13854 }
13855 } while (progress);
13856
13857 /* If the 2nd DBuf slice is no longer required, disable it */
13858 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13859 icl_dbuf_slices_update(dev_priv, required_slices);
13860 }
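
/*
 * Illustrative sketch, not driver code, of the ordering loop above: a
 * pipe is committed only once its new DDB range no longer overlaps any
 * other pipe's currently programmed range, repeating until every pipe
 * has been handled. Assumes the new allocation is valid so the loop
 * always makes progress. Types and helpers are local to the example.
 */
struct example_ddb { int start, end; /* end is exclusive */ };

static bool example_ddb_overlaps(const struct example_ddb *a,
				 const struct example_ddb *b)
{
	return a->start < a->end && b->start < b->end &&
	       a->start < b->end && b->start < a->end;
}

static void example_commit_in_safe_order(struct example_ddb *cur,
					 const struct example_ddb *new,
					 int num_pipes)
{
	bool progress;

	do {
		int i, j;

		progress = false;

		for (i = 0; i < num_pipes; i++) {
			bool blocked = false;

			if (cur[i].start == new[i].start &&
			    cur[i].end == new[i].end)
				continue; /* already matches the new state */

			for (j = 0; j < num_pipes; j++)
				if (j != i &&
				    example_ddb_overlaps(&new[i], &cur[j]))
					blocked = true;

			if (blocked)
				continue; /* retry on a later pass */

			cur[i] = new[i]; /* "program" this pipe now */
			progress = true;
		}
	} while (progress);
}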
13861
13862 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13863 {
13864 struct intel_atomic_state *state, *next;
13865 struct llist_node *freed;
13866
13867 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13868 llist_for_each_entry_safe(state, next, freed, freed)
13869 drm_atomic_state_put(&state->base);
13870 }
13871
13872 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13873 {
13874 struct drm_i915_private *dev_priv =
13875 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13876
13877 intel_atomic_helper_free_state(dev_priv);
13878 }
13879
13880 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13881 {
13882 struct wait_queue_entry wait_fence, wait_reset;
13883 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13884
13885 init_wait_entry(&wait_fence, 0);
13886 init_wait_entry(&wait_reset, 0);
13887 for (;;) {
13888 prepare_to_wait(&intel_state->commit_ready.wait,
13889 &wait_fence, TASK_UNINTERRUPTIBLE);
13890 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
13891 I915_RESET_MODESET),
13892 &wait_reset, TASK_UNINTERRUPTIBLE);
13893
13895 if (i915_sw_fence_done(&intel_state->commit_ready) ||
13896 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
13897 break;
13898
13899 schedule();
13900 }
13901 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13902 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
13903 I915_RESET_MODESET),
13904 &wait_reset);
13905 }
13906
13907 static void intel_atomic_cleanup_work(struct work_struct *work)
13908 {
13909 struct drm_atomic_state *state =
13910 container_of(work, struct drm_atomic_state, commit_work);
13911 struct drm_i915_private *i915 = to_i915(state->dev);
13912
13913 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13914 drm_atomic_helper_commit_cleanup_done(state);
13915 drm_atomic_state_put(state);
13916
13917 intel_atomic_helper_free_state(i915);
13918 }
13919
13920 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
13921 {
13922 struct drm_device *dev = state->base.dev;
13923 struct drm_i915_private *dev_priv = to_i915(dev);
13924 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
13925 struct intel_crtc *crtc;
13926 u64 put_domains[I915_MAX_PIPES] = {};
13927 intel_wakeref_t wakeref = 0;
13928 int i;
13929
13930 intel_atomic_commit_fence_wait(state);
13931
13932 drm_atomic_helper_wait_for_dependencies(&state->base);
13933
13934 if (state->modeset)
13935 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13936
13937 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13938 if (needs_modeset(new_crtc_state) ||
13939 new_crtc_state->update_pipe) {
13940
13941 put_domains[crtc->pipe] =
13942 modeset_get_crtc_power_domains(new_crtc_state);
13943 }
13944
13945 if (!needs_modeset(new_crtc_state))
13946 continue;
13947
13948 intel_pre_plane_update(old_crtc_state, new_crtc_state);
13949
13950 if (old_crtc_state->base.active) {
13951 intel_crtc_disable_planes(state, crtc);
13952
13953 /*
13954 * We need to disable pipe CRC before disabling the pipe,
13955 * or we race against vblank off.
13956 */
13957 intel_crtc_disable_pipe_crc(crtc);
13958
13959 dev_priv->display.crtc_disable(old_crtc_state, state);
13960 crtc->active = false;
13961 intel_fbc_disable(crtc);
13962 intel_disable_shared_dpll(old_crtc_state);
13963
13964 /*
13965 * Underruns don't always raise
13966 * interrupts, so check manually.
13967 */
13968 intel_check_cpu_fifo_underruns(dev_priv);
13969 intel_check_pch_fifo_underruns(dev_priv);
13970
13971 /* FIXME unify this for all platforms */
13972 if (!new_crtc_state->base.active &&
13973 !HAS_GMCH(dev_priv) &&
13974 dev_priv->display.initial_watermarks)
13975 dev_priv->display.initial_watermarks(state,
13976 new_crtc_state);
13977 }
13978 }
13979
13980 /* FIXME: Eventually get rid of our crtc->config pointer */
13981 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
13982 crtc->config = new_crtc_state;
13983
13984 if (state->modeset) {
13985 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
13986
13987 intel_set_cdclk_pre_plane_update(dev_priv,
13988 &state->cdclk.actual,
13989 &dev_priv->cdclk.actual,
13990 state->cdclk.pipe);
13991
13992 /*
13993 * SKL workaround: bspec recommends we disable the SAGV when we
13994 * have more than one pipe enabled
13995 */
13996 if (!intel_can_enable_sagv(state))
13997 intel_disable_sagv(dev_priv);
13998
13999 intel_modeset_verify_disabled(dev_priv, state);
14000 }
14001
14002 /* Complete the events for pipes that have now been disabled */
14003 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14004 bool modeset = needs_modeset(new_crtc_state);
14005
14006 /* Complete events for now disabled pipes here. */
14007 if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
14008 spin_lock_irq(&dev->event_lock);
14009 drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
14010 spin_unlock_irq(&dev->event_lock);
14011
14012 new_crtc_state->base.event = NULL;
14013 }
14014 }
14015
14016 if (state->modeset)
14017 intel_encoders_update_prepare(state);
14018
14019 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
14020 dev_priv->display.update_crtcs(state);
14021
14022 if (state->modeset) {
14023 intel_encoders_update_complete(state);
14024
14025 intel_set_cdclk_post_plane_update(dev_priv,
14026 &state->cdclk.actual,
14027 &dev_priv->cdclk.actual,
14028 state->cdclk.pipe);
14029 }
14030
14031 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
14032 * already, but still need the state for the delayed optimization. To
14033 * fix this:
14034 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
14035 * - schedule that vblank worker _before_ calling hw_done
14036 * - at the start of commit_tail, cancel it synchronously
14037 * - switch over to the vblank wait helper in the core after that since
14038 * we don't need our special handling any more.
14039 */
14040 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
14041
14042 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14043 if (new_crtc_state->base.active &&
14044 !needs_modeset(new_crtc_state) &&
14045 !new_crtc_state->preload_luts &&
14046 (new_crtc_state->base.color_mgmt_changed ||
14047 new_crtc_state->update_pipe))
14048 intel_color_load_luts(new_crtc_state);
14049 }
14050
14051 /*
14052 * Now that the vblank has passed, we can go ahead and program the
14053 * optimal watermarks on platforms that need two-step watermark
14054 * programming.
14055 *
14056 * TODO: Move this (and other cleanup) to an async worker eventually.
14057 */
14058 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14059 if (dev_priv->display.optimize_watermarks)
14060 dev_priv->display.optimize_watermarks(state,
14061 new_crtc_state);
14062 }
14063
14064 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14065 intel_post_plane_update(old_crtc_state);
14066
14067 if (put_domains[i])
14068 modeset_put_power_domains(dev_priv, put_domains[i]);
14069
14070 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
14071 }
14072
14073 if (state->modeset)
14074 intel_verify_planes(state);
14075
14076 if (state->modeset && intel_can_enable_sagv(state))
14077 intel_enable_sagv(dev_priv);
14078
14079 drm_atomic_helper_commit_hw_done(&state->base);
14080
14081 if (state->modeset) {
14082 /* As one of the primary mmio accessors, KMS has a high
14083 * likelihood of triggering bugs in unclaimed access. After we
14084 * finish modesetting, see if an error has been flagged, and if
14085 * so enable debugging for the next modeset - and hope we catch
14086 * the culprit.
14087 */
14088 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
14089 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
14090 }
14091 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14092
14093 /*
14094 * Defer the cleanup of the old state to a separate worker to not
14095 * impede the current task (userspace for blocking modesets) that
14096 * is executed inline. For out-of-line asynchronous modesets/flips,
14097 * deferring to a new worker seems overkill, but we would place a
14098 * schedule point (cond_resched()) here anyway to keep latencies
14099 * down.
14100 */
14101 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
14102 queue_work(system_highpri_wq, &state->base.commit_work);
14103 }
14104
14105 static void intel_atomic_commit_work(struct work_struct *work)
14106 {
14107 struct intel_atomic_state *state =
14108 container_of(work, struct intel_atomic_state, base.commit_work);
14109
14110 intel_atomic_commit_tail(state);
14111 }
14112
14113 static int __i915_sw_fence_call
14114 intel_atomic_commit_ready(struct i915_sw_fence *fence,
14115 enum i915_sw_fence_notify notify)
14116 {
14117 struct intel_atomic_state *state =
14118 container_of(fence, struct intel_atomic_state, commit_ready);
14119
14120 switch (notify) {
14121 case FENCE_COMPLETE:
14122 /* we do blocking waits in the worker, nothing to do here */
14123 break;
14124 case FENCE_FREE:
14125 {
14126 struct intel_atomic_helper *helper =
14127 &to_i915(state->base.dev)->atomic_helper;
14128
14129 if (llist_add(&state->freed, &helper->free_list))
14130 schedule_work(&helper->free_work);
14131 break;
14132 }
14133 }
14134
14135 return NOTIFY_DONE;
14136 }
14137
14138 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
14139 {
14140 struct intel_plane_state *old_plane_state, *new_plane_state;
14141 struct intel_plane *plane;
14142 int i;
14143
14144 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
14145 new_plane_state, i)
14146 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
14147 to_intel_frontbuffer(new_plane_state->base.fb),
14148 plane->frontbuffer_bit);
14149 }
14150
14151 static int intel_atomic_commit(struct drm_device *dev,
14152 struct drm_atomic_state *_state,
14153 bool nonblock)
14154 {
14155 struct intel_atomic_state *state = to_intel_atomic_state(_state);
14156 struct drm_i915_private *dev_priv = to_i915(dev);
14157 int ret = 0;
14158
14159 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
14160
14161 drm_atomic_state_get(&state->base);
14162 i915_sw_fence_init(&state->commit_ready,
14163 intel_atomic_commit_ready);
14164
14165 /*
14166 * The intel_legacy_cursor_update() fast path takes care
14167 * of avoiding the vblank waits for simple cursor
14168 * movement and flips. For cursor on/off and size changes,
14169 * we want to perform the vblank waits so that watermark
14170 * updates happen during the correct frames. Gen9+ have
14171 * double buffered watermarks and so shouldn't need this.
14172 *
14173 * Unset state->legacy_cursor_update before the call to
14174 * drm_atomic_helper_setup_commit() because otherwise
14175 * drm_atomic_helper_wait_for_flip_done() is a noop and
14176 * we get FIFO underruns because we didn't wait
14177 * for vblank.
14178 *
14179 * FIXME doing watermarks and fb cleanup from a vblank worker
14180 * (assuming we had any) would solve these problems.
14181 */
14182 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
14183 struct intel_crtc_state *new_crtc_state;
14184 struct intel_crtc *crtc;
14185 int i;
14186
14187 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
14188 if (new_crtc_state->wm.need_postvbl_update ||
14189 new_crtc_state->update_wm_post)
14190 state->base.legacy_cursor_update = false;
14191 }
14192
14193 ret = intel_atomic_prepare_commit(state);
14194 if (ret) {
14195 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
14196 i915_sw_fence_commit(&state->commit_ready);
14197 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14198 return ret;
14199 }
14200
14201 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
14202 if (!ret)
14203 ret = drm_atomic_helper_swap_state(&state->base, true);
14204
14205 if (ret) {
14206 i915_sw_fence_commit(&state->commit_ready);
14207
14208 drm_atomic_helper_cleanup_planes(dev, &state->base);
14209 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14210 return ret;
14211 }
14212 dev_priv->wm.distrust_bios_wm = false;
14213 intel_shared_dpll_swap_state(state);
14214 intel_atomic_track_fbs(state);
14215
14216 if (state->modeset) {
14217 memcpy(dev_priv->min_cdclk, state->min_cdclk,
14218 sizeof(state->min_cdclk));
14219 memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
14220 sizeof(state->min_voltage_level));
14221 dev_priv->active_crtcs = state->active_crtcs;
14222 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
14223
14224 intel_cdclk_swap_state(state);
14225 }
14226
14227 drm_atomic_state_get(&state->base);
14228 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
14229
14230 i915_sw_fence_commit(&state->commit_ready);
14231 if (nonblock && state->modeset) {
14232 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
14233 } else if (nonblock) {
14234 queue_work(system_unbound_wq, &state->base.commit_work);
14235 } else {
14236 if (state->modeset)
14237 flush_workqueue(dev_priv->modeset_wq);
14238 intel_atomic_commit_tail(state);
14239 }
14240
14241 return 0;
14242 }
14243
14244 struct wait_rps_boost {
14245 struct wait_queue_entry wait;
14246
14247 struct drm_crtc *crtc;
14248 struct i915_request *request;
14249 };
14250
14251 static int do_rps_boost(struct wait_queue_entry *_wait,
14252 unsigned mode, int sync, void *key)
14253 {
14254 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14255 struct i915_request *rq = wait->request;
14256
14257 /*
14258 * If we missed the vblank, but the request is already running it
14259 * is reasonable to assume that it will complete before the next
14260 * vblank without our intervention, so leave RPS alone.
14261 */
14262 if (!i915_request_started(rq))
14263 gen6_rps_boost(rq);
14264 i915_request_put(rq);
14265
14266 drm_crtc_vblank_put(wait->crtc);
14267
14268 list_del(&wait->wait.entry);
14269 kfree(wait);
14270 return 1;
14271 }
14272
14273 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14274 struct dma_fence *fence)
14275 {
14276 struct wait_rps_boost *wait;
14277
14278 if (!dma_fence_is_i915(fence))
14279 return;
14280
14281 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14282 return;
14283
14284 if (drm_crtc_vblank_get(crtc))
14285 return;
14286
14287 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14288 if (!wait) {
14289 drm_crtc_vblank_put(crtc);
14290 return;
14291 }
14292
14293 wait->request = to_request(dma_fence_get(fence));
14294 wait->crtc = crtc;
14295
14296 wait->wait.func = do_rps_boost;
14297 wait->wait.flags = 0;
14298
14299 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14300 }
14301
14302 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14303 {
14304 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14305 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14306 struct drm_framebuffer *fb = plane_state->base.fb;
14307 struct i915_vma *vma;
14308
14309 if (plane->id == PLANE_CURSOR &&
14310 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
14311 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14312 const int align = intel_cursor_alignment(dev_priv);
14313 int err;
14314
14315 err = i915_gem_object_attach_phys(obj, align);
14316 if (err)
14317 return err;
14318 }
14319
14320 vma = intel_pin_and_fence_fb_obj(fb,
14321 &plane_state->view,
14322 intel_plane_uses_fence(plane_state),
14323 &plane_state->flags);
14324 if (IS_ERR(vma))
14325 return PTR_ERR(vma);
14326
14327 plane_state->vma = vma;
14328
14329 return 0;
14330 }
14331
14332 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14333 {
14334 struct i915_vma *vma;
14335
14336 vma = fetch_and_zero(&old_plane_state->vma);
14337 if (vma)
14338 intel_unpin_fb_vma(vma, old_plane_state->flags);
14339 }
14340
14341 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
14342 {
14343 struct i915_sched_attr attr = {
14344 .priority = I915_PRIORITY_DISPLAY,
14345 };
14346
14347 i915_gem_object_wait_priority(obj, 0, &attr);
14348 }
14349
14350 /**
14351 * intel_prepare_plane_fb - Prepare fb for usage on plane
14352 * @plane: drm plane to prepare for
14353 * @new_state: the plane state being prepared
14354 *
14355 * Prepares a framebuffer for usage on a display plane. Generally this
14356 * involves pinning the underlying object and updating the frontbuffer tracking
14357 * bits. Some older platforms need special physical address handling for
14358 * cursor planes.
14359 *
14360 * Must be called with struct_mutex held.
14361 *
14362 * Returns 0 on success, negative error code on failure.
14363 */
14364 int
14365 intel_prepare_plane_fb(struct drm_plane *plane,
14366 struct drm_plane_state *new_state)
14367 {
14368 struct intel_atomic_state *intel_state =
14369 to_intel_atomic_state(new_state->state);
14370 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14371 struct drm_framebuffer *fb = new_state->fb;
14372 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14373 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14374 int ret;
14375
14376 if (old_obj) {
14377 struct intel_crtc_state *crtc_state =
14378 intel_atomic_get_new_crtc_state(intel_state,
14379 to_intel_crtc(plane->state->crtc));
14380
14381 /* Big Hammer, we also need to ensure that any pending
14382 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14383 * current scanout is retired before unpinning the old
14384 * framebuffer. Note that we rely on userspace rendering
14385 * into the buffer attached to the pipe they are waiting
14386 * on. If not, userspace generates a GPU hang with IPEHR
14387 * pointing to the MI_WAIT_FOR_EVENT.
14388 *
14389 * This should only fail upon a hung GPU, in which case we
14390 * can safely continue.
14391 */
14392 if (needs_modeset(crtc_state)) {
14393 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14394 old_obj->base.resv, NULL,
14395 false, 0,
14396 GFP_KERNEL);
14397 if (ret < 0)
14398 return ret;
14399 }
14400 }
14401
14402 if (new_state->fence) { /* explicit fencing */
14403 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14404 new_state->fence,
14405 I915_FENCE_TIMEOUT,
14406 GFP_KERNEL);
14407 if (ret < 0)
14408 return ret;
14409 }
14410
14411 if (!obj)
14412 return 0;
14413
14414 ret = i915_gem_object_pin_pages(obj);
14415 if (ret)
14416 return ret;
14417
14418 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14419 if (ret) {
14420 i915_gem_object_unpin_pages(obj);
14421 return ret;
14422 }
14423
14424 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14425
14426 mutex_unlock(&dev_priv->drm.struct_mutex);
14427 i915_gem_object_unpin_pages(obj);
14428 if (ret)
14429 return ret;
14430
14431 fb_obj_bump_render_priority(obj);
14432 intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
14433
14434 if (!new_state->fence) { /* implicit fencing */
14435 struct dma_fence *fence;
14436
14437 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14438 obj->base.resv, NULL,
14439 false, I915_FENCE_TIMEOUT,
14440 GFP_KERNEL);
14441 if (ret < 0)
14442 return ret;
14443
14444 fence = dma_resv_get_excl_rcu(obj->base.resv);
14445 if (fence) {
14446 add_rps_boost_after_vblank(new_state->crtc, fence);
14447 dma_fence_put(fence);
14448 }
14449 } else {
14450 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14451 }
14452
14453 /*
14454 * We declare pageflips to be interactive and so merit a small bias
14455 * towards upclocking to deliver the frame on time. By only changing
14456 * the RPS thresholds to sample more regularly and aim for higher
14457 * clocks we can hopefully deliver low power workloads (like kodi)
14458 * that are not quite steady state without resorting to forcing
14459 * maximum clocks following a vblank miss (see do_rps_boost()).
14460 */
14461 if (!intel_state->rps_interactive) {
14462 intel_rps_mark_interactive(dev_priv, true);
14463 intel_state->rps_interactive = true;
14464 }
14465
14466 return 0;
14467 }
14468
14469 /**
14470 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14471 * @plane: drm plane to clean up for
14472 * @old_state: the state from the previous modeset
14473 *
14474 * Cleans up a framebuffer that has just been removed from a plane.
14475 *
14476 * Must be called with struct_mutex held.
14477 */
14478 void
14479 intel_cleanup_plane_fb(struct drm_plane *plane,
14480 struct drm_plane_state *old_state)
14481 {
14482 struct intel_atomic_state *intel_state =
14483 to_intel_atomic_state(old_state->state);
14484 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14485
14486 if (intel_state->rps_interactive) {
14487 intel_rps_mark_interactive(dev_priv, false);
14488 intel_state->rps_interactive = false;
14489 }
14490
14491 /* Should only be called after a successful intel_prepare_plane_fb()! */
14492 mutex_lock(&dev_priv->drm.struct_mutex);
14493 intel_plane_unpin_fb(to_intel_plane_state(old_state));
14494 mutex_unlock(&dev_priv->drm.struct_mutex);
14495 }
14496
14497 int
14498 skl_max_scale(const struct intel_crtc_state *crtc_state,
14499 u32 pixel_format)
14500 {
14501 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14502 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14503 int max_scale, mult;
14504 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14505
14506 if (!crtc_state->base.enable)
14507 return DRM_PLANE_HELPER_NO_SCALING;
14508
14509 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14510 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14511
14512 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14513 max_dotclk *= 2;
14514
14515 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14516 return DRM_PLANE_HELPER_NO_SCALING;
14517
14518 /*
14519 * skl max scale is the lower of:
14520 * just under 3 (the -1 keeps the value strictly below 3)
14521 * or
14522 * cdclk/crtc_clock
14523 */
14524 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14525 tmpclk1 = (1 << 16) * mult - 1;
14526 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14527 max_scale = min(tmpclk1, tmpclk2);
14528
14529 return max_scale;
14530 }
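
/*
 * Illustrative sketch, not driver code: the 16.16 fixed-point arithmetic
 * above, with made-up numbers. A downscale factor f is stored as
 * f * 65536, so (3 << 16) - 1 is "just under 3.0". With, say,
 * max_dotclk = 648000 and crtc_clock = 148500 the clock-derived limit is
 * about 4.36 in 16.16, so the hardware limit of ~3.0 wins.
 */
static int example_skl_max_scale(int max_dotclk, int crtc_clock, bool planar)
{
	int mult = planar ? 2 : 3;
	int hw_limit = (mult << 16) - 1; /* just under 3.0 (or 2.0) */
	int clk_limit = (1 << 8) * ((max_dotclk << 8) / crtc_clock);

	return clk_limit < hw_limit ? clk_limit : hw_limit;
}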
14531
14532 static void intel_begin_crtc_commit(struct intel_atomic_state *state,
14533 struct intel_crtc *crtc)
14534 {
14535 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14536 struct intel_crtc_state *old_crtc_state =
14537 intel_atomic_get_old_crtc_state(state, crtc);
14538 struct intel_crtc_state *new_crtc_state =
14539 intel_atomic_get_new_crtc_state(state, crtc);
14540 bool modeset = needs_modeset(new_crtc_state);
14541
14542 /* Perform vblank evasion around commit operation */
14543 intel_pipe_update_start(new_crtc_state);
14544
14545 if (modeset)
14546 goto out;
14547
14548 if (new_crtc_state->base.color_mgmt_changed ||
14549 new_crtc_state->update_pipe)
14550 intel_color_commit(new_crtc_state);
14551
14552 if (new_crtc_state->update_pipe)
14553 intel_update_pipe_config(old_crtc_state, new_crtc_state);
14554 else if (INTEL_GEN(dev_priv) >= 9)
14555 skl_detach_scalers(new_crtc_state);
14556
14557 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14558 bdw_set_pipemisc(new_crtc_state);
14559
14560 out:
14561 if (dev_priv->display.atomic_update_watermarks)
14562 dev_priv->display.atomic_update_watermarks(state,
14563 new_crtc_state);
14564 }
14565
14566 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14567 struct intel_crtc_state *crtc_state)
14568 {
14569 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14570
14571 if (!IS_GEN(dev_priv, 2))
14572 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14573
14574 if (crtc_state->has_pch_encoder) {
14575 enum pipe pch_transcoder =
14576 intel_crtc_pch_transcoder(crtc);
14577
14578 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14579 }
14580 }
14581
14582 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14583 struct intel_crtc *crtc)
14584 {
14585 struct intel_crtc_state *old_crtc_state =
14586 intel_atomic_get_old_crtc_state(state, crtc);
14587 struct intel_crtc_state *new_crtc_state =
14588 intel_atomic_get_new_crtc_state(state, crtc);
14589
14590 intel_pipe_update_end(new_crtc_state);
14591
14592 if (new_crtc_state->update_pipe &&
14593 !needs_modeset(new_crtc_state) &&
14594 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14595 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14596 }
14597
14598 /**
14599 * intel_plane_destroy - destroy a plane
14600 * @plane: plane to destroy
14601 *
14602 * Common destruction function for all types of planes (primary, cursor,
14603 * sprite).
14604 */
14605 void intel_plane_destroy(struct drm_plane *plane)
14606 {
14607 drm_plane_cleanup(plane);
14608 kfree(to_intel_plane(plane));
14609 }
14610
14611 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14612 u32 format, u64 modifier)
14613 {
14614 switch (modifier) {
14615 case DRM_FORMAT_MOD_LINEAR:
14616 case I915_FORMAT_MOD_X_TILED:
14617 break;
14618 default:
14619 return false;
14620 }
14621
14622 switch (format) {
14623 case DRM_FORMAT_C8:
14624 case DRM_FORMAT_RGB565:
14625 case DRM_FORMAT_XRGB1555:
14626 case DRM_FORMAT_XRGB8888:
14627 return modifier == DRM_FORMAT_MOD_LINEAR ||
14628 modifier == I915_FORMAT_MOD_X_TILED;
14629 default:
14630 return false;
14631 }
14632 }
14633
14634 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14635 u32 format, u64 modifier)
14636 {
14637 switch (modifier) {
14638 case DRM_FORMAT_MOD_LINEAR:
14639 case I915_FORMAT_MOD_X_TILED:
14640 break;
14641 default:
14642 return false;
14643 }
14644
14645 switch (format) {
14646 case DRM_FORMAT_C8:
14647 case DRM_FORMAT_RGB565:
14648 case DRM_FORMAT_XRGB8888:
14649 case DRM_FORMAT_XBGR8888:
14650 case DRM_FORMAT_XRGB2101010:
14651 case DRM_FORMAT_XBGR2101010:
14652 return modifier == DRM_FORMAT_MOD_LINEAR ||
14653 modifier == I915_FORMAT_MOD_X_TILED;
14654 default:
14655 return false;
14656 }
14657 }
14658
14659 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14660 u32 format, u64 modifier)
14661 {
14662 return modifier == DRM_FORMAT_MOD_LINEAR &&
14663 format == DRM_FORMAT_ARGB8888;
14664 }
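
/*
 * Illustrative sketch, not driver code: the hooks above answer a single
 * question, "is this format/modifier pair valid on this plane?". A
 * caller could filter a candidate format list with them; the function
 * below is a local example, not a DRM core API.
 */
static unsigned int example_count_supported(struct drm_plane *plane,
					    const u32 *formats,
					    int num_formats, u64 modifier)
{
	unsigned int supported = 0;
	int i;

	for (i = 0; i < num_formats; i++)
		if (plane->funcs->format_mod_supported &&
		    plane->funcs->format_mod_supported(plane, formats[i],
						       modifier))
			supported++;

	return supported;
}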
14665
14666 static const struct drm_plane_funcs i965_plane_funcs = {
14667 .update_plane = drm_atomic_helper_update_plane,
14668 .disable_plane = drm_atomic_helper_disable_plane,
14669 .destroy = intel_plane_destroy,
14670 .atomic_duplicate_state = intel_plane_duplicate_state,
14671 .atomic_destroy_state = intel_plane_destroy_state,
14672 .format_mod_supported = i965_plane_format_mod_supported,
14673 };
14674
14675 static const struct drm_plane_funcs i8xx_plane_funcs = {
14676 .update_plane = drm_atomic_helper_update_plane,
14677 .disable_plane = drm_atomic_helper_disable_plane,
14678 .destroy = intel_plane_destroy,
14679 .atomic_duplicate_state = intel_plane_duplicate_state,
14680 .atomic_destroy_state = intel_plane_destroy_state,
14681 .format_mod_supported = i8xx_plane_format_mod_supported,
14682 };
14683
14684 static int
14685 intel_legacy_cursor_update(struct drm_plane *plane,
14686 struct drm_crtc *crtc,
14687 struct drm_framebuffer *fb,
14688 int crtc_x, int crtc_y,
14689 unsigned int crtc_w, unsigned int crtc_h,
14690 u32 src_x, u32 src_y,
14691 u32 src_w, u32 src_h,
14692 struct drm_modeset_acquire_ctx *ctx)
14693 {
14694 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14695 struct drm_plane_state *old_plane_state, *new_plane_state;
14696 struct intel_plane *intel_plane = to_intel_plane(plane);
14697 struct intel_crtc_state *crtc_state =
14698 to_intel_crtc_state(crtc->state);
14699 struct intel_crtc_state *new_crtc_state;
14700 int ret;
14701
14702 /*
14703 * When crtc is inactive or there is a modeset pending,
14704 * wait for it to complete in the slowpath
14705 */
14706 if (!crtc_state->base.active || needs_modeset(crtc_state) ||
14707 crtc_state->update_pipe)
14708 goto slow;
14709
14710 old_plane_state = plane->state;
14711 /*
14712 * Don't do an async update if there is an outstanding commit modifying
14713 * the plane. This prevents our async update's changes from getting
14714 * overridden by a previous synchronous update's state.
14715 */
14716 if (old_plane_state->commit &&
14717 !try_wait_for_completion(&old_plane_state->commit->hw_done))
14718 goto slow;
14719
14720 /*
14721 * If any parameters change that may affect watermarks,
14722 * take the slowpath. Only changing fb or position should be
14723 * in the fastpath.
14724 */
14725 if (old_plane_state->crtc != crtc ||
14726 old_plane_state->src_w != src_w ||
14727 old_plane_state->src_h != src_h ||
14728 old_plane_state->crtc_w != crtc_w ||
14729 old_plane_state->crtc_h != crtc_h ||
14730 !old_plane_state->fb != !fb)
14731 goto slow;
14732
14733 new_plane_state = intel_plane_duplicate_state(plane);
14734 if (!new_plane_state)
14735 return -ENOMEM;
14736
14737 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14738 if (!new_crtc_state) {
14739 ret = -ENOMEM;
14740 goto out_free;
14741 }
14742
14743 drm_atomic_set_fb_for_plane(new_plane_state, fb);
14744
14745 new_plane_state->src_x = src_x;
14746 new_plane_state->src_y = src_y;
14747 new_plane_state->src_w = src_w;
14748 new_plane_state->src_h = src_h;
14749 new_plane_state->crtc_x = crtc_x;
14750 new_plane_state->crtc_y = crtc_y;
14751 new_plane_state->crtc_w = crtc_w;
14752 new_plane_state->crtc_h = crtc_h;
14753
14754 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14755 to_intel_plane_state(old_plane_state),
14756 to_intel_plane_state(new_plane_state));
14757 if (ret)
14758 goto out_free;
14759
14760 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14761 if (ret)
14762 goto out_free;
14763
14764 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14765 if (ret)
14766 goto out_unlock;
14767
14768 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
14769 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
14770 to_intel_frontbuffer(fb),
14771 intel_plane->frontbuffer_bit);
14772
14773 /* Swap plane state */
14774 plane->state = new_plane_state;
14775
14776 /*
14777 * We cannot swap crtc_state as it may be in use by an atomic commit or
14778 * page flip that's running simultaneously. If we swap crtc_state and
14779 * destroy the old state, we will cause a use-after-free there.
14780 *
14781 * Only update active_planes, which is needed for our internal
14782 * bookkeeping. Either value will do the right thing when updating
14783 * planes atomically. If the cursor was part of the atomic update then
14784 * we would have taken the slowpath.
14785 */
14786 crtc_state->active_planes = new_crtc_state->active_planes;
14787
14788 if (plane->state->visible)
14789 intel_update_plane(intel_plane, crtc_state,
14790 to_intel_plane_state(plane->state));
14791 else
14792 intel_disable_plane(intel_plane, crtc_state);
14793
14794 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14795
14796 out_unlock:
14797 mutex_unlock(&dev_priv->drm.struct_mutex);
14798 out_free:
14799 if (new_crtc_state)
14800 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14801 if (ret)
14802 intel_plane_destroy_state(plane, new_plane_state);
14803 else
14804 intel_plane_destroy_state(plane, old_plane_state);
14805 return ret;
14806
14807 slow:
14808 return drm_atomic_helper_update_plane(plane, crtc, fb,
14809 crtc_x, crtc_y, crtc_w, crtc_h,
14810 src_x, src_y, src_w, src_h, ctx);
14811 }
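
/*
 * In rough pseudo-code (an illustrative sketch, not a second
 * implementation), the fastpath decision above boils down to:
 *
 *	if (!active || modeset pending || size/crtc changed)
 *		return drm_atomic_helper_update_plane(...);  // slowpath
 *	// else: duplicate state, pin the new fb, swap plane->state
 *	// and program the plane directly, bypassing the full commit
 *
 * Only pure fb/position changes of an already-configured cursor take
 * the fastpath; everything else funnels into the atomic helpers.
 */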
14812
14813 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14814 .update_plane = intel_legacy_cursor_update,
14815 .disable_plane = drm_atomic_helper_disable_plane,
14816 .destroy = intel_plane_destroy,
14817 .atomic_duplicate_state = intel_plane_duplicate_state,
14818 .atomic_destroy_state = intel_plane_destroy_state,
14819 .format_mod_supported = intel_cursor_format_mod_supported,
14820 };
14821
14822 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14823 enum i9xx_plane_id i9xx_plane)
14824 {
14825 if (!HAS_FBC(dev_priv))
14826 return false;
14827
14828 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14829 return i9xx_plane == PLANE_A; /* tied to pipe A */
14830 else if (IS_IVYBRIDGE(dev_priv))
14831 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14832 i9xx_plane == PLANE_C;
14833 else if (INTEL_GEN(dev_priv) >= 4)
14834 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14835 else
14836 return i9xx_plane == PLANE_A;
14837 }
14838
14839 static struct intel_plane *
14840 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14841 {
14842 struct intel_plane *plane;
14843 const struct drm_plane_funcs *plane_funcs;
14844 unsigned int supported_rotations;
14845 unsigned int possible_crtcs;
14846 const u64 *modifiers;
14847 const u32 *formats;
14848 int num_formats;
14849 int ret;
14850
14851 if (INTEL_GEN(dev_priv) >= 9)
14852 return skl_universal_plane_create(dev_priv, pipe,
14853 PLANE_PRIMARY);
14854
14855 plane = intel_plane_alloc();
14856 if (IS_ERR(plane))
14857 return plane;
14858
14859 plane->pipe = pipe;
14860 /*
14861 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14862 	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
14863 */
14864 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14865 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14866 else
14867 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
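	/*
	 * Concretely: with FBC on gen2/3, pipe A (0) gets plane B (1) and
	 * pipe B (1) gets plane A (0), since !0 == 1 and !1 == 0.
	 */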
14868 plane->id = PLANE_PRIMARY;
14869 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14870
14871 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14872 if (plane->has_fbc) {
14873 struct intel_fbc *fbc = &dev_priv->fbc;
14874
14875 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14876 }
14877
14878 if (INTEL_GEN(dev_priv) >= 4) {
14879 formats = i965_primary_formats;
14880 num_formats = ARRAY_SIZE(i965_primary_formats);
14881 modifiers = i9xx_format_modifiers;
14882
14883 plane->max_stride = i9xx_plane_max_stride;
14884 plane->update_plane = i9xx_update_plane;
14885 plane->disable_plane = i9xx_disable_plane;
14886 plane->get_hw_state = i9xx_plane_get_hw_state;
14887 plane->check_plane = i9xx_plane_check;
14888
14889 plane_funcs = &i965_plane_funcs;
14890 } else {
14891 formats = i8xx_primary_formats;
14892 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14893 modifiers = i9xx_format_modifiers;
14894
14895 plane->max_stride = i9xx_plane_max_stride;
14896 plane->update_plane = i9xx_update_plane;
14897 plane->disable_plane = i9xx_disable_plane;
14898 plane->get_hw_state = i9xx_plane_get_hw_state;
14899 plane->check_plane = i9xx_plane_check;
14900
14901 plane_funcs = &i8xx_plane_funcs;
14902 }
14903
14904 possible_crtcs = BIT(pipe);
14905
14906 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14907 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14908 possible_crtcs, plane_funcs,
14909 formats, num_formats, modifiers,
14910 DRM_PLANE_TYPE_PRIMARY,
14911 "primary %c", pipe_name(pipe));
14912 else
14913 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14914 possible_crtcs, plane_funcs,
14915 formats, num_formats, modifiers,
14916 DRM_PLANE_TYPE_PRIMARY,
14917 "plane %c",
14918 plane_name(plane->i9xx_plane));
14919 if (ret)
14920 goto fail;
14921
14922 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14923 supported_rotations =
14924 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14925 DRM_MODE_REFLECT_X;
14926 } else if (INTEL_GEN(dev_priv) >= 4) {
14927 supported_rotations =
14928 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14929 } else {
14930 supported_rotations = DRM_MODE_ROTATE_0;
14931 }
14932
14933 if (INTEL_GEN(dev_priv) >= 4)
14934 drm_plane_create_rotation_property(&plane->base,
14935 DRM_MODE_ROTATE_0,
14936 supported_rotations);
14937
14938 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14939
14940 return plane;
14941
14942 fail:
14943 intel_plane_free(plane);
14944
14945 return ERR_PTR(ret);
14946 }
14947
14948 static struct intel_plane *
14949 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14950 enum pipe pipe)
14951 {
14952 unsigned int possible_crtcs;
14953 struct intel_plane *cursor;
14954 int ret;
14955
14956 cursor = intel_plane_alloc();
14957 if (IS_ERR(cursor))
14958 return cursor;
14959
14960 cursor->pipe = pipe;
14961 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14962 cursor->id = PLANE_CURSOR;
14963 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14964
14965 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14966 cursor->max_stride = i845_cursor_max_stride;
14967 cursor->update_plane = i845_update_cursor;
14968 cursor->disable_plane = i845_disable_cursor;
14969 cursor->get_hw_state = i845_cursor_get_hw_state;
14970 cursor->check_plane = i845_check_cursor;
14971 } else {
14972 cursor->max_stride = i9xx_cursor_max_stride;
14973 cursor->update_plane = i9xx_update_cursor;
14974 cursor->disable_plane = i9xx_disable_cursor;
14975 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14976 cursor->check_plane = i9xx_check_cursor;
14977 }
14978
14979 cursor->cursor.base = ~0;
14980 cursor->cursor.cntl = ~0;
14981
14982 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14983 cursor->cursor.size = ~0;
14984
14985 possible_crtcs = BIT(pipe);
14986
14987 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14988 possible_crtcs, &intel_cursor_plane_funcs,
14989 intel_cursor_formats,
14990 ARRAY_SIZE(intel_cursor_formats),
14991 cursor_format_modifiers,
14992 DRM_PLANE_TYPE_CURSOR,
14993 "cursor %c", pipe_name(pipe));
14994 if (ret)
14995 goto fail;
14996
14997 if (INTEL_GEN(dev_priv) >= 4)
14998 drm_plane_create_rotation_property(&cursor->base,
14999 DRM_MODE_ROTATE_0,
15000 DRM_MODE_ROTATE_0 |
15001 DRM_MODE_ROTATE_180);
15002
15003 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
15004
15005 return cursor;
15006
15007 fail:
15008 intel_plane_free(cursor);
15009
15010 return ERR_PTR(ret);
15011 }
15012
15013 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15014 struct intel_crtc_state *crtc_state)
15015 {
15016 struct intel_crtc_scaler_state *scaler_state =
15017 &crtc_state->scaler_state;
15018 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15019 int i;
15020
15021 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15022 if (!crtc->num_scalers)
15023 return;
15024
15025 for (i = 0; i < crtc->num_scalers; i++) {
15026 struct intel_scaler *scaler = &scaler_state->scalers[i];
15027
15028 scaler->in_use = 0;
15029 scaler->mode = 0;
15030 }
15031
15032 scaler_state->scaler_id = -1;
15033 }
15034
15035 #define INTEL_CRTC_FUNCS \
15036 .gamma_set = drm_atomic_helper_legacy_gamma_set, \
15037 .set_config = drm_atomic_helper_set_config, \
15038 .destroy = intel_crtc_destroy, \
15039 .page_flip = drm_atomic_helper_page_flip, \
15040 .atomic_duplicate_state = intel_crtc_duplicate_state, \
15041 .atomic_destroy_state = intel_crtc_destroy_state, \
15042 .set_crc_source = intel_crtc_set_crc_source, \
15043 .verify_crc_source = intel_crtc_verify_crc_source, \
15044 .get_crc_sources = intel_crtc_get_crc_sources
15045
15046 static const struct drm_crtc_funcs bdw_crtc_funcs = {
15047 INTEL_CRTC_FUNCS,
15048
15049 .get_vblank_counter = g4x_get_vblank_counter,
15050 .enable_vblank = bdw_enable_vblank,
15051 .disable_vblank = bdw_disable_vblank,
15052 };
15053
15054 static const struct drm_crtc_funcs ilk_crtc_funcs = {
15055 INTEL_CRTC_FUNCS,
15056
15057 .get_vblank_counter = g4x_get_vblank_counter,
15058 .enable_vblank = ilk_enable_vblank,
15059 .disable_vblank = ilk_disable_vblank,
15060 };
15061
15062 static const struct drm_crtc_funcs g4x_crtc_funcs = {
15063 INTEL_CRTC_FUNCS,
15064
15065 .get_vblank_counter = g4x_get_vblank_counter,
15066 .enable_vblank = i965_enable_vblank,
15067 .disable_vblank = i965_disable_vblank,
15068 };
15069
15070 static const struct drm_crtc_funcs i965_crtc_funcs = {
15071 INTEL_CRTC_FUNCS,
15072
15073 .get_vblank_counter = i915_get_vblank_counter,
15074 .enable_vblank = i965_enable_vblank,
15075 .disable_vblank = i965_disable_vblank,
15076 };
15077
15078 static const struct drm_crtc_funcs i945gm_crtc_funcs = {
15079 INTEL_CRTC_FUNCS,
15080
15081 .get_vblank_counter = i915_get_vblank_counter,
15082 .enable_vblank = i945gm_enable_vblank,
15083 .disable_vblank = i945gm_disable_vblank,
15084 };
15085
15086 static const struct drm_crtc_funcs i915_crtc_funcs = {
15087 INTEL_CRTC_FUNCS,
15088
15089 .get_vblank_counter = i915_get_vblank_counter,
15090 .enable_vblank = i8xx_enable_vblank,
15091 .disable_vblank = i8xx_disable_vblank,
15092 };
15093
15094 static const struct drm_crtc_funcs i8xx_crtc_funcs = {
15095 INTEL_CRTC_FUNCS,
15096
15097 /* no hw vblank counter */
15098 .enable_vblank = i8xx_enable_vblank,
15099 .disable_vblank = i8xx_disable_vblank,
15100 };
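
/*
 * Without a .get_vblank_counter hook the DRM core falls back to a
 * software vblank counter derived from vblank timestamps (roughly
 * speaking; drm_vblank.c has the authoritative fallback behaviour).
 */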
15101
15102 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
15103 {
15104 const struct drm_crtc_funcs *funcs;
15105 struct intel_crtc *intel_crtc;
15106 struct intel_crtc_state *crtc_state = NULL;
15107 struct intel_plane *primary = NULL;
15108 struct intel_plane *cursor = NULL;
15109 int sprite, ret;
15110
15111 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
15112 if (!intel_crtc)
15113 return -ENOMEM;
15114
15115 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
15116 if (!crtc_state) {
15117 ret = -ENOMEM;
15118 goto fail;
15119 }
15120 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
15121 intel_crtc->config = crtc_state;
15122
15123 primary = intel_primary_plane_create(dev_priv, pipe);
15124 if (IS_ERR(primary)) {
15125 ret = PTR_ERR(primary);
15126 goto fail;
15127 }
15128 intel_crtc->plane_ids_mask |= BIT(primary->id);
15129
15130 for_each_sprite(dev_priv, pipe, sprite) {
15131 struct intel_plane *plane;
15132
15133 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
15134 if (IS_ERR(plane)) {
15135 ret = PTR_ERR(plane);
15136 goto fail;
15137 }
15138 intel_crtc->plane_ids_mask |= BIT(plane->id);
15139 }
15140
15141 cursor = intel_cursor_plane_create(dev_priv, pipe);
15142 if (IS_ERR(cursor)) {
15143 ret = PTR_ERR(cursor);
15144 goto fail;
15145 }
15146 intel_crtc->plane_ids_mask |= BIT(cursor->id);
15147
15148 if (HAS_GMCH(dev_priv)) {
15149 if (IS_CHERRYVIEW(dev_priv) ||
15150 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
15151 funcs = &g4x_crtc_funcs;
15152 else if (IS_GEN(dev_priv, 4))
15153 funcs = &i965_crtc_funcs;
15154 else if (IS_I945GM(dev_priv))
15155 funcs = &i945gm_crtc_funcs;
15156 else if (IS_GEN(dev_priv, 3))
15157 funcs = &i915_crtc_funcs;
15158 else
15159 funcs = &i8xx_crtc_funcs;
15160 } else {
15161 if (INTEL_GEN(dev_priv) >= 8)
15162 funcs = &bdw_crtc_funcs;
15163 else
15164 funcs = &ilk_crtc_funcs;
15165 }
15166
15167 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
15168 &primary->base, &cursor->base,
15169 funcs, "pipe %c", pipe_name(pipe));
15170 if (ret)
15171 goto fail;
15172
15173 intel_crtc->pipe = pipe;
15174
15175 /* initialize shared scalers */
15176 intel_crtc_init_scalers(intel_crtc, crtc_state);
15177
15178 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
15179 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
15180 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
15181
15182 if (INTEL_GEN(dev_priv) < 9) {
15183 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
15184
15185 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
15186 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
15187 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
15188 }
15189
15190 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
15191
15192 intel_color_init(intel_crtc);
15193
15194 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
15195
15196 return 0;
15197
15198 fail:
15199 /*
15200 * drm_mode_config_cleanup() will free up any
15201 * crtcs/planes already initialized.
15202 */
15203 kfree(crtc_state);
15204 kfree(intel_crtc);
15205
15206 return ret;
15207 }
15208
15209 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15210 struct drm_file *file)
15211 {
15212 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15213 struct drm_crtc *drmmode_crtc;
15214 struct intel_crtc *crtc;
15215
15216 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15217 if (!drmmode_crtc)
15218 return -ENOENT;
15219
15220 crtc = to_intel_crtc(drmmode_crtc);
15221 pipe_from_crtc_id->pipe = crtc->pipe;
15222
15223 return 0;
15224 }
15225
15226 static int intel_encoder_clones(struct intel_encoder *encoder)
15227 {
15228 struct drm_device *dev = encoder->base.dev;
15229 struct intel_encoder *source_encoder;
15230 int index_mask = 0;
15231 int entry = 0;
15232
15233 for_each_intel_encoder(dev, source_encoder) {
15234 if (encoders_cloneable(encoder, source_encoder))
15235 index_mask |= (1 << entry);
15236
15237 entry++;
15238 }
15239
15240 return index_mask;
15241 }
15242
15243 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15244 {
15245 if (!IS_MOBILE(dev_priv))
15246 return false;
15247
15248 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15249 return false;
15250
15251 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15252 return false;
15253
15254 return true;
15255 }
15256
15257 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
15258 {
15259 if (INTEL_GEN(dev_priv) >= 9)
15260 return false;
15261
15262 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15263 return false;
15264
15265 if (HAS_PCH_LPT_H(dev_priv) &&
15266 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15267 return false;
15268
15269 /* DDI E can't be used if DDI A requires 4 lanes */
15270 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15271 return false;
15272
15273 if (!dev_priv->vbt.int_crt_support)
15274 return false;
15275
15276 return true;
15277 }
15278
15279 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15280 {
15281 int pps_num;
15282 int pps_idx;
15283
15284 if (HAS_DDI(dev_priv))
15285 return;
15286 /*
15287 * This w/a is needed at least on CPT/PPT, but to be sure apply it
15288 * everywhere where registers can be write protected.
15289 */
15290 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15291 pps_num = 2;
15292 else
15293 pps_num = 1;
15294
15295 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15296 u32 val = I915_READ(PP_CONTROL(pps_idx));
15297
15298 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15299 I915_WRITE(PP_CONTROL(pps_idx), val);
15300 }
15301 }
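
/*
 * As a sketch of the RMW above (bit layout assumed from i915_reg.h,
 * where the unlock key lives in the high 16 bits of PP_CONTROL):
 *
 *	val = 0x00000001;			read: locked, power on
 *	val = (val & ~PANEL_UNLOCK_MASK)	clear the key field
 *	    | PANEL_UNLOCK_REGS;		insert the 0xabcd key
 *	write back -> 0xabcd0001, register writes now accepted
 */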
15302
15303 static void intel_pps_init(struct drm_i915_private *dev_priv)
15304 {
15305 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
15306 dev_priv->pps_mmio_base = PCH_PPS_BASE;
15307 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15308 dev_priv->pps_mmio_base = VLV_PPS_BASE;
15309 else
15310 dev_priv->pps_mmio_base = PPS_BASE;
15311
15312 intel_pps_unlock_regs_wa(dev_priv);
15313 }
15314
15315 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
15316 {
15317 struct intel_encoder *encoder;
15318 bool dpd_is_edp = false;
15319
15320 intel_pps_init(dev_priv);
15321
15322 if (!HAS_DISPLAY(dev_priv))
15323 return;
15324
15325 if (INTEL_GEN(dev_priv) >= 12) {
15326 /* TODO: initialize TC ports as well */
15327 intel_ddi_init(dev_priv, PORT_A);
15328 intel_ddi_init(dev_priv, PORT_B);
15329 icl_dsi_init(dev_priv);
15330 } else if (IS_ELKHARTLAKE(dev_priv)) {
15331 intel_ddi_init(dev_priv, PORT_A);
15332 intel_ddi_init(dev_priv, PORT_B);
15333 intel_ddi_init(dev_priv, PORT_C);
15334 intel_ddi_init(dev_priv, PORT_D);
15335 icl_dsi_init(dev_priv);
15336 } else if (IS_GEN(dev_priv, 11)) {
15337 intel_ddi_init(dev_priv, PORT_A);
15338 intel_ddi_init(dev_priv, PORT_B);
15339 intel_ddi_init(dev_priv, PORT_C);
15340 intel_ddi_init(dev_priv, PORT_D);
15341 intel_ddi_init(dev_priv, PORT_E);
15342 /*
15343 * On some ICL SKUs port F is not present. No strap bits for
15344 * this, so rely on VBT.
15345 * Work around broken VBTs on SKUs known to have no port F.
15346 */
15347 if (IS_ICL_WITH_PORT_F(dev_priv) &&
15348 intel_bios_is_port_present(dev_priv, PORT_F))
15349 intel_ddi_init(dev_priv, PORT_F);
15350
15351 icl_dsi_init(dev_priv);
15352 } else if (IS_GEN9_LP(dev_priv)) {
15353 /*
15354 * FIXME: Broxton doesn't support port detection via the
15355 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
15356 * detect the ports.
15357 */
15358 intel_ddi_init(dev_priv, PORT_A);
15359 intel_ddi_init(dev_priv, PORT_B);
15360 intel_ddi_init(dev_priv, PORT_C);
15361
15362 vlv_dsi_init(dev_priv);
15363 } else if (HAS_DDI(dev_priv)) {
15364 int found;
15365
15366 if (intel_ddi_crt_present(dev_priv))
15367 intel_crt_init(dev_priv);
15368
15369 /*
15370 * Haswell uses DDI functions to detect digital outputs.
15371 * On SKL pre-D0 the strap isn't connected, so we assume
15372 * it's there.
15373 */
15374 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
15375 /* WaIgnoreDDIAStrap: skl */
15376 if (found || IS_GEN9_BC(dev_priv))
15377 intel_ddi_init(dev_priv, PORT_A);
15378
15379 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
15380 * register */
15381 found = I915_READ(SFUSE_STRAP);
15382
15383 if (found & SFUSE_STRAP_DDIB_DETECTED)
15384 intel_ddi_init(dev_priv, PORT_B);
15385 if (found & SFUSE_STRAP_DDIC_DETECTED)
15386 intel_ddi_init(dev_priv, PORT_C);
15387 if (found & SFUSE_STRAP_DDID_DETECTED)
15388 intel_ddi_init(dev_priv, PORT_D);
15389 if (found & SFUSE_STRAP_DDIF_DETECTED)
15390 intel_ddi_init(dev_priv, PORT_F);
15391 /*
15392 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
15393 */
15394 if (IS_GEN9_BC(dev_priv) &&
15395 intel_bios_is_port_present(dev_priv, PORT_E))
15396 intel_ddi_init(dev_priv, PORT_E);
15397
15398 } else if (HAS_PCH_SPLIT(dev_priv)) {
15399 int found;
15400
15401 /*
15402 * intel_edp_init_connector() depends on this completing first,
15403 * to prevent the registration of both eDP and LVDS and the
15404 * incorrect sharing of the PPS.
15405 */
15406 intel_lvds_init(dev_priv);
15407 intel_crt_init(dev_priv);
15408
15409 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
15410
15411 if (ilk_has_edp_a(dev_priv))
15412 intel_dp_init(dev_priv, DP_A, PORT_A);
15413
15414 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
15415 /* PCH SDVOB multiplex with HDMIB */
15416 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
15417 if (!found)
15418 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
15419 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
15420 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
15421 }
15422
15423 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
15424 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
15425
15426 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
15427 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
15428
15429 if (I915_READ(PCH_DP_C) & DP_DETECTED)
15430 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
15431
15432 if (I915_READ(PCH_DP_D) & DP_DETECTED)
15433 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
15434 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15435 bool has_edp, has_port;
15436
15437 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
15438 intel_crt_init(dev_priv);
15439
15440 /*
15441 * The DP_DETECTED bit is the latched state of the DDC
15442 * SDA pin at boot. However since eDP doesn't require DDC
15443 * (no way to plug in a DP->HDMI dongle) the DDC pins for
15444 * eDP ports may have been muxed to an alternate function.
15445 * Thus we can't rely on the DP_DETECTED bit alone to detect
15446 * eDP ports. Consult the VBT as well as DP_DETECTED to
15447 * detect eDP ports.
15448 *
15449 * Sadly the straps seem to be missing sometimes even for HDMI
15450 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
15451 * and VBT for the presence of the port. Additionally we can't
15452 * trust the port type the VBT declares as we've seen at least
15453 * HDMI ports that the VBT claim are DP or eDP.
15454 */
15455 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
15456 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
15457 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
15458 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
15459 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
15460 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
15461
15462 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
15463 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
15464 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
15465 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
15466 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
15467 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
15468
15469 if (IS_CHERRYVIEW(dev_priv)) {
15470 /*
15471 * eDP not supported on port D,
15472 * so no need to worry about it
15473 */
15474 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
15475 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
15476 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
15477 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
15478 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
15479 }
15480
15481 vlv_dsi_init(dev_priv);
15482 } else if (IS_PINEVIEW(dev_priv)) {
15483 intel_lvds_init(dev_priv);
15484 intel_crt_init(dev_priv);
15485 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
15486 bool found = false;
15487
15488 if (IS_MOBILE(dev_priv))
15489 intel_lvds_init(dev_priv);
15490
15491 intel_crt_init(dev_priv);
15492
15493 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15494 DRM_DEBUG_KMS("probing SDVOB\n");
15495 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
15496 if (!found && IS_G4X(dev_priv)) {
15497 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
15498 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
15499 }
15500
15501 if (!found && IS_G4X(dev_priv))
15502 intel_dp_init(dev_priv, DP_B, PORT_B);
15503 }
15504
15505 /* Before G4X SDVOC doesn't have its own detect register */
15506
15507 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15508 DRM_DEBUG_KMS("probing SDVOC\n");
15509 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
15510 }
15511
15512 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
15513
15514 if (IS_G4X(dev_priv)) {
15515 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
15516 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
15517 }
15518 if (IS_G4X(dev_priv))
15519 intel_dp_init(dev_priv, DP_C, PORT_C);
15520 }
15521
15522 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
15523 intel_dp_init(dev_priv, DP_D, PORT_D);
15524
15525 if (SUPPORTS_TV(dev_priv))
15526 intel_tv_init(dev_priv);
15527 } else if (IS_GEN(dev_priv, 2)) {
15528 if (IS_I85X(dev_priv))
15529 intel_lvds_init(dev_priv);
15530
15531 intel_crt_init(dev_priv);
15532 intel_dvo_init(dev_priv);
15533 }
15534
15535 intel_psr_init(dev_priv);
15536
15537 for_each_intel_encoder(&dev_priv->drm, encoder) {
15538 encoder->base.possible_crtcs = encoder->crtc_mask;
15539 encoder->base.possible_clones =
15540 intel_encoder_clones(encoder);
15541 }
15542
15543 intel_init_pch_refclk(dev_priv);
15544
15545 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
15546 }
15547
15548 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15549 {
15550 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15551
15552 drm_framebuffer_cleanup(fb);
15553 intel_frontbuffer_put(intel_fb->frontbuffer);
15554
15555 kfree(intel_fb);
15556 }
15557
15558 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15559 struct drm_file *file,
15560 unsigned int *handle)
15561 {
15562 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15563
15564 if (obj->userptr.mm) {
15565 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15566 return -EINVAL;
15567 }
15568
15569 return drm_gem_handle_create(file, &obj->base, handle);
15570 }
15571
15572 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15573 struct drm_file *file,
15574 unsigned flags, unsigned color,
15575 struct drm_clip_rect *clips,
15576 unsigned num_clips)
15577 {
15578 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15579
15580 i915_gem_object_flush_if_display(obj);
15581 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
15582
15583 return 0;
15584 }
15585
15586 static const struct drm_framebuffer_funcs intel_fb_funcs = {
15587 .destroy = intel_user_framebuffer_destroy,
15588 .create_handle = intel_user_framebuffer_create_handle,
15589 .dirty = intel_user_framebuffer_dirty,
15590 };
15591
15592 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15593 struct drm_i915_gem_object *obj,
15594 struct drm_mode_fb_cmd2 *mode_cmd)
15595 {
15596 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15597 struct drm_framebuffer *fb = &intel_fb->base;
15598 u32 max_stride;
15599 unsigned int tiling, stride;
15600 int ret = -EINVAL;
15601 int i;
15602
15603 intel_fb->frontbuffer = intel_frontbuffer_get(obj);
15604 if (!intel_fb->frontbuffer)
15605 return -ENOMEM;
15606
15607 i915_gem_object_lock(obj);
15608 tiling = i915_gem_object_get_tiling(obj);
15609 stride = i915_gem_object_get_stride(obj);
15610 i915_gem_object_unlock(obj);
15611
15612 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15613 /*
15614 * If there's a fence, enforce that
15615 * the fb modifier and tiling mode match.
15616 */
15617 if (tiling != I915_TILING_NONE &&
15618 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15619 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15620 goto err;
15621 }
15622 } else {
15623 if (tiling == I915_TILING_X) {
15624 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15625 } else if (tiling == I915_TILING_Y) {
15626 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15627 goto err;
15628 }
15629 }
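	/*
	 * E.g. a legacy addfb on an X-tiled BO is silently promoted to
	 * I915_FORMAT_MOD_X_TILED here, whereas a Y-tiled BO must go
	 * through addfb2 with an explicit modifier (illustrative note).
	 */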
15630
15631 if (!drm_any_plane_has_format(&dev_priv->drm,
15632 mode_cmd->pixel_format,
15633 mode_cmd->modifier[0])) {
15634 struct drm_format_name_buf format_name;
15635
15636 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15637 drm_get_format_name(mode_cmd->pixel_format,
15638 &format_name),
15639 mode_cmd->modifier[0]);
15640 goto err;
15641 }
15642
15643 /*
15644 * gen2/3 display engine uses the fence if present,
15645 * so the tiling mode must match the fb modifier exactly.
15646 */
15647 if (INTEL_GEN(dev_priv) < 4 &&
15648 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15649 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15650 goto err;
15651 }
15652
15653 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
15654 mode_cmd->modifier[0]);
15655 if (mode_cmd->pitches[0] > max_stride) {
15656 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15657 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15658 "tiled" : "linear",
15659 mode_cmd->pitches[0], max_stride);
15660 goto err;
15661 }
15662
15663 /*
15664 * If there's a fence, enforce that
15665 * the fb pitch and fence stride match.
15666 */
15667 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15668 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15669 mode_cmd->pitches[0], stride);
15670 goto err;
15671 }
15672
15673 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15674 if (mode_cmd->offsets[0] != 0)
15675 goto err;
15676
15677 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15678
15679 for (i = 0; i < fb->format->num_planes; i++) {
15680 u32 stride_alignment;
15681
15682 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15683 DRM_DEBUG_KMS("bad plane %d handle\n", i);
15684 goto err;
15685 }
15686
15687 stride_alignment = intel_fb_stride_alignment(fb, i);
15688
15689 /*
15690 * Display WA #0531: skl,bxt,kbl,glk
15691 *
15692 * Render decompression and plane width > 3840
15693 * combined with horizontal panning requires the
15694 * plane stride to be a multiple of 4. We'll just
15695 * require the entire fb to accommodate that to avoid
15696 * potential runtime errors at plane configuration time.
15697 */
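		/*
		 * E.g. if intel_fb_stride_alignment() asked for 128-byte
		 * alignment, a >3840 pixel wide CCS fb must now use a
		 * 512-byte multiple (numbers purely illustrative).
		 */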
15698 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15699 is_ccs_modifier(fb->modifier))
15700 stride_alignment *= 4;
15701
15702 if (fb->pitches[i] & (stride_alignment - 1)) {
15703 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15704 i, fb->pitches[i], stride_alignment);
15705 goto err;
15706 }
15707
15708 fb->obj[i] = &obj->base;
15709 }
15710
15711 ret = intel_fill_fb_info(dev_priv, fb);
15712 if (ret)
15713 goto err;
15714
15715 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15716 if (ret) {
15717 DRM_ERROR("framebuffer init failed %d\n", ret);
15718 goto err;
15719 }
15720
15721 return 0;
15722
15723 err:
15724 intel_frontbuffer_put(intel_fb->frontbuffer);
15725 return ret;
15726 }
15727
15728 static struct drm_framebuffer *
15729 intel_user_framebuffer_create(struct drm_device *dev,
15730 struct drm_file *filp,
15731 const struct drm_mode_fb_cmd2 *user_mode_cmd)
15732 {
15733 struct drm_framebuffer *fb;
15734 struct drm_i915_gem_object *obj;
15735 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15736
15737 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15738 if (!obj)
15739 return ERR_PTR(-ENOENT);
15740
15741 fb = intel_framebuffer_create(obj, &mode_cmd);
15742 i915_gem_object_put(obj);
15743
15744 return fb;
15745 }
15746
15747 static void intel_atomic_state_free(struct drm_atomic_state *state)
15748 {
15749 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15750
15751 drm_atomic_state_default_release(state);
15752
15753 i915_sw_fence_fini(&intel_state->commit_ready);
15754
15755 kfree(state);
15756 }
15757
15758 static enum drm_mode_status
15759 intel_mode_valid(struct drm_device *dev,
15760 const struct drm_display_mode *mode)
15761 {
15762 struct drm_i915_private *dev_priv = to_i915(dev);
15763 int hdisplay_max, htotal_max;
15764 int vdisplay_max, vtotal_max;
15765
15766 /*
15767 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15768 * of DBLSCAN modes to the output's mode list when they detect
15769 * the scaling mode property on the connector. And they don't
15770 * ask the kernel to validate those modes in any way until
15771 * modeset time at which point the client gets a protocol error.
15772 * So in order to not upset those clients we silently ignore the
15773 * DBLSCAN flag on such connectors. For other connectors we will
15774 * reject modes with the DBLSCAN flag in encoder->compute_config().
15775 * And we always reject DBLSCAN modes in connector->mode_valid()
15776 * as we never want such modes on the connector's mode list.
15777 */
15778
15779 if (mode->vscan > 1)
15780 return MODE_NO_VSCAN;
15781
15782 if (mode->flags & DRM_MODE_FLAG_HSKEW)
15783 return MODE_H_ILLEGAL;
15784
15785 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15786 DRM_MODE_FLAG_NCSYNC |
15787 DRM_MODE_FLAG_PCSYNC))
15788 return MODE_HSYNC;
15789
15790 if (mode->flags & (DRM_MODE_FLAG_BCAST |
15791 DRM_MODE_FLAG_PIXMUX |
15792 DRM_MODE_FLAG_CLKDIV2))
15793 return MODE_BAD;
15794
15795 if (INTEL_GEN(dev_priv) >= 9 ||
15796 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15797 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15798 vdisplay_max = 4096;
15799 htotal_max = 8192;
15800 vtotal_max = 8192;
15801 } else if (INTEL_GEN(dev_priv) >= 3) {
15802 hdisplay_max = 4096;
15803 vdisplay_max = 4096;
15804 htotal_max = 8192;
15805 vtotal_max = 8192;
15806 } else {
15807 hdisplay_max = 2048;
15808 vdisplay_max = 2048;
15809 htotal_max = 4096;
15810 vtotal_max = 4096;
15811 }
15812
15813 if (mode->hdisplay > hdisplay_max ||
15814 mode->hsync_start > htotal_max ||
15815 mode->hsync_end > htotal_max ||
15816 mode->htotal > htotal_max)
15817 return MODE_H_ILLEGAL;
15818
15819 if (mode->vdisplay > vdisplay_max ||
15820 mode->vsync_start > vtotal_max ||
15821 mode->vsync_end > vtotal_max ||
15822 mode->vtotal > vtotal_max)
15823 return MODE_V_ILLEGAL;
15824
15825 return MODE_OK;
15826 }
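
/*
 * Worked example against the limits above: a 4096x2160 mode passes on
 * gen9+ (hdisplay_max 8192, vdisplay_max 4096), but on gen2 the same
 * mode is rejected with MODE_H_ILLEGAL since hdisplay_max is 2048.
 */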
15827
15828 static const struct drm_mode_config_funcs intel_mode_funcs = {
15829 .fb_create = intel_user_framebuffer_create,
15830 .get_format_info = intel_get_format_info,
15831 .output_poll_changed = intel_fbdev_output_poll_changed,
15832 .mode_valid = intel_mode_valid,
15833 .atomic_check = intel_atomic_check,
15834 .atomic_commit = intel_atomic_commit,
15835 .atomic_state_alloc = intel_atomic_state_alloc,
15836 .atomic_state_clear = intel_atomic_state_clear,
15837 .atomic_state_free = intel_atomic_state_free,
15838 };
15839
15840 /**
15841 * intel_init_display_hooks - initialize the display modesetting hooks
15842 * @dev_priv: device private
15843 */
15844 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15845 {
15846 intel_init_cdclk_hooks(dev_priv);
15847
15848 if (INTEL_GEN(dev_priv) >= 9) {
15849 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15850 dev_priv->display.get_initial_plane_config =
15851 skylake_get_initial_plane_config;
15852 dev_priv->display.crtc_compute_clock =
15853 haswell_crtc_compute_clock;
15854 dev_priv->display.crtc_enable = haswell_crtc_enable;
15855 dev_priv->display.crtc_disable = haswell_crtc_disable;
15856 } else if (HAS_DDI(dev_priv)) {
15857 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15858 dev_priv->display.get_initial_plane_config =
15859 i9xx_get_initial_plane_config;
15860 dev_priv->display.crtc_compute_clock =
15861 haswell_crtc_compute_clock;
15862 dev_priv->display.crtc_enable = haswell_crtc_enable;
15863 dev_priv->display.crtc_disable = haswell_crtc_disable;
15864 } else if (HAS_PCH_SPLIT(dev_priv)) {
15865 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15866 dev_priv->display.get_initial_plane_config =
15867 i9xx_get_initial_plane_config;
15868 dev_priv->display.crtc_compute_clock =
15869 ironlake_crtc_compute_clock;
15870 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15871 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15872 } else if (IS_CHERRYVIEW(dev_priv)) {
15873 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15874 dev_priv->display.get_initial_plane_config =
15875 i9xx_get_initial_plane_config;
15876 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15877 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15878 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15879 } else if (IS_VALLEYVIEW(dev_priv)) {
15880 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15881 dev_priv->display.get_initial_plane_config =
15882 i9xx_get_initial_plane_config;
15883 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15884 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15885 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15886 } else if (IS_G4X(dev_priv)) {
15887 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15888 dev_priv->display.get_initial_plane_config =
15889 i9xx_get_initial_plane_config;
15890 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15891 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15892 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15893 } else if (IS_PINEVIEW(dev_priv)) {
15894 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15895 dev_priv->display.get_initial_plane_config =
15896 i9xx_get_initial_plane_config;
15897 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15898 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15899 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15900 } else if (!IS_GEN(dev_priv, 2)) {
15901 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15902 dev_priv->display.get_initial_plane_config =
15903 i9xx_get_initial_plane_config;
15904 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15905 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15906 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15907 } else {
15908 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15909 dev_priv->display.get_initial_plane_config =
15910 i9xx_get_initial_plane_config;
15911 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15912 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15913 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15914 }
15915
15916 if (IS_GEN(dev_priv, 5)) {
15917 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15918 } else if (IS_GEN(dev_priv, 6)) {
15919 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15920 } else if (IS_IVYBRIDGE(dev_priv)) {
15921 /* FIXME: detect B0+ stepping and use auto training */
15922 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15923 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15924 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15925 }
15926
15927 if (INTEL_GEN(dev_priv) >= 9)
15928 dev_priv->display.update_crtcs = skl_update_crtcs;
15929 else
15930 dev_priv->display.update_crtcs = intel_update_crtcs;
15931 }
15932
15933 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
15934 {
15935 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15936 return VLV_VGACNTRL;
15937 else if (INTEL_GEN(dev_priv) >= 5)
15938 return CPU_VGACNTRL;
15939 else
15940 return VGACNTRL;
15941 }
15942
15943 /* Disable the VGA plane that we never use */
15944 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15945 {
15946 struct pci_dev *pdev = dev_priv->drm.pdev;
15947 u8 sr1;
15948 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15949
15950 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15951 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
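	/* SR01 bit 5 is the VGA "screen off" bit (per the VGA sequencer spec) */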
15952 outb(SR01, VGA_SR_INDEX);
15953 sr1 = inb(VGA_SR_DATA);
15954 outb(sr1 | 1<<5, VGA_SR_DATA);
15955 vga_put(pdev, VGA_RSRC_LEGACY_IO);
15956 udelay(300);
15957
15958 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15959 POSTING_READ(vga_reg);
15960 }
15961
15962 void intel_modeset_init_hw(struct drm_device *dev)
15963 {
15964 struct drm_i915_private *dev_priv = to_i915(dev);
15965
15966 intel_update_cdclk(dev_priv);
15967 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15968 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15969 }
15970
15971 /*
15972 * Calculate what we think the watermarks should be for the state we've read
15973 * out of the hardware and then immediately program those watermarks so that
15974 * we ensure the hardware settings match our internal state.
15975 *
15976 * We can calculate what we think WM's should be by creating a duplicate of the
15977 * current state (which was constructed during hardware readout) and running it
15978 * through the atomic check code to calculate new watermark values in the
15979 * state object.
15980 */
15981 static void sanitize_watermarks(struct drm_device *dev)
15982 {
15983 struct drm_i915_private *dev_priv = to_i915(dev);
15984 struct drm_atomic_state *state;
15985 struct intel_atomic_state *intel_state;
15986 struct intel_crtc *crtc;
15987 struct intel_crtc_state *crtc_state;
15988 struct drm_modeset_acquire_ctx ctx;
15989 int ret;
15990 int i;
15991
15992 /* Only supported on platforms that use atomic watermark design */
15993 if (!dev_priv->display.optimize_watermarks)
15994 return;
15995
15996 /*
15997 * We need to hold connection_mutex before calling duplicate_state so
15998 * that the connector loop is protected.
15999 */
16000 drm_modeset_acquire_init(&ctx, 0);
16001 retry:
16002 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16003 if (ret == -EDEADLK) {
16004 drm_modeset_backoff(&ctx);
16005 goto retry;
16006 } else if (WARN_ON(ret)) {
16007 goto fail;
16008 }
16009
16010 state = drm_atomic_helper_duplicate_state(dev, &ctx);
16011 if (WARN_ON(IS_ERR(state)))
16012 goto fail;
16013
16014 intel_state = to_intel_atomic_state(state);
16015
16016 /*
16017 * Hardware readout is the only time we don't want to calculate
16018 * intermediate watermarks (since we don't trust the current
16019 * watermarks).
16020 */
16021 if (!HAS_GMCH(dev_priv))
16022 intel_state->skip_intermediate_wm = true;
16023
16024 ret = intel_atomic_check(dev, state);
16025 if (ret) {
16026 /*
16027 * If we fail here, it means that the hardware appears to be
16028 * programmed in a way that shouldn't be possible, given our
16029 * understanding of watermark requirements. This might mean a
16030 * mistake in the hardware readout code or a mistake in the
16031 * watermark calculations for a given platform. Raise a WARN
16032 * so that this is noticeable.
16033 *
16034 * If this actually happens, we'll have to just leave the
16035 * BIOS-programmed watermarks untouched and hope for the best.
16036 */
16037 WARN(true, "Could not determine valid watermarks for inherited state\n");
16038 goto put_state;
16039 }
16040
16041 /* Write calculated watermark values back */
16042 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
16043 crtc_state->wm.need_postvbl_update = true;
16044 dev_priv->display.optimize_watermarks(intel_state, crtc_state);
16045
16046 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
16047 }
16048
16049 put_state:
16050 drm_atomic_state_put(state);
16051 fail:
16052 drm_modeset_drop_locks(&ctx);
16053 drm_modeset_acquire_fini(&ctx);
16054 }
16055
16056 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16057 {
16058 if (IS_GEN(dev_priv, 5)) {
16059 u32 fdi_pll_clk =
16060 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16061
16062 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16063 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16064 dev_priv->fdi_pll_freq = 270000;
16065 } else {
16066 return;
16067 }
16068
16069 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16070 }
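
/*
 * Worked example (register value invented for illustration): a
 * FDI_PLL_BIOS_0 fb-clock field of 25 gives (25 + 2) * 10000 =
 * 270000 kHz, matching the 2.7 GHz FDI link frequency hardcoded
 * for gen6/ivb above.
 */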
16071
16072 static int intel_initial_commit(struct drm_device *dev)
16073 {
16074 struct drm_atomic_state *state = NULL;
16075 struct drm_modeset_acquire_ctx ctx;
16076 struct drm_crtc *crtc;
16077 struct drm_crtc_state *crtc_state;
16078 int ret = 0;
16079
16080 state = drm_atomic_state_alloc(dev);
16081 if (!state)
16082 return -ENOMEM;
16083
16084 drm_modeset_acquire_init(&ctx, 0);
16085
16086 retry:
16087 state->acquire_ctx = &ctx;
16088
16089 drm_for_each_crtc(crtc, dev) {
16090 crtc_state = drm_atomic_get_crtc_state(state, crtc);
16091 if (IS_ERR(crtc_state)) {
16092 ret = PTR_ERR(crtc_state);
16093 goto out;
16094 }
16095
16096 if (crtc_state->active) {
16097 ret = drm_atomic_add_affected_planes(state, crtc);
16098 if (ret)
16099 goto out;
16100
16101 /*
16102 * FIXME hack to force a LUT update to avoid the
16103 * plane update forcing the pipe gamma on without
16104 * having a proper LUT loaded. Remove once we
16105 * have readout for pipe gamma enable.
16106 */
16107 crtc_state->color_mgmt_changed = true;
16108 }
16109 }
16110
16111 ret = drm_atomic_commit(state);
16112
16113 out:
16114 if (ret == -EDEADLK) {
16115 drm_atomic_state_clear(state);
16116 drm_modeset_backoff(&ctx);
16117 goto retry;
16118 }
16119
16120 drm_atomic_state_put(state);
16121
16122 drm_modeset_drop_locks(&ctx);
16123 drm_modeset_acquire_fini(&ctx);
16124
16125 return ret;
16126 }
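
/*
 * The retry loop above is the standard drm_modeset_lock pattern: on
 * -EDEADLK the duplicated state is cleared, all held locks are dropped
 * via drm_modeset_backoff() (which also waits for the contended lock),
 * and the whole acquisition sequence restarts from scratch.
 */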
16127
16128 int intel_modeset_init(struct drm_device *dev)
16129 {
16130 struct drm_i915_private *dev_priv = to_i915(dev);
16131 enum pipe pipe;
16132 struct intel_crtc *crtc;
16133 int ret;
16134
16135 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
16136
16137 drm_mode_config_init(dev);
16138
16139 ret = intel_bw_init(dev_priv);
16140 if (ret)
16141 return ret;
16142
16143 dev->mode_config.min_width = 0;
16144 dev->mode_config.min_height = 0;
16145
16146 dev->mode_config.preferred_depth = 24;
16147 dev->mode_config.prefer_shadow = 1;
16148
16149 dev->mode_config.allow_fb_modifiers = true;
16150
16151 dev->mode_config.funcs = &intel_mode_funcs;
16152
16153 init_llist_head(&dev_priv->atomic_helper.free_list);
16154 INIT_WORK(&dev_priv->atomic_helper.free_work,
16155 intel_atomic_helper_free_state_worker);
16156
16157 intel_init_quirks(dev_priv);
16158
16159 intel_fbc_init(dev_priv);
16160
16161 intel_init_pm(dev_priv);
16162
16163 /*
16164 * There may be no VBT; and if the BIOS enabled SSC we can
16165 * just keep using it to avoid unnecessary flicker. Whereas if the
16166 * BIOS isn't using it, don't assume it will work even if the VBT
16167 * indicates as much.
16168 */
16169 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
16170 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
16171 DREF_SSC1_ENABLE);
16172
16173 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
16174 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
16175 bios_lvds_use_ssc ? "en" : "dis",
16176 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
16177 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
16178 }
16179 }
16180
16181 /*
16182 * Maximum framebuffer dimensions, chosen to match
16183 * the maximum render engine surface size on gen4+.
16184 */
16185 if (INTEL_GEN(dev_priv) >= 7) {
16186 dev->mode_config.max_width = 16384;
16187 dev->mode_config.max_height = 16384;
16188 } else if (INTEL_GEN(dev_priv) >= 4) {
16189 dev->mode_config.max_width = 8192;
16190 dev->mode_config.max_height = 8192;
16191 } else if (IS_GEN(dev_priv, 3)) {
16192 dev->mode_config.max_width = 4096;
16193 dev->mode_config.max_height = 4096;
16194 } else {
16195 dev->mode_config.max_width = 2048;
16196 dev->mode_config.max_height = 2048;
16197 }
16198
16199 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
16200 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
16201 dev->mode_config.cursor_height = 1023;
16202 } else if (IS_GEN(dev_priv, 2)) {
16203 dev->mode_config.cursor_width = 64;
16204 dev->mode_config.cursor_height = 64;
16205 } else {
16206 dev->mode_config.cursor_width = 256;
16207 dev->mode_config.cursor_height = 256;
16208 }
16209
16210 DRM_DEBUG_KMS("%d display pipe%s available.\n",
16211 INTEL_INFO(dev_priv)->num_pipes,
16212 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
16213
16214 for_each_pipe(dev_priv, pipe) {
16215 ret = intel_crtc_init(dev_priv, pipe);
16216 if (ret) {
16217 drm_mode_config_cleanup(dev);
16218 return ret;
16219 }
16220 }
16221
16222 intel_shared_dpll_init(dev);
16223 intel_update_fdi_pll_freq(dev_priv);
16224
16225 intel_update_czclk(dev_priv);
16226 intel_modeset_init_hw(dev);
16227
16228 intel_hdcp_component_init(dev_priv);
16229
16230 if (dev_priv->max_cdclk_freq == 0)
16231 intel_update_max_cdclk(dev_priv);
16232
16233 /* Just disable it once at startup */
16234 i915_disable_vga(dev_priv);
16235 intel_setup_outputs(dev_priv);
16236
16237 drm_modeset_lock_all(dev);
16238 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
16239 drm_modeset_unlock_all(dev);
16240
16241 for_each_intel_crtc(dev, crtc) {
16242 struct intel_initial_plane_config plane_config = {};
16243
16244 if (!crtc->active)
16245 continue;
16246
16247 /*
16248 * Note that reserving the BIOS fb up front prevents us
16249 * from stuffing other stolen allocations like the ring
16250 * on top. This prevents some ugliness at boot time, and
16251 * can even allow for smooth boot transitions if the BIOS
16252 * fb is large enough for the active pipe configuration.
16253 */
16254 dev_priv->display.get_initial_plane_config(crtc,
16255 &plane_config);
16256
16257 /*
16258 * If the fb is shared between multiple heads, we'll
16259 * just get the first one.
16260 */
16261 intel_find_initial_plane_obj(crtc, &plane_config);
16262 }
16263
16264 /*
16265 * Make sure hardware watermarks really match the state we read out.
16266 * Note that we need to do this after reconstructing the BIOS fb's
16267 * since the watermark calculation done here will use pstate->fb.
16268 */
16269 if (!HAS_GMCH(dev_priv))
16270 sanitize_watermarks(dev);
16271
16272 /*
16273 * Force all active planes to recompute their states. So that on
16274 * mode_setcrtc after probe, all the intel_plane_state variables
16275 * are already calculated and there is no assert_plane warnings
16276 * during bootup.
16277 */
16278 ret = intel_initial_commit(dev);
16279 if (ret)
16280 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
16281
16282 return 0;
16283 }
16284
16285 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16286 {
16287 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16288 /* 640x480@60Hz, ~25175 kHz */
16289 struct dpll clock = {
16290 .m1 = 18,
16291 .m2 = 7,
16292 .p1 = 13,
16293 .p2 = 4,
16294 .n = 2,
16295 };
16296 u32 dpll, fp;
16297 int i;
16298
16299 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
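	/*
	 * The expected 25154 follows from the i9xx divisor formulas
	 * (see i9xx_calc_dpll_params()):
	 *	m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 20 + 9 = 109
	 *	vco = 48000 * 109 / (n + 2)   = 1308000 kHz
	 *	dot = 1308000 / (p1 * p2)     = 1308000 / 52 ~= 25154 kHz
	 */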
16300
16301 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
16302 pipe_name(pipe), clock.vco, clock.dot);
16303
16304 fp = i9xx_dpll_compute_fp(&clock);
16305 dpll = DPLL_DVO_2X_MODE |
16306 DPLL_VGA_MODE_DIS |
16307 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
16308 PLL_P2_DIVIDE_BY_4 |
16309 PLL_REF_INPUT_DREFCLK |
16310 DPLL_VCO_ENABLE;
16311
16312 I915_WRITE(FP0(pipe), fp);
16313 I915_WRITE(FP1(pipe), fp);
16314
16315 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
16316 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
16317 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
16318 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
16319 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
16320 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
16321 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
16322
16323 /*
16324 * Apparently we need to have VGA mode enabled prior to changing
16325 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
16326 * dividers, even though the register value does change.
16327 */
16328 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
16329 I915_WRITE(DPLL(pipe), dpll);
16330
16331 /* Wait for the clocks to stabilize. */
16332 POSTING_READ(DPLL(pipe));
16333 udelay(150);
16334
16335 /* The pixel multiplier can only be updated once the
16336 * DPLL is enabled and the clocks are stable.
16337 *
16338 * So write it again.
16339 */
16340 I915_WRITE(DPLL(pipe), dpll);
16341
16342 /* We do this three times for luck */
16343 	for (i = 0; i < 3; i++) {
16344 I915_WRITE(DPLL(pipe), dpll);
16345 POSTING_READ(DPLL(pipe));
16346 udelay(150); /* wait for warmup */
16347 }
16348
16349 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
16350 POSTING_READ(PIPECONF(pipe));
16351
16352 intel_wait_for_pipe_scanline_moving(crtc);
16353 }
16354
16355 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16356 {
16357 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16358
16359 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
16360 pipe_name(pipe));
16361
16362 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
16363 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
16364 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
16365 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
16366 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
16367
16368 I915_WRITE(PIPECONF(pipe), 0);
16369 POSTING_READ(PIPECONF(pipe));
16370
16371 intel_wait_for_pipe_scanline_stopped(crtc);
16372
16373 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
16374 POSTING_READ(DPLL(pipe));
16375 }
16376
16377 static void
16378 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
16379 {
16380 struct intel_crtc *crtc;
16381
16382 if (INTEL_GEN(dev_priv) >= 4)
16383 return;
16384
16385 for_each_intel_crtc(&dev_priv->drm, crtc) {
16386 struct intel_plane *plane =
16387 to_intel_plane(crtc->base.primary);
16388 struct intel_crtc *plane_crtc;
16389 enum pipe pipe;
16390
16391 if (!plane->get_hw_state(plane, &pipe))
16392 continue;
16393
16394 if (pipe == crtc->pipe)
16395 continue;
16396
16397 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
16398 plane->base.base.id, plane->base.name);
16399
16400 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16401 intel_plane_disable_noatomic(plane_crtc, plane);
16402 }
16403 }
16404
16405 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16406 {
16407 struct drm_device *dev = crtc->base.dev;
16408 struct intel_encoder *encoder;
16409
16410 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16411 return true;
16412
16413 return false;
16414 }
16415
16416 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16417 {
16418 struct drm_device *dev = encoder->base.dev;
16419 struct intel_connector *connector;
16420
16421 for_each_connector_on_encoder(dev, &encoder->base, connector)
16422 return connector;
16423
16424 return NULL;
16425 }
16426
16427 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
16428 enum pipe pch_transcoder)
16429 {
16430 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16431 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
16432 }
16433
16434 static void intel_sanitize_crtc(struct intel_crtc *crtc,
16435 struct drm_modeset_acquire_ctx *ctx)
16436 {
16437 struct drm_device *dev = crtc->base.dev;
16438 struct drm_i915_private *dev_priv = to_i915(dev);
16439 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
16440 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
16441
16442 	/* Clear any frame start delays (used for debugging) left behind by the BIOS */
16443 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
16444 i915_reg_t reg = PIPECONF(cpu_transcoder);
16445
16446 I915_WRITE(reg,
16447 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
16448 }
16449
16450 if (crtc_state->base.active) {
16451 struct intel_plane *plane;
16452
16453 /* Disable everything but the primary plane */
16454 for_each_intel_plane_on_crtc(dev, crtc, plane) {
16455 const struct intel_plane_state *plane_state =
16456 to_intel_plane_state(plane->base.state);
16457
16458 if (plane_state->base.visible &&
16459 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
16460 intel_plane_disable_noatomic(crtc, plane);
16461 }
16462
16463 /*
16464 * Disable any background color set by the BIOS, but enable the
16465 * gamma and CSC to match how we program our planes.
16466 */
16467 if (INTEL_GEN(dev_priv) >= 9)
16468 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
16469 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
16470 SKL_BOTTOM_COLOR_CSC_ENABLE);
16471 }
16472
16473 /* Adjust the state of the output pipe according to whether we
16474 * have active connectors/encoders. */
16475 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
16476 intel_crtc_disable_noatomic(&crtc->base, ctx);
16477
16478 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
16479 /*
16480 * We start out with underrun reporting disabled to avoid races.
16481 * For correct bookkeeping mark this on active crtcs.
16482 *
16483 * Also on gmch platforms we dont have any hardware bits to
16484 * disable the underrun reporting. Which means we need to start
16485 * out with underrun reporting disabled also on inactive pipes,
16486 * since otherwise we'll complain about the garbage we read when
16487 * e.g. coming up after runtime pm.
16488 *
16489 * No protection against concurrent access is required - at
16490 * worst a fifo underrun happens which also sets this to false.
16491 */
16492 crtc->cpu_fifo_underrun_disabled = true;
16493 /*
16494 * We track the PCH trancoder underrun reporting state
16495 * within the crtc. With crtc for pipe A housing the underrun
16496 * reporting state for PCH transcoder A, crtc for pipe B housing
16497 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
16498 * and marking underrun reporting as disabled for the non-existing
16499 * PCH transcoders B and C would prevent enabling the south
16500 * error interrupt (see cpt_can_enable_serr_int()).
16501 */
16502 if (has_pch_trancoder(dev_priv, crtc->pipe))
16503 crtc->pch_fifo_underrun_disabled = true;
16504 }
16505 }
16506
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
	 * try to disable everything in that case.
	 *
	 * FIXME would be nice to be able to sanitize this state
	 * without several WARNs, but for now let's take the easy
	 * road.
	 */
	return IS_GEN(dev_priv, 6) &&
		crtc_state->base.active &&
		crtc_state->shared_dpll &&
		crtc_state->port_clock == 0;
}

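/*
 * Sanitize a single encoder: if it claims active connectors but has no
 * active pipe behind it (e.g. after the resume register restore), shut
 * it down manually and clamp the connector state to off.
 */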
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->base.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}

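/*
 * Turn the VGA plane back off if something has enabled it behind our
 * back. This variant assumes the VGA power domain is already on; use
 * i915_redisable_vga() below when that is not guaranteed.
 */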
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev_priv);
	}
}

void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses.
	 */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_VGA);
	if (!wakeref)
		return;

	i915_redisable_vga_power_on(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}

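/*
 * Reconstruct the driver's software state (crtcs, planes, DPLLs,
 * encoders and connectors) from the current hardware state, typically
 * whatever the BIOS left enabled at boot or what survived a suspend.
 */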
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	readout_plane_state(dev_priv);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							 &pll->state.hw_state);

		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->base.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
		}

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}

static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these requires any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

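/*
 * Apply early display workarounds that must be in place before we
 * start sanitizing planes and pipes.
 */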
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

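/*
 * Helpers that force the transcoder select of a disabled PCH port back
 * to pipe A; see ibx_sanitize_pch_ports() below for why this is needed.
 */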
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = I915_READ(hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
		      port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	I915_WRITE(hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = I915_READ(dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
		      port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	I915_WRITE(dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB is multiplexed with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scans out the current hw modeset state and sanitizes it to the
 * current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->base.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}

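/*
 * Restore the atomic state saved at suspend time (if any), retrying
 * the modeset locking until we no longer hit a deadlock.
 */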
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_modeset_driver_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Shut down interrupts and polling as the first thing to avoid
	 * creating havoc. Too much stuff here (turning off connectors, ...)
	 * would experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(dev_priv);

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_gmbus_teardown(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}

/*
 * Set the VGA decode state - true == enable VGA decode.
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned int reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

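/*
 * Display error state capture: records a snapshot of the key display
 * registers (pipes, planes, cursors, transcoders), presumably for use
 * by the error capture code when CONFIG_DRM_I915_CAPTURE_ERROR is set.
 */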
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif