1 /*
2 * Copyright © 2006-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "intel_de.h"
25 #include "intel_display_types.h"
26 #include "intel_dpio_phy.h"
27 #include "intel_dpll.h"
28 #include "intel_dpll_mgr.h"
29
30 /**
31 * DOC: Display PLLs
32 *
33 * Display PLLs used for driving outputs vary by platform. While some have
34 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
35 * from a pool. In the latter scenario, it is possible that multiple pipes
36 * share a PLL if their configurations match.
37 *
38 * This file provides an abstraction over display PLLs. The function
39 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
40 * users of a PLL are tracked and that tracking is integrated with the atomic
41 * modset interface. During an atomic operation, required PLLs can be reserved
42 * for a given CRTC and encoder configuration by calling
43 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
44 * with intel_release_shared_dplls().
45 * Changes to the users are first staged in the atomic state, and then made
46 * effective by calling intel_shared_dpll_swap_state() during the atomic
47 * commit phase.
48 */
49
/*
 * struct intel_dpll_mgr - per-platform shared DPLL management descriptor
 *
 * Bundles the platform's table of available DPLLs with the hooks the
 * shared-DPLL framework calls during atomic check/commit.
 */
struct intel_dpll_mgr {
	/* table of PLLs available on this platform */
	const struct dpll_info *dpll_info;

	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic state */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* not set by all platforms (e.g. pch_pll_mgr leaves it NULL) */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional hook to refresh cached reference clock values */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log a PLL hw state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
65
66 static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private * dev_priv,struct intel_shared_dpll_state * shared_dpll)67 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
68 struct intel_shared_dpll_state *shared_dpll)
69 {
70 enum intel_dpll_id i;
71
72 /* Copy shared dpll state */
73 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
74 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
75
76 shared_dpll[i] = pll->state;
77 }
78 }
79
80 static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state * s)81 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
82 {
83 struct intel_atomic_state *state = to_intel_atomic_state(s);
84
85 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
86
87 if (!state->dpll_set) {
88 state->dpll_set = true;
89
90 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
91 state->shared_dpll);
92 }
93
94 return state->shared_dpll;
95 }
96
97 /**
98 * intel_get_shared_dpll_by_id - get a DPLL given its id
99 * @dev_priv: i915 device instance
100 * @id: pll id
101 *
102 * Returns:
103 * A pointer to the DPLL with @id
104 */
105 struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private * dev_priv,enum intel_dpll_id id)106 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
107 enum intel_dpll_id id)
108 {
109 return &dev_priv->dpll.shared_dplls[id];
110 }
111
112 /**
113 * intel_get_shared_dpll_id - get the id of a DPLL
114 * @dev_priv: i915 device instance
115 * @pll: the DPLL
116 *
117 * Returns:
118 * The id of @pll
119 */
120 enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)121 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
122 struct intel_shared_dpll *pll)
123 {
124 long pll_idx = pll - dev_priv->dpll.shared_dplls;
125
126 if (drm_WARN_ON(&dev_priv->drm,
127 pll_idx < 0 ||
128 pll_idx >= dev_priv->dpll.num_shared_dpll))
129 return -1;
130
131 return pll_idx;
132 }
133
/* For ILK+ */
/*
 * Warn (without failing hard) if the PLL's hardware enable state does
 * not match the expected software @state.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Nothing to check without a PLL. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* Read back the actual enable state from the hardware. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
151
icl_pll_id_to_tc_port(enum intel_dpll_id id)152 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
153 {
154 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
155 }
156
icl_tc_port_to_pll_id(enum tc_port tc_port)157 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
158 {
159 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
160 }
161
162 static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private * i915,struct intel_shared_dpll * pll)163 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
164 struct intel_shared_dpll *pll)
165 {
166 if (IS_DG1(i915))
167 return DG1_DPLL_ENABLE(pll->info->id);
168 else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
169 return MG_PLL_ENABLE(0);
170
171 return ICL_DPLL_ENABLE(pll->info->id);
172 }
173
174 static i915_reg_t
intel_tc_pll_enable_reg(struct drm_i915_private * i915,struct intel_shared_dpll * pll)175 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
176 struct intel_shared_dpll *pll)
177 {
178 const enum intel_dpll_id id = pll->info->id;
179 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
180
181 if (IS_ALDERLAKE_P(i915))
182 return ADLP_PORTTC_PLL_ENABLE(tc_port);
183
184 return MG_PLL_ENABLE(tc_port);
185 }
186
/**
 * intel_prepare_shared_dpll - call a dpll's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared dpll
 *
 * This calls the PLL's prepare hook if it has one and if the PLL is not
 * already enabled. The prepare hook is platform specific.
 */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* A PLL being prepared must have at least one reserved user. */
	drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
	if (!pll->active_mask) {
		/* Only prepare while no pipe is actively using the PLL. */
		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
		drm_WARN_ON(&dev_priv->drm, pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->info->funcs->prepare(dev_priv, pll);
	}
	mutex_unlock(&dev_priv->dpll.lock);
}
214
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	old_mask = pll->active_mask;

	/* This pipe must have reserved the PLL and not already be using it. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	if (old_mask) {
		/* Another pipe already enabled the PLL; just sanity check. */
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First active user: actually turn the hardware on. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
260
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* The pipe must currently be an active user of this PLL. */
	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	/* Only turn the hardware off once the last active user is gone. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
306
/*
 * Find a shared DPLL among those in @dpll_mask that can drive the
 * configuration in @pll_state: either one whose staged hw state matches
 * exactly (allowing sharing), or failing that, a currently unused one.
 * Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* An exact hw-state match means the PLL can be shared. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
355
/*
 * Stage a reference from @crtc to @pll in the atomic state. The first
 * referencing pipe also stages the PLL's new hw state, which subsequent
 * sharers must then match exactly (see intel_find_shared_dpll()).
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* First user sets the hw state; later sharers keep it unchanged. */
	if (shared_dpll[id].pipe_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
		pipe_name(crtc->pipe));

	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
}
376
intel_unreference_shared_dpll(struct intel_atomic_state * state,const struct intel_crtc * crtc,const struct intel_shared_dpll * pll)377 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
378 const struct intel_crtc *crtc,
379 const struct intel_shared_dpll *pll)
380 {
381 struct intel_shared_dpll_state *shared_dpll;
382
383 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
384 shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
385 }
386
intel_put_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc)387 static void intel_put_dpll(struct intel_atomic_state *state,
388 struct intel_crtc *crtc)
389 {
390 const struct intel_crtc_state *old_crtc_state =
391 intel_atomic_get_old_crtc_state(state, crtc);
392 struct intel_crtc_state *new_crtc_state =
393 intel_atomic_get_new_crtc_state(state, crtc);
394
395 new_crtc_state->shared_dpll = NULL;
396
397 if (!old_crtc_state->shared_dpll)
398 return;
399
400 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
401 }
402
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
	enum intel_dpll_id i;

	/* Nothing was staged; leave the committed state untouched. */
	if (!state->dpll_set)
		return;

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		struct intel_shared_dpll *pll =
			&dev_priv->dpll.shared_dplls[i];

		/* Complete swap: the previous state ends up in @state. */
		swap(pll->state, shared_dpll[i]);
	}
}
430
/*
 * Read the IBX PCH DPLL registers into @hw_state. Returns true iff the
 * PLL is currently enabled (DPLL_VCO_ENABLE set).
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the power domain is off; registers are unreadable then. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
453
/* Program the FP0/FP1 divisor registers ahead of enabling the PCH DPLL. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
462
/*
 * Warn unless the PCH reference clock is enabled. Only meaningful on
 * IBX/CPT PCHs (asserted below).
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	/* Any of the three source-select fields counts as "refclk active". */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
475
/*
 * Enable the PCH DPLL, including the double-write sequence needed to
 * latch the pixel multiplier once the clocks are stable.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
499
/* Disable the PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
509
/*
 * Reserve a PCH DPLL for @crtc. On IBX the mapping from pipe to PLL is
 * fixed; otherwise (CPT) any matching or free PLL of the pair is used.
 * Returns false if no PLL could be reserved.
 */
static bool ibx_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return false;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
547
ibx_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)548 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
549 const struct intel_dpll_hw_state *hw_state)
550 {
551 drm_dbg_kms(&dev_priv->drm,
552 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
553 "fp0: 0x%x, fp1: 0x%x\n",
554 hw_state->dpll,
555 hw_state->dpll_md,
556 hw_state->fp0,
557 hw_state->fp1);
558 }
559
/* Hardware access vtable for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
566
/* The two PCH DPLLs; the list is terminated by the zeroed sentinel. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
572
/* Shared-DPLL manager for PCH (IBX/CPT) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
579
/*
 * Program the WRPLL control register from the staged hw state (the
 * stored value already carries the enable bit) and wait 20us to settle.
 */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
589
/*
 * Program the SPLL control register from the staged hw state (the
 * stored value already carries the enable bit) and wait 20us to settle.
 */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
597
/* Disable a WRPLL and, if it was the last SSC user, refresh PCH refclk. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* Clear only the enable bit, preserving the other control bits. */
	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
615
/* Disable the SPLL and, if it was the last SSC user, refresh PCH refclk. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* Clear only the enable bit, preserving the other control bits. */
	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
633
/*
 * Read the WRPLL control register into @hw_state. Returns true iff the
 * PLL is currently enabled (WRPLL_PLL_ENABLE set).
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the power domain is off; registers are unreadable then. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
654
/*
 * Read the SPLL control register into @hw_state. Returns true iff the
 * PLL is currently enabled (SPLL_PLL_ENABLE set).
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the power domain is off; registers are unreadable then. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
674
/* LC PLL reference; LC_FREQ_2K is 2 * LC_FREQ in the freq2k unit used
 * by hsw_ddi_calculate_wrpll() (clock / 100). */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range and step */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* A candidate WRPLL divider triple: post (p), feedback (n2), ref (r2). */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
691
/*
 * Look up the PPM error budget allowed when choosing WRPLL dividers for
 * a given pixel clock. Clocks not in the table get the default 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{ 25175000, 0 },     { 25200000, 0 },     { 27000000, 0 },
		{ 27027000, 0 },     { 37762500, 0 },     { 37800000, 0 },
		{ 40500000, 0 },     { 40541000, 0 },     { 54000000, 0 },
		{ 54054000, 0 },     { 59341000, 0 },     { 59400000, 0 },
		{ 72000000, 0 },     { 74176000, 0 },     { 74250000, 0 },
		{ 81000000, 0 },     { 81081000, 0 },     { 89012000, 0 },
		{ 89100000, 0 },     { 108000000, 0 },    { 108108000, 0 },
		{ 111264000, 0 },    { 111375000, 0 },    { 148352000, 0 },
		{ 148500000, 0 },    { 162000000, 0 },    { 162162000, 0 },
		{ 222525000, 0 },    { 222750000, 0 },    { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
765
/*
 * Compare the candidate divider triple (r2, n2, p) against the current
 * best for target frequency @freq2k, updating *@best in place when the
 * candidate wins per the selection rules described below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * a/b scale the budget, c/d scale the absolute frequency error,
	 * both by freq2k * p * r2 for candidate/best respectively, so
	 * the ppm comparison below needs no division.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
825
/*
 * Exhaustively search for the WRPLL divider triple (r2, n2, p) that
 * best hits @clock within the per-frequency ppm budget.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	/* Target clock scaled into the LC_FREQ_2K unit (see defines above). */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Try every post divider with this (r2, n2) pair. */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
890
891 static struct intel_shared_dpll *
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc)892 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
893 struct intel_crtc *crtc)
894 {
895 struct intel_crtc_state *crtc_state =
896 intel_atomic_get_new_crtc_state(state, crtc);
897 struct intel_shared_dpll *pll;
898 u32 val;
899 unsigned int p, n2, r2;
900
901 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
902
903 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
904 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
905 WRPLL_DIVIDER_POST(p);
906
907 crtc_state->dpll_hw_state.wrpll = val;
908
909 pll = intel_find_shared_dpll(state, crtc,
910 &crtc_state->dpll_hw_state,
911 BIT(DPLL_ID_WRPLL2) |
912 BIT(DPLL_ID_WRPLL1));
913
914 if (!pll)
915 return NULL;
916
917 return pll;
918 }
919
/*
 * Decode the WRPLL reference selection and dividers from @pll_state and
 * return the resulting port clock in kHz (0 on an unknown ref select).
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	/* Determine which reference clock feeds the WRPLL. */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the control value. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
959
960 static struct intel_shared_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state * crtc_state)961 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
962 {
963 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
964 struct intel_shared_dpll *pll;
965 enum intel_dpll_id pll_id;
966 int clock = crtc_state->port_clock;
967
968 switch (clock / 2) {
969 case 81000:
970 pll_id = DPLL_ID_LCPLL_810;
971 break;
972 case 135000:
973 pll_id = DPLL_ID_LCPLL_1350;
974 break;
975 case 270000:
976 pll_id = DPLL_ID_LCPLL_2700;
977 break;
978 default:
979 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
980 clock);
981 return NULL;
982 }
983
984 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
985
986 if (!pll)
987 return NULL;
988
989 return pll;
990 }
991
hsw_ddi_lcpll_get_freq(struct drm_i915_private * i915,const struct intel_shared_dpll * pll,const struct intel_dpll_hw_state * pll_state)992 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
993 const struct intel_shared_dpll *pll,
994 const struct intel_dpll_hw_state *pll_state)
995 {
996 int link_clock = 0;
997
998 switch (pll->info->id) {
999 case DPLL_ID_LCPLL_810:
1000 link_clock = 81000;
1001 break;
1002 case DPLL_ID_LCPLL_1350:
1003 link_clock = 135000;
1004 break;
1005 case DPLL_ID_LCPLL_2700:
1006 link_clock = 270000;
1007 break;
1008 default:
1009 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1010 break;
1011 }
1012
1013 return link_clock * 2;
1014 }
1015
/*
 * Reserve the SPLL for @crtc. Used by the analog (CRT) output path in
 * hsw_get_dpll(); the only supported link rate is 1.35 GHz.
 */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return NULL;

	/* program dpll_hw_state before looking for a matching shared PLL */
	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
					 SPLL_REF_MUXED_SSC;

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
1032
hsw_ddi_spll_get_freq(struct drm_i915_private * i915,const struct intel_shared_dpll * pll,const struct intel_dpll_hw_state * pll_state)1033 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1034 const struct intel_shared_dpll *pll,
1035 const struct intel_dpll_hw_state *pll_state)
1036 {
1037 int link_clock = 0;
1038
1039 switch (pll_state->spll & SPLL_FREQ_MASK) {
1040 case SPLL_FREQ_810MHz:
1041 link_clock = 81000;
1042 break;
1043 case SPLL_FREQ_1350MHz:
1044 link_clock = 135000;
1045 break;
1046 case SPLL_FREQ_2700MHz:
1047 link_clock = 270000;
1048 break;
1049 default:
1050 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1051 break;
1052 }
1053
1054 return link_clock * 2;
1055 }
1056
/*
 * HSW/BDW .get_dplls hook: pick and reserve the DPLL for @crtc based on
 * output type — WRPLL for HDMI, LCPLL for DP, SPLL for analog (CRT).
 * Returns false if the output type is unhandled or no PLL is available.
 */
static bool hsw_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* start from a clean slate; the helpers below fill dpll_hw_state */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);
	else
		return false;

	if (!pll)
		return false;

	/* record this CRTC as a user of the PLL in the atomic state */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1087
/*
 * Cache the HSW/BDW reference clock frequencies (kHz). The non-SSC
 * reference depends on the FUSE_STRAP3 clock-select strap.
 */
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 135000;
	/* Non-SSC is only used on non-ULT HSW. */
	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
		i915->dpll.ref_clks.nssc = 24000;
	else
		i915->dpll.ref_clks.nssc = 135000;
}
1097
/* Debug dump of the HSW/BDW PLL hw state (WRPLL and SPLL registers). */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1104
/* Hook tables for the fully programmable HSW/BDW PLLs */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1118
/*
 * The LCPLLs are fixed-frequency and marked INTEL_DPLL_ALWAYS_ON in
 * hsw_plls[], so the enable/disable hooks are intentionally no-ops and
 * get_hw_state unconditionally reports the PLL as enabled.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}

static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1142
/* All PLLs available on HSW/BDW; the fixed LCPLLs are always-on */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
	{ "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
	{ "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
	{ "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1152
/* HSW/BDW shared DPLL manager */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1160
/* Per-PLL register set for the SKL DPLLs */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1191
/*
 * Program this PLL's 6-bit field group (HDMI mode, SSC, link rate) in
 * the shared DPLL_CTRL1 register via read-modify-write, then post.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* clear only this PLL's fields; other PLLs share the register */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	/* ctrl1 in hw_state is stored unshifted (as for PLL id 0) */
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1208
/*
 * Enable a SKL DPLL (1-3): program CTRL1 and the CFGCR registers, set
 * the enable bit, then wait up to 5 ms for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1229
/*
 * DPLL0 is enabled by the CDCLK code; here we only program its CTRL1
 * fields (link rate etc.).
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1235
/* Disable a SKL DPLL (1-3) by clearing the enable bit in its CTL register. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1247
/* DPLL0 drives CDCLK and must never be turned off here; no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1252
/*
 * Read back the hw state of a SKL DPLL (1-3) into @hw_state.
 * Returns false if display power is down or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* registers are only accessible with the power domain enabled */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's unshifted 6-bit CTRL1 field group */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1289
/*
 * Read back DPLL0's hw state. Only CTRL1 is captured — DPLL0 has no
 * CFGCR registers (no HDMI mode). Warns if the PLL is unexpectedly off.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1322
/* Best divider candidate found so far by the SKL WRPLL search */
struct skl_wrpll_context {
	u64 min_deviation; /* current minimal deviation */
	u64 central_freq; /* chosen central freq */
	u64 dco_freq; /* chosen dco freq */
	unsigned int p; /* chosen divider */
};
1329
skl_wrpll_context_init(struct skl_wrpll_context * ctx)1330 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1331 {
1332 memset(ctx, 0, sizeof(*ctx));
1333
1334 ctx->min_deviation = U64_MAX;
1335 }
1336
1337 /* DCO freq must be within +1%/-6% of the DCO central freq */
1338 #define SKL_DCO_MAX_PDEVIATION 100
1339 #define SKL_DCO_MAX_NDEVIATION 600
1340
/*
 * Evaluate one candidate divider: compute the DCO deviation from the
 * central frequency (in 0.01% units) and record the candidate if it is
 * within tolerance and better than the best seen so far.
 */
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
				  u64 central_freq,
				  u64 dco_freq,
				  unsigned int divider)
{
	u64 deviation, max_deviation;

	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
			      central_freq);

	/* DCO above the central freq tolerates +1%, below it -6% */
	max_deviation = dco_freq >= central_freq ? SKL_DCO_MAX_PDEVIATION :
						   SKL_DCO_MAX_NDEVIATION;

	if (deviation < max_deviation && deviation < ctx->min_deviation) {
		ctx->min_deviation = deviation;
		ctx->central_freq = central_freq;
		ctx->dco_freq = dco_freq;
		ctx->p = divider;
	}
}
1369
/*
 * Factor the total divider @p into the hardware's P0 (pdiv), P1 (qdiv)
 * and P2 (kdiv) multipliers such that p = p0 * p1 * p2.
 *
 * The outputs are left untouched for values of @p outside the supported
 * divider lists, so callers must pre-initialize them.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			/* small halves map directly onto P2 */
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* otherwise peel a factor of 2, 3 or 7 into P0 */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1418
/* Raw field values to be programmed into DPLL_CFGCR1/CFGCR2 */
struct skl_wrpll_params {
	u32 dco_fraction;  /* 15-bit DCO fractional part */
	u32 dco_integer;   /* DCO integer multiplier */
	u32 qdiv_ratio;    /* P1 */
	u32 qdiv_mode;     /* 0 = bypass qdiv, 1 = use qdiv_ratio */
	u32 kdiv;          /* encoded P2 */
	u32 pdiv;          /* encoded P0 */
	u32 central_freq;  /* encoded DCO central frequency */
};
1428
/*
 * Translate the chosen dividers (p0/p1/p2), AFE clock and DCO central
 * frequency into the raw register field values in @params.
 *
 * @afe_clock and @central_freq are in Hz; @ref_clock is in kHz.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/*
	 * Encode the DCO central frequency; callers only pass the three
	 * values from skl_ddi_calculate_wrpll()'s dco_central_freq[].
	 */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
		break;
	}

	/* Encode P0 (PDiv) */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Encode P2 (KDiv) */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* QDiv ratio is programmed directly; mode 0 bypasses the divider */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1496
/*
 * Find WRPLL dividers for the given pixel clock: try every supported
 * divider against each DCO central frequency, keep the candidate with
 * the smallest in-tolerance deviation (even dividers preferred), then
 * translate the winner into register fields via
 * skl_wrpll_params_populate().
 *
 * Returns false if no divider keeps the DCO within tolerance.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1570
/*
 * Compute and store the SKL WRPLL (HDMI) hw state for @crtc_state:
 * ctrl1 with HDMI mode set, and cfgcr1/cfgcr2 from the divider search.
 * Returns false if no valid dividers exist for the port clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz; the divider search works in Hz */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1608
/*
 * Invert skl_ddi_hdmi_pll_dividers(): decode p0/p1/p2 and the DCO
 * frequency from cfgcr1/cfgcr2 and return the pixel clock (DCO / (p*5)).
 * Returns 0 on undecodable divider encodings.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv is bypassed (p1 == 1) unless QDIV_MODE is set */
	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO = ref * (integer + fraction/2^15) */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1679
/*
 * Store the SKL DP hw state for @crtc_state: ctrl1 with the link rate
 * encoded. Always returns true.
 *
 * NOTE(review): port clocks outside the listed rates fall through the
 * switch leaving no link-rate bits set — presumably callers never pass
 * such rates; confirm against the DP rate filtering upstream.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
	/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1719
skl_ddi_lcpll_get_freq(struct drm_i915_private * i915,const struct intel_shared_dpll * pll,const struct intel_dpll_hw_state * pll_state)1720 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1721 const struct intel_shared_dpll *pll,
1722 const struct intel_dpll_hw_state *pll_state)
1723 {
1724 int link_clock = 0;
1725
1726 switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1727 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1728 case DPLL_CTRL1_LINK_RATE_810:
1729 link_clock = 81000;
1730 break;
1731 case DPLL_CTRL1_LINK_RATE_1080:
1732 link_clock = 108000;
1733 break;
1734 case DPLL_CTRL1_LINK_RATE_1350:
1735 link_clock = 135000;
1736 break;
1737 case DPLL_CTRL1_LINK_RATE_1620:
1738 link_clock = 162000;
1739 break;
1740 case DPLL_CTRL1_LINK_RATE_2160:
1741 link_clock = 216000;
1742 break;
1743 case DPLL_CTRL1_LINK_RATE_2700:
1744 link_clock = 270000;
1745 break;
1746 default:
1747 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1748 break;
1749 }
1750
1751 return link_clock * 2;
1752 }
1753
/*
 * SKL .get_dplls hook: compute the hw state for HDMI or DP, then reserve
 * a matching PLL — DPLL0 for eDP (it also drives CDCLK), DPLL1-3
 * otherwise. Returns false on failure.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		/* unsupported output type */
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	/* record this CRTC as a user of the PLL in the atomic state */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1802
skl_ddi_pll_get_freq(struct drm_i915_private * i915,const struct intel_shared_dpll * pll,const struct intel_dpll_hw_state * pll_state)1803 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1804 const struct intel_shared_dpll *pll,
1805 const struct intel_dpll_hw_state *pll_state)
1806 {
1807 /*
1808 * ctrl1 register is already shifted for each pll, just use 0 to get
1809 * the internal shift for each field
1810 */
1811 if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1812 return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1813 else
1814 return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1815 }
1816
/* Cache the SKL PLL reference: the CDCLK reference clock (kHz). */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1822
/* Debug dump of the SKL PLL hw state (ctrl1/cfgcr1/cfgcr2). */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1832
/* Hooks for DPLL1-3 (full programming) and DPLL0 (CTRL1 only, always on) */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1846
/* All SKL DPLLs; DPLL0 also drives CDCLK and so is always-on */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1854
/* SKL shared DPLL manager */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1862
/*
 * Enable a BXT/GLK port PLL: select the reference, (on GLK) power up
 * the PLL, program all divider/coefficient registers from the cached
 * hw_state, trigger recalibration, set the enable bit and wait for
 * lock, then program the lane staggering. The register write order
 * follows the hardware programming sequence and must not be changed.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		/* GLK requires an explicit PLL power-up handshake */
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1983
/*
 * Disable the BXT/GLK port PLL mapped 1:1 to this shared DPLL's id.
 *
 * Clears PORT_PLL_ENABLE; on GLK additionally drops the PLL power
 * enable and waits for the hardware power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	/* Posting read flushes the disable before the power-down below. */
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		/* Hardware is expected to report power-down within 200 us. */
		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2006
/*
 * Read back the current hardware state of a BXT/GLK port PLL into
 * @hw_state, masking each register down to the bits the driver
 * programs so the result can be compared against the software state.
 *
 * Returns true if the PLL is enabled and @hw_state was filled in,
 * false if the PLL is disabled or display power is not up.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* The registers are only accessible with display core power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2086
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* link clock (kHz) this entry applies to */
	u32 p1;			/* post divider 1 */
	u32 p2;			/* post divider 2 */
	u32 m2_int;		/* feedback divider M2, integer part */
	u32 m2_frac;		/* feedback divider M2, 22-bit fractional part */
	bool m2_frac_en;	/* enable the fractional part of M2 */
	u32 n;			/* reference divider */

	int vco;		/* resulting VCO frequency (kHz) */
};
2099
/*
 * pre-calculated values for DP linkrates
 * Initializer order: clock, p1, p2, m2_int, m2_frac, m2_frac_en, n.
 * .vco is left zero here and computed in bxt_ddi_dp_pll_dividers().
 */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2110
/*
 * Compute the PLL dividers for an HDMI link and fill @clk_div.
 * Returns false if no divider combination yields the requested
 * port clock.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
			crtc_state->port_clock,
			pipe_name(crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	/* M1 is fixed at 2 on this PLL; anything else is a bug upstream. */
	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* Split M2 into its integer and 22-bit fractional parts. */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}
2143
bxt_ddi_dp_pll_dividers(struct intel_crtc_state * crtc_state,struct bxt_clk_div * clk_div)2144 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2145 struct bxt_clk_div *clk_div)
2146 {
2147 int clock = crtc_state->port_clock;
2148 int i;
2149
2150 *clk_div = bxt_dp_clk_val[0];
2151 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2152 if (bxt_dp_clk_val[i].clock == clock) {
2153 *clk_div = bxt_dp_clk_val[i];
2154 break;
2155 }
2156 }
2157
2158 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2159 }
2160
/*
 * Translate the clock dividers in @clk_div into the raw register
 * values stored in crtc_state->dpll_hw_state.
 *
 * Loop-filter coefficients and the target count are chosen by VCO
 * frequency band; lane staggering is chosen by port clock.
 *
 * Returns false when the VCO is outside every supported band.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/*
	 * Coefficients per VCO band (kHz). Note exactly 5400000 is
	 * deliberately excluded from the middle band and handled by its
	 * own branch below.
	 */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger value by port clock (kHz), larger clock = larger stagger. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2230
2231 static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state * crtc_state)2232 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2233 {
2234 struct bxt_clk_div clk_div = {};
2235
2236 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2237
2238 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2239 }
2240
2241 static bool
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state * crtc_state)2242 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2243 {
2244 struct bxt_clk_div clk_div = {};
2245
2246 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2247
2248 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2249 }
2250
bxt_ddi_pll_get_freq(struct drm_i915_private * i915,const struct intel_shared_dpll * pll,const struct intel_dpll_hw_state * pll_state)2251 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2252 const struct intel_shared_dpll *pll,
2253 const struct intel_dpll_hw_state *pll_state)
2254 {
2255 struct dpll clock;
2256
2257 clock.m1 = 2;
2258 clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
2259 if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2260 clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
2261 clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
2262 clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
2263 clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
2264
2265 return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
2266 }
2267
/*
 * BXT ->get_dplls() hook: compute the PLL state for the CRTC and
 * reserve the PLL that is hardwired 1:1 to the encoder's port.
 *
 * Returns false if the HDMI/DP divider calculation fails.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	/* Track this CRTC as a user of the PLL in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2300
bxt_update_dpll_ref_clks(struct drm_i915_private * i915)2301 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2302 {
2303 i915->dpll.ref_clks.ssc = 100000;
2304 i915->dpll.ref_clks.nssc = 100000;
2305 /* DSI non-SSC ref 19.2MHz */
2306 }
2307
bxt_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)2308 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2309 const struct intel_dpll_hw_state *hw_state)
2310 {
2311 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2312 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2313 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2314 hw_state->ebb0,
2315 hw_state->ebb4,
2316 hw_state->pll0,
2317 hw_state->pll1,
2318 hw_state->pll2,
2319 hw_state->pll3,
2320 hw_state->pll6,
2321 hw_state->pll8,
2322 hw_state->pll9,
2323 hw_state->pll10,
2324 hw_state->pcsdw12);
2325 }
2326
/* Hooks shared by all three BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};

/* One PLL per port A/B/C; the empty entry terminates the list. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

/* PLL manager entry points for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2348
/*
 * Split the total WRPLL post divider @bestdiv into its P, Q and K
 * factors such that P * Q * K == bestdiv. The factoring rules follow
 * the ICL WRPLL programming sequence; @bestdiv is expected to come
 * from the divider candidate table in icl_calc_wrpll().
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3/5/7 map directly, 9/15/21 use K = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2387
/*
 * Encode the logical P/Q/K dividers and the DCO frequency into the
 * register-level fields of @params. The switch statements translate
 * logical divider values into their hardware encodings; an unexpected
 * value triggers a WARN and leaves the field unset.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* logical K divider -> register encoding */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* logical P divider -> register encoding */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Q != 1 is only legal in combination with K == 2. */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio as a 15-bit fixed-point fraction of the reference. */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2435
2436 /*
2437 * Display WA #22010492432: ehl, tgl, adl-p
2438 * Program half of the nominal DCO divider fraction value.
2439 */
2440 static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private * i915)2441 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2442 {
2443 return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2444 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2445 IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
2446 i915->dpll.ref_clks.nssc == 38400;
2447 }
2448
/* Pairs a DP link clock (kHz) with its pre-baked WRPLL register values. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2453
/*
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2484
2485
/*
 * Also used for 38.4 MHz values: the DPLL automatically divides a
 * 38.4 MHz reference by 2, so the effective reference is 19.2 MHz.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2513
/* ICL Thunderbolt PLL parameters, 24 MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* ICL Thunderbolt PLL parameters, 19.2 MHz (and 38.4 MHz) reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL Thunderbolt PLL parameters, 19.2 MHz (and 38.4 MHz) reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL Thunderbolt PLL parameters, 24 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2534
icl_calc_dp_combo_pll(struct intel_crtc_state * crtc_state,struct skl_wrpll_params * pll_params)2535 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2536 struct skl_wrpll_params *pll_params)
2537 {
2538 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2539 const struct icl_combo_pll_params *params =
2540 dev_priv->dpll.ref_clks.nssc == 24000 ?
2541 icl_dp_combo_pll_24MHz_values :
2542 icl_dp_combo_pll_19_2MHz_values;
2543 int clock = crtc_state->port_clock;
2544 int i;
2545
2546 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2547 if (clock == params[i].clock) {
2548 *pll_params = params[i].wrpll;
2549 return true;
2550 }
2551 }
2552
2553 MISSING_CASE(clock);
2554 return false;
2555 }
2556
icl_calc_tbt_pll(struct intel_crtc_state * crtc_state,struct skl_wrpll_params * pll_params)2557 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2558 struct skl_wrpll_params *pll_params)
2559 {
2560 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2561
2562 if (DISPLAY_VER(dev_priv) >= 12) {
2563 switch (dev_priv->dpll.ref_clks.nssc) {
2564 default:
2565 MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2566 fallthrough;
2567 case 19200:
2568 case 38400:
2569 *pll_params = tgl_tbt_pll_19_2MHz_values;
2570 break;
2571 case 24000:
2572 *pll_params = tgl_tbt_pll_24MHz_values;
2573 break;
2574 }
2575 } else {
2576 switch (dev_priv->dpll.ref_clks.nssc) {
2577 default:
2578 MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2579 fallthrough;
2580 case 19200:
2581 case 38400:
2582 *pll_params = icl_tbt_pll_19_2MHz_values;
2583 break;
2584 case 24000:
2585 *pll_params = icl_tbt_pll_24MHz_values;
2586 break;
2587 }
2588 }
2589
2590 return true;
2591 }
2592
/*
 * ->get_freq() stub for the Thunderbolt PLL. There is no single
 * output frequency to report, so calling this is a driver bug.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2605
icl_wrpll_ref_clock(struct drm_i915_private * i915)2606 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2607 {
2608 int ref_clock = i915->dpll.ref_clks.nssc;
2609
2610 /*
2611 * For ICL+, the spec states: if reference frequency is 38.4,
2612 * use 19.2 because the DPLL automatically divides that by 2.
2613 */
2614 if (ref_clock == 38400)
2615 ref_clock = 19200;
2616
2617 return ref_clock;
2618 }
2619
/*
 * Compute WRPLL parameters for the requested port clock: pick the
 * divider from the candidate list whose resulting DCO frequency is
 * closest to the middle of the allowed DCO range, then split it into
 * P/Q/K and encode everything into @wrpll_params.
 *
 * Returns false when no candidate divider lands inside the DCO range.
 */
static bool
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int ref_clock = icl_wrpll_ref_clock(i915);
	/* AFE clock is 5x the port (symbol) clock. */
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	/* Candidate dividers: even values first, then the odd ones. */
	static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
					18, 20, 24, 28, 30, 32, 36, 40,
					42, 44, 48, 50, 52, 54, 56, 60,
					64, 66, 68, 70, 72, 76, 78, 80,
					84, 88, 90, 92, 96, 98, 100, 102,
					3, 5, 7, 9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);

			/* Strict '<': on a tie the earlier divider wins. */
			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return false;

	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return true;
}
2663
/*
 * Derive the combo PLL output frequency (kHz) from the saved CFGCR
 * register state: decode P/Q/K dividers, rebuild the DCO frequency
 * from its integer and 15-bit fractional parts, then divide down.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only applies when QDIV_MODE is set; otherwise 1. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* register encoding -> logical P divider */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* register encoding -> logical K divider */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* The WA programs half the nominal fraction; undo that here. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* fraction is in units of ref_clock / 2^15 */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* DCO -> port clock: divide by P*Q*K and the fixed 5x AFE factor. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2724
/*
 * Encode @pll_params into the CFGCR0/CFGCR1 register values in
 * @pll_state, applying the EHL/TGL/ADL-P DCO-fraction workaround
 * (program half the nominal fraction) where needed.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	/* Display WA #22010492432: write half the nominal fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
2749
/*
 * Find DCO dividers (div1 a.k.a. hsdiv and div2 a.k.a. dsdiv) such
 * that div1 * div2 * clock_khz * 5 lands inside the allowed DCO
 * range, and fill in the derived refclkin/clktop2 register values.
 *
 * Returns false when no divider pair yields a legal DCO frequency.
 *
 * Improvement: make the div1 candidate table static const instead of
 * a stack array re-initialized on every call.
 */
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				     u32 *target_dco_khz,
				     struct intel_dpll_hw_state *state,
				     bool is_dkl)
{
	u32 dco_min_freq, dco_max_freq;
	/* div1 candidates, tried largest first */
	static const int div1_vals[] = { 7, 5, 3, 2 };
	unsigned int i;
	int div2;

	/* DP requires exactly an 8.1 GHz DCO; HDMI allows a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* logical div1 -> hsdiv register encoding */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return true;
		}
	}

	return false;
}
2825
2826 /*
2827 * The specification for this function uses real numbers, so the math had to be
2828 * adapted to integer-only calculation, that's why it looks so different.
2829 */
icl_calc_mg_pll_state(struct intel_crtc_state * crtc_state,struct intel_dpll_hw_state * pll_state)2830 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2831 struct intel_dpll_hw_state *pll_state)
2832 {
2833 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2834 int refclk_khz = dev_priv->dpll.ref_clks.nssc;
2835 int clock = crtc_state->port_clock;
2836 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2837 u32 iref_ndiv, iref_trim, iref_pulse_w;
2838 u32 prop_coeff, int_coeff;
2839 u32 tdc_targetcnt, feedfwgain;
2840 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2841 u64 tmp;
2842 bool use_ssc = false;
2843 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2844 bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2845
2846 memset(pll_state, 0, sizeof(*pll_state));
2847
2848 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2849 pll_state, is_dkl)) {
2850 drm_dbg_kms(&dev_priv->drm,
2851 "Failed to find divisors for clock %d\n", clock);
2852 return false;
2853 }
2854
2855 m1div = 2;
2856 m2div_int = dco_khz / (refclk_khz * m1div);
2857 if (m2div_int > 255) {
2858 if (!is_dkl) {
2859 m1div = 4;
2860 m2div_int = dco_khz / (refclk_khz * m1div);
2861 }
2862
2863 if (m2div_int > 255) {
2864 drm_dbg_kms(&dev_priv->drm,
2865 "Failed to find mdiv for clock %d\n",
2866 clock);
2867 return false;
2868 }
2869 }
2870 m2div_rem = dco_khz % (refclk_khz * m1div);
2871
2872 tmp = (u64)m2div_rem * (1 << 22);
2873 do_div(tmp, refclk_khz * m1div);
2874 m2div_frac = tmp;
2875
2876 switch (refclk_khz) {
2877 case 19200:
2878 iref_ndiv = 1;
2879 iref_trim = 28;
2880 iref_pulse_w = 1;
2881 break;
2882 case 24000:
2883 iref_ndiv = 1;
2884 iref_trim = 25;
2885 iref_pulse_w = 2;
2886 break;
2887 case 38400:
2888 iref_ndiv = 2;
2889 iref_trim = 28;
2890 iref_pulse_w = 1;
2891 break;
2892 default:
2893 MISSING_CASE(refclk_khz);
2894 return false;
2895 }
2896
2897 /*
2898 * tdc_res = 0.000003
2899 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2900 *
2901 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2902 * was supposed to be a division, but we rearranged the operations of
2903 * the formula to avoid early divisions so we don't multiply the
2904 * rounding errors.
2905 *
2906 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2907 * we also rearrange to work with integers.
2908 *
2909 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2910 * last division by 10.
2911 */
2912 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2913
2914 /*
2915 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2916 * 32 bits. That's not a problem since we round the division down
2917 * anyway.
2918 */
2919 feedfwgain = (use_ssc || m2div_rem > 0) ?
2920 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2921
2922 if (dco_khz >= 9000000) {
2923 prop_coeff = 5;
2924 int_coeff = 10;
2925 } else {
2926 prop_coeff = 4;
2927 int_coeff = 8;
2928 }
2929
2930 if (use_ssc) {
2931 tmp = mul_u32_u32(dco_khz, 47 * 32);
2932 do_div(tmp, refclk_khz * m1div * 10000);
2933 ssc_stepsize = tmp;
2934
2935 tmp = mul_u32_u32(dco_khz, 1000);
2936 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2937 } else {
2938 ssc_stepsize = 0;
2939 ssc_steplen = 0;
2940 }
2941 ssc_steplog = 4;
2942
2943 /* write pll_state calculations */
2944 if (is_dkl) {
2945 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2946 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2947 DKL_PLL_DIV0_FBPREDIV(m1div) |
2948 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2949
2950 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2951 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2952
2953 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2954 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2955 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2956 (use_ssc ? DKL_PLL_SSC_EN : 0);
2957
2958 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2959 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2960
2961 pll_state->mg_pll_tdc_coldst_bias =
2962 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2963 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2964
2965 } else {
2966 pll_state->mg_pll_div0 =
2967 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2968 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2969 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2970
2971 pll_state->mg_pll_div1 =
2972 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2973 MG_PLL_DIV1_DITHER_DIV_2 |
2974 MG_PLL_DIV1_NDIVRATIO(1) |
2975 MG_PLL_DIV1_FBPREDIV(m1div);
2976
2977 pll_state->mg_pll_lf =
2978 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2979 MG_PLL_LF_AFCCNTSEL_512 |
2980 MG_PLL_LF_GAINCTRL(1) |
2981 MG_PLL_LF_INT_COEFF(int_coeff) |
2982 MG_PLL_LF_PROP_COEFF(prop_coeff);
2983
2984 pll_state->mg_pll_frac_lock =
2985 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2986 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2987 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2988 MG_PLL_FRAC_LOCK_DCODITHEREN |
2989 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2990 if (use_ssc || m2div_rem > 0)
2991 pll_state->mg_pll_frac_lock |=
2992 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2993
2994 pll_state->mg_pll_ssc =
2995 (use_ssc ? MG_PLL_SSC_EN : 0) |
2996 MG_PLL_SSC_TYPE(2) |
2997 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2998 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2999 MG_PLL_SSC_FLLEN |
3000 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3001
3002 pll_state->mg_pll_tdc_coldst_bias =
3003 MG_PLL_TDC_COLDST_COLDSTART |
3004 MG_PLL_TDC_COLDST_IREFINT_EN |
3005 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3006 MG_PLL_TDC_TDCOVCCORR_EN |
3007 MG_PLL_TDC_TDCSEL(3);
3008
3009 pll_state->mg_pll_bias =
3010 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3011 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3012 MG_PLL_BIAS_BIAS_BONUS(10) |
3013 MG_PLL_BIAS_BIASCAL_EN |
3014 MG_PLL_BIAS_CTRIM(12) |
3015 MG_PLL_BIAS_VREF_RDAC(4) |
3016 MG_PLL_BIAS_IREFTRIM(iref_trim);
3017
3018 if (refclk_khz == 38400) {
3019 pll_state->mg_pll_tdc_coldst_bias_mask =
3020 MG_PLL_TDC_COLDST_COLDSTART;
3021 pll_state->mg_pll_bias_mask = 0;
3022 } else {
3023 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3024 pll_state->mg_pll_bias_mask = -1U;
3025 }
3026
3027 pll_state->mg_pll_tdc_coldst_bias &=
3028 pll_state->mg_pll_tdc_coldst_bias_mask;
3029 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3030 }
3031
3032 return true;
3033 }
3034
/*
 * Compute the port clock frequency (in kHz) produced by an MG (ICL) or
 * Dekel (TGL+) PHY PLL from its register state. This is the inverse of the
 * calculation done in icl_calc_mg_pll_state().
 *
 * Returns the frequency in kHz, or 0 if the HSDIV ratio field contains an
 * unexpected value.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	/* Non-SSC reference clock, in kHz */
	ref_clock = dev_priv->dpll.ref_clks.nssc;

	if (DISPLAY_VER(dev_priv) >= 12) {
		/* Dekel PHY: feedback dividers live in PLL_DIV0 / PLL_BIAS */
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* Fractional feedback only counts when explicitly enabled */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		/* MG PHY (ICL): feedback dividers live in PLL_DIV0 / PLL_DIV1 */
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the high-speed divider ratio from CLKTOP2_HSCLKCTL */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3106
3107 /**
3108 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3109 * @crtc_state: state for the CRTC to select the DPLL for
3110 * @port_dpll_id: the active @port_dpll_id to select
3111 *
3112 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3113 * CRTC.
3114 */
icl_set_active_port_dpll(struct intel_crtc_state * crtc_state,enum icl_port_dpll_id port_dpll_id)3115 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3116 enum icl_port_dpll_id port_dpll_id)
3117 {
3118 struct icl_port_dpll *port_dpll =
3119 &crtc_state->icl_port_dplls[port_dpll_id];
3120
3121 crtc_state->shared_dpll = port_dpll->pll;
3122 crtc_state->dpll_hw_state = port_dpll->hw_state;
3123 }
3124
icl_update_active_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)3125 static void icl_update_active_dpll(struct intel_atomic_state *state,
3126 struct intel_crtc *crtc,
3127 struct intel_encoder *encoder)
3128 {
3129 struct intel_crtc_state *crtc_state =
3130 intel_atomic_get_new_crtc_state(state, crtc);
3131 struct intel_digital_port *primary_port;
3132 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3133
3134 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3135 enc_to_mst(encoder)->primary :
3136 enc_to_dig_port(encoder);
3137
3138 if (primary_port &&
3139 (primary_port->tc_mode == TC_PORT_DP_ALT ||
3140 primary_port->tc_mode == TC_PORT_LEGACY))
3141 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3142
3143 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3144 }
3145
intel_get_hti_plls(struct drm_i915_private * i915)3146 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3147 {
3148 if (!(i915->hti_state & HDPORT_ENABLED))
3149 return 0;
3150
3151 return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3152 }
3153
/*
 * Calculate and reserve a combo PHY DPLL for the given CRTC/encoder.
 *
 * Computes the PLL parameters (WRPLL for HDMI/DSI, DP parameter tables
 * otherwise), builds the platform/port specific mask of eligible DPLLs,
 * removes any PLLs claimed by HTI, and reserves a matching shared DPLL.
 *
 * Returns true on success, false if no valid PLL state or free PLL instance
 * could be found.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need a computed WRPLL; DP link rates use fixed tables. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* The set of DPLLs a combo port may use varies per platform. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* DG1 splits its DPLLs between the two pairs of ports. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		/* On JSL/EHL only non-A ports may also use DPLL4. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3233
/*
 * Reserve the DPLLs needed by a TypeC PHY port: both the TBT-ALT PLL (the
 * default) and the port's dedicated MG PHY PLL are reserved here; the one
 * actually used is selected afterwards by icl_update_active_dpll().
 *
 * On failure after the TBT PLL was reserved, the reference is dropped again
 * via the goto-cleanup path. Returns true when both PLLs were reserved.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First the TBT PLL, which serves as the default port DPLL. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then the MG PHY PLL dedicated to this TC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Roll back the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3294
icl_get_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)3295 static bool icl_get_dplls(struct intel_atomic_state *state,
3296 struct intel_crtc *crtc,
3297 struct intel_encoder *encoder)
3298 {
3299 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3300 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3301
3302 if (intel_phy_is_combo(dev_priv, phy))
3303 return icl_get_combo_phy_dpll(state, crtc, encoder);
3304 else if (intel_phy_is_tc(dev_priv, phy))
3305 return icl_get_tc_phy_dplls(state, crtc, encoder);
3306
3307 MISSING_CASE(phy);
3308
3309 return false;
3310 }
3311
icl_put_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc)3312 static void icl_put_dplls(struct intel_atomic_state *state,
3313 struct intel_crtc *crtc)
3314 {
3315 const struct intel_crtc_state *old_crtc_state =
3316 intel_atomic_get_old_crtc_state(state, crtc);
3317 struct intel_crtc_state *new_crtc_state =
3318 intel_atomic_get_new_crtc_state(state, crtc);
3319 enum icl_port_dpll_id id;
3320
3321 new_crtc_state->shared_dpll = NULL;
3322
3323 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3324 const struct icl_port_dpll *old_port_dpll =
3325 &old_crtc_state->icl_port_dplls[id];
3326 struct icl_port_dpll *new_port_dpll =
3327 &new_crtc_state->icl_port_dplls[id];
3328
3329 new_port_dpll->pll = NULL;
3330
3331 if (!old_port_dpll->pll)
3332 continue;
3333
3334 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3335 }
3336 }
3337
/*
 * Read out the hardware state of an ICL MG PHY PLL.
 *
 * Only runs while display core power is up; returns false if power or the
 * PLL itself is disabled. Register values are masked down to the fields we
 * program via RMW, so readout matches what icl_calc_mg_pll_state() computes.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	/* Nothing to read out if the PLL is disabled. */
	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The bias/TDC masks depend on the refclk, mirroring the mask setup
	 * in icl_calc_mg_pll_state().
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3403
/*
 * Read out the hardware state of a TGL+ Dekel PHY PLL.
 *
 * Only runs while display core power is up; returns false if power or the
 * PLL itself is disabled. Each register is masked down to the fields that
 * dkl_pll_write() programs, so readout and programmed state compare equal.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	/* Nothing to read out if the PLL is disabled. */
	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3477
/*
 * Common readout for ICL+ combo/TBT PLLs: read CFGCR0/CFGCR1 from the
 * platform-specific register location, provided display core power is up
 * and the PLL (per @enable_reg) is enabled. Returns false otherwise.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR0/1 live at different offsets depending on the platform. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* JSL/EHL DPLL4 reuses the ICL register layout at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3532
combo_pll_get_hw_state(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll,struct intel_dpll_hw_state * hw_state)3533 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3534 struct intel_shared_dpll *pll,
3535 struct intel_dpll_hw_state *hw_state)
3536 {
3537 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3538
3539 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3540 }
3541
tbt_pll_get_hw_state(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll,struct intel_dpll_hw_state * hw_state)3542 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3543 struct intel_shared_dpll *pll,
3544 struct intel_dpll_hw_state *hw_state)
3545 {
3546 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3547 }
3548
/*
 * Program the PLL's CFGCR0/CFGCR1 registers, selecting the register
 * location by platform (the inverse mapping of icl_pll_get_hw_state()).
 * The posting read flushes the writes before the PLL is enabled.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* JSL/EHL DPLL4 reuses the ICL register layout at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3582
/*
 * Program an ICL MG PHY PLL from the saved hw_state. Registers with
 * reserved fields are written RMW using fixed or state-dependent masks;
 * the remaining registers are fully owned by the driver and written whole.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers have no reserved fields we need to preserve. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC masks are refclk-dependent, set up during calc/readout. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3633
/*
 * Program a TGL+ Dekel PHY PLL from the saved hw_state. All registers are
 * written RMW, preserving fields outside the masks we own; the final
 * posting read flushes the sequence.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3703
/*
 * Enable the PLL's power well and wait for the power-state bit to report
 * it is up. Uses the driver's intel_de_rmw() helper (intel_de.h) instead
 * of an open-coded read/modify/write.
 */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3722
/*
 * Set the PLL enable bit and wait for lock. Uses the driver's
 * intel_de_rmw() helper (intel_de.h) instead of an open-coded
 * read/modify/write.
 */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
3737
/*
 * Wa_16011069516: disable CMTG clock gating on ADL-P A0 steppings.
 * Only applies while DPLL0 is being enabled (see comment below).
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* The WA is only needed on ADL-P A0 and only touches DPLL0. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* Intentional double read — see the comment above. */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3762
/*
 * Enable a combo PHY PLL: power up, program CFGCR registers, then enable
 * and wait for lock. On JSL/EHL, DPLL4 additionally requires DC states to
 * stay off for as long as it is enabled.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* CFGCR registers must be programmed before the PLL is enabled. */
	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* Must run while DPLL0 is enabled, hence its placement here. */
	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3796
/*
 * Enable the TBT PLL: power up, program CFGCR registers, then enable and
 * wait for lock. The TBT PLL uses the fixed TBT_PLL_ENABLE register.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* CFGCR registers must be programmed before the PLL is enabled. */
	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3814
/*
 * Enable a TypeC PHY PLL: power up, program the MG (ICL) or Dekel (TGL+)
 * PHY registers, then enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* PHY registers must be programmed before the PLL is enabled. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3837
/*
 * Common ICL+ PLL disable sequence: clear the enable bit, wait for the
 * PLL to unlock, then power the PLL down and wait for the power state to
 * clear. The error messages only log failures; disable proceeds anyway.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3874
/*
 * Disable a combo PHY DPLL, then drop the DC-off power reference taken
 * for EHL/JSL DPLL4 in combo_pll_enable(). The order matters: the PLL
 * must be fully disabled before DC states are allowed again.
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
					pll->wakeref);
}
3887
/* Disable the Thunderbolt PLL via the common ICL disable sequence. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3893
/*
 * Disable a Type-C port PLL via the common ICL disable sequence, using
 * the per-port TC PLL enable register.
 */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3901
/*
 * On ICL+ the non-SSC DPLL reference clock is the cdclk hardware
 * reference; there is no SSC reference.
 */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3907
icl_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)3908 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3909 const struct intel_dpll_hw_state *hw_state)
3910 {
3911 drm_dbg_kms(&dev_priv->drm,
3912 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3913 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3914 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3915 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3916 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3917 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3918 hw_state->cfgcr0, hw_state->cfgcr1,
3919 hw_state->mg_refclkin_ctl,
3920 hw_state->mg_clktop2_coreclkctl1,
3921 hw_state->mg_clktop2_hsclkctl,
3922 hw_state->mg_pll_div0,
3923 hw_state->mg_pll_div1,
3924 hw_state->mg_pll_lf,
3925 hw_state->mg_pll_frac_lock,
3926 hw_state->mg_pll_ssc,
3927 hw_state->mg_pll_bias,
3928 hw_state->mg_pll_tdc_coldst_bias);
3929 }
3930
/* Callbacks for ICL+ combo PHY DPLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Callbacks for the ICL+ Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Callbacks for ICL MG PHY Type-C PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3951
/* Ice Lake: two combo DPLLs, the TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs,   DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs,    DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs,    DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs,    DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs,    DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3971
/* Elkhart Lake / Jasper Lake: combo DPLLs only (no Type-C / TBT PLLs). */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3986
/*
 * Callbacks for Dekel PHY Type-C PLLs (display version 12+); shares the
 * mg_pll enable/disable paths, which branch on DISPLAY_VER internally.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3993
/* Tiger Lake: combo DPLLs, TBT PLL and six Dekel Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs,   DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs,   DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs,   DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4015
/* Rocket Lake: combo DPLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4030
/* DG1: four combo DPLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4046
/* Alder Lake S: four combo DPLLs (reusing ICL and DG1 PLL ids). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4062
/* Alder Lake P: combo DPLLs, TBT PLL and four Dekel Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs,   DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs,   DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4082
/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @dev: drm device
 *
 * Select the platform's DPLL manager and register its PLL descriptors in
 * @dev's private state. Platforms without shared DPLLs end up with
 * num_shared_dpll == 0 and a NULL manager.
 */
void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	/* Match newest platforms first; order matters for DISPLAY_VER checks. */
	if (IS_DG2(dev_priv))
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (IS_ALDERLAKE_P(dev_priv))
		dpll_mgr = &adlp_pll_mgr;
	else if (IS_ALDERLAKE_S(dev_priv))
		dpll_mgr = &adls_pll_mgr;
	else if (IS_DG1(dev_priv))
		dpll_mgr = &dg1_pll_mgr;
	else if (IS_ROCKETLAKE(dev_priv))
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_JSL_EHL(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(dev_priv) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr) {
		dev_priv->dpll.num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	/* Tables are expected to be indexed by PLL id (warn otherwise). */
	for (i = 0; dpll_info[i].name; i++) {
		drm_WARN_ON(dev, i != dpll_info[i].id);
		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->dpll.mgr = dpll_mgr;
	dev_priv->dpll.num_shared_dpll = i;
	mutex_init(&dev_priv->dpll.lock);

	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
4140
4141 /**
4142 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4143 * @state: atomic state
4144 * @crtc: CRTC to reserve DPLLs for
4145 * @encoder: encoder
4146 *
4147 * This function reserves all required DPLLs for the given CRTC and encoder
4148 * combination in the current atomic commit @state and the new @crtc atomic
4149 * state.
4150 *
4151 * The new configuration in the atomic commit @state is made effective by
4152 * calling intel_shared_dpll_swap_state().
4153 *
4154 * The reserved DPLLs should be released by calling
4155 * intel_release_shared_dplls().
4156 *
4157 * Returns:
4158 * True if all required DPLLs were successfully reserved.
4159 */
intel_reserve_shared_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)4160 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4161 struct intel_crtc *crtc,
4162 struct intel_encoder *encoder)
4163 {
4164 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4165 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4166
4167 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4168 return false;
4169
4170 return dpll_mgr->get_dplls(state, crtc, encoder);
4171 }
4172
4173 /**
4174 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4175 * @state: atomic state
4176 * @crtc: crtc from which the DPLLs are to be released
4177 *
4178 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4179 * from the current atomic commit @state and the old @crtc atomic state.
4180 *
4181 * The new configuration in the atomic commit @state is made effective by
4182 * calling intel_shared_dpll_swap_state().
4183 */
intel_release_shared_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc)4184 void intel_release_shared_dplls(struct intel_atomic_state *state,
4185 struct intel_crtc *crtc)
4186 {
4187 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4188 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4189
4190 /*
4191 * FIXME: this function is called for every platform having a
4192 * compute_clock hook, even though the platform doesn't yet support
4193 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4194 * called on those.
4195 */
4196 if (!dpll_mgr)
4197 return;
4198
4199 dpll_mgr->put_dplls(state, crtc);
4200 }
4201
4202 /**
4203 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4204 * @state: atomic state
4205 * @crtc: the CRTC for which to update the active DPLL
4206 * @encoder: encoder determining the type of port DPLL
4207 *
4208 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4209 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4210 * DPLL selected will be based on the current mode of the encoder's port.
4211 */
intel_update_active_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)4212 void intel_update_active_dpll(struct intel_atomic_state *state,
4213 struct intel_crtc *crtc,
4214 struct intel_encoder *encoder)
4215 {
4216 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4217 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4218
4219 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4220 return;
4221
4222 dpll_mgr->update_active_dpll(state, crtc, encoder);
4223 }
4224
4225 /**
4226 * intel_dpll_get_freq - calculate the DPLL's output frequency
4227 * @i915: i915 device
4228 * @pll: DPLL for which to calculate the output frequency
4229 * @pll_state: DPLL state from which to calculate the output frequency
4230 *
4231 * Return the output frequency corresponding to @pll's passed in @pll_state.
4232 */
intel_dpll_get_freq(struct drm_i915_private * i915,const struct intel_shared_dpll * pll,const struct intel_dpll_hw_state * pll_state)4233 int intel_dpll_get_freq(struct drm_i915_private *i915,
4234 const struct intel_shared_dpll *pll,
4235 const struct intel_dpll_hw_state *pll_state)
4236 {
4237 if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4238 return 0;
4239
4240 return pll->info->funcs->get_freq(i915, pll, pll_state);
4241 }
4242
4243 /**
4244 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4245 * @i915: i915 device
4246 * @pll: DPLL for which to calculate the output frequency
4247 * @hw_state: DPLL's hardware state
4248 *
4249 * Read out @pll's hardware state into @hw_state.
4250 */
intel_dpll_get_hw_state(struct drm_i915_private * i915,struct intel_shared_dpll * pll,struct intel_dpll_hw_state * hw_state)4251 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4252 struct intel_shared_dpll *pll,
4253 struct intel_dpll_hw_state *hw_state)
4254 {
4255 return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4256 }
4257
/*
 * Read out one PLL's hardware state during driver init/resume: its
 * on/off state, which pipes reference it, and (for EHL/JSL DPLL4) the
 * DC-off power reference that combo_pll_enable() would have taken.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/* Mirror the reference combo_pll_enable() takes for this PLL. */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	/* Rebuild the pipe mask from the CRTCs currently using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4285
intel_dpll_update_ref_clks(struct drm_i915_private * i915)4286 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4287 {
4288 if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4289 i915->dpll.mgr->update_ref_clks(i915);
4290 }
4291
intel_dpll_readout_hw_state(struct drm_i915_private * i915)4292 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4293 {
4294 int i;
4295
4296 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4297 readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4298 }
4299
sanitize_dpll_state(struct drm_i915_private * i915,struct intel_shared_dpll * pll)4300 static void sanitize_dpll_state(struct drm_i915_private *i915,
4301 struct intel_shared_dpll *pll)
4302 {
4303 if (!pll->on)
4304 return;
4305
4306 adlp_cmtg_clock_gating_wa(i915, pll);
4307
4308 if (pll->active_mask)
4309 return;
4310
4311 drm_dbg_kms(&i915->drm,
4312 "%s enabled but not in use, disabling\n",
4313 pll->info->name);
4314
4315 pll->info->funcs->disable(i915, pll);
4316 pll->on = false;
4317 }
4318
intel_dpll_sanitize_state(struct drm_i915_private * i915)4319 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4320 {
4321 int i;
4322
4323 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4324 sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4325 }
4326
4327 /**
4328 * intel_dpll_dump_hw_state - write hw_state to dmesg
4329 * @dev_priv: i915 drm device
4330 * @hw_state: hw state to be written to the log
4331 *
4332 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4333 */
intel_dpll_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)4334 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4335 const struct intel_dpll_hw_state *hw_state)
4336 {
4337 if (dev_priv->dpll.mgr) {
4338 dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4339 } else {
4340 /* fallback for platforms that don't use the shared dpll
4341 * infrastructure
4342 */
4343 drm_dbg_kms(&dev_priv->drm,
4344 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4345 "fp0: 0x%x, fp1: 0x%x\n",
4346 hw_state->dpll,
4347 hw_state->dpll_md,
4348 hw_state->fp0,
4349 hw_state->fp1);
4350 }
4351 }
4352