1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
4 */
5
6 #include <linux/clk-provider.h>
7 #include <linux/delay.h>
8
9 #include "dsi_phy.h"
10 #include "dsi.xml.h"
11 #include "dsi_phy_28nm_8960.xml.h"
12
13 /*
14 * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
15 *
16 *
17 * +------+
18 * dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
19 * F * byte_clk | +------+
20 * | bit clock divider (F / 8)
21 * |
22 * | +------+
 *              o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG
24 * | +------+ | (sets parent rate)
25 * | byte clock divider (F) |
26 * | |
27 * | o---> To esc RCG
28 * | (doesn't set parent rate)
29 * |
30 * | +------+
 *              o-----| DIV3 |----dsi1pll------o---> To dsi RCG
32 * +------+ | (sets parent rate)
33 * dsi clock divider (F * magic) |
34 * |
35 * o---> To pixel rcg
36 * (doesn't set parent rate)
37 */
38
39 #define POLL_MAX_READS 8000
40 #define POLL_TIMEOUT_US 1
41
42 #define VCO_REF_CLK_RATE 27000000
43 #define VCO_MIN_RATE 600000000
44 #define VCO_MAX_RATE 1200000000
45
46 #define VCO_PREF_DIV_RATIO 27
47
/*
 * PLL state saved across a PHY power collapse so it can be reprogrammed
 * on resume (see dsi_28nm_pll_save_state()/dsi_28nm_pll_restore_state()).
 */
struct pll_28nm_cached_state {
	unsigned long vco_rate;	/* last VCO rate, from clk_hw_get_rate() */
	u8 postdiv3;		/* PLL_CTRL_10: dsi clock postdivider (DIV3) */
	u8 postdiv2;		/* PLL_CTRL_9: byte clock postdivider (DIV2) */
	u8 postdiv1;		/* PLL_CTRL_8: bit clock postdivider (DIV1) */
};
54
/* Custom byte clock divider (DIV2 in the diagram above) */
struct clk_bytediv {
	struct clk_hw hw;
	void __iomem *reg;	/* divider register (PLL_CTRL_9); holds divisor - 1 */
};
59
/* Per-PHY PLL state; clk_hw is the VCO clock exposed to the clock framework */
struct dsi_pll_28nm {
	struct clk_hw clk_hw;	/* the VCO clock ("dsi%dvco_clk") */

	struct msm_dsi_phy *phy;	/* back-pointer to the owning PHY */

	struct pll_28nm_cached_state cached_state;
};

/* Recover the dsi_pll_28nm from its embedded VCO clk_hw */
#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
69
pll_28nm_poll_for_ready(struct dsi_pll_28nm * pll_28nm,int nb_tries,int timeout_us)70 static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
71 int nb_tries, int timeout_us)
72 {
73 bool pll_locked = false;
74 u32 val;
75
76 while (nb_tries--) {
77 val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
78 pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
79
80 if (pll_locked)
81 break;
82
83 udelay(timeout_us);
84 }
85 DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
86
87 return pll_locked;
88 }
89
90 /*
91 * Clock Callbacks
92 */
/*
 * clk_ops.set_rate for the VCO.
 *
 * Per dsi_pll_28nm_clk_recalc_rate() the hardware relation is
 *   vco = (ref_clk / ref_divider) * fb_divider * 2
 * so with the reference divider fixed at VCO_PREF_DIV_RATIO the feedback
 * divider is rate * VCO_PREF_DIV_RATIO / (ref_clk * 2).  Both rates are
 * pre-divided by 10 to keep the 32-bit multiply from overflowing.
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 val, temp, fb_divider;

	DBG("rate=%lu, parent's=%lu", rate, parent_rate);

	temp = rate / 10;
	val = VCO_REF_CLK_RATE / 10;
	fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
	fb_divider = fb_divider / 2 - 1;
	/* feedback divider: low byte in CTRL_1, high 3 bits in CTRL_2[2:0] */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
			fb_divider & 0xff);

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);

	/* NOTE(review): OR-only update -- stale CTRL_2[2:0] bits are not
	 * cleared first; presumably the register is at its reset value here.
	 */
	val |= (fb_divider >> 8) & 0x07;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
			val);

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);

	/* reference divider - 1 (OR-only update here as well) */
	val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
			val);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
			0xf);

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
	val |= 0x7 << 4;
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
			val);

	return 0;
}
133
dsi_pll_28nm_clk_is_enabled(struct clk_hw * hw)134 static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
135 {
136 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
137
138 return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
139 POLL_TIMEOUT_US);
140 }
141
dsi_pll_28nm_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)142 static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
143 unsigned long parent_rate)
144 {
145 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
146 void __iomem *base = pll_28nm->phy->pll_base;
147 unsigned long vco_rate;
148 u32 status, fb_divider, temp, ref_divider;
149
150 VERB("parent_rate=%lu", parent_rate);
151
152 status = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
153
154 if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
155 fb_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
156 fb_divider &= 0xff;
157 temp = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
158 fb_divider = (temp << 8) | fb_divider;
159 fb_divider += 1;
160
161 ref_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
162 ref_divider &= 0x3f;
163 ref_divider += 1;
164
165 /* multiply by 2 */
166 vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
167 } else {
168 vco_rate = 0;
169 }
170
171 DBG("returning vco rate = %lu", vco_rate);
172
173 return vco_rate;
174 }
175
/*
 * clk_ops.prepare: program POSTDIV1 (the bit clock divider), enable the
 * PLL and wait for it to lock.
 *
 * Returns 0 on success (or if already on), -EINVAL if lock is not
 * achieved within max_reads polls of timeout_us each.
 */
static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	bool locked;
	unsigned int bit_div, byte_div;
	int max_reads = 1000, timeout_us = 100;
	u32 val;

	DBG("id=%d", pll_28nm->phy->id);

	/* already enabled -- nothing to do */
	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	/*
	 * before enabling the PLL, configure the bit clock divider since we
	 * don't expose it as a clock to the outside world
	 * 1: read back the byte clock divider that should already be set
	 * 2: divide by 8 to get bit clock divider
	 * 3: write it to POSTDIV1
	 */
	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
	byte_div = val + 1;
	/* assumes byte_div >= 8 so bit_div >= 1 -- TODO confirm */
	bit_div = byte_div / 8;

	val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
	val &= ~0xf;	/* POSTDIV1 lives in the low nibble of CTRL_8 */
	val |= (bit_div - 1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);

	/* enable the PLL */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
			DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked)) {
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
		return -EINVAL;
	}

	DBG("DSI PLL lock success");
	pll_28nm->phy->pll_on = true;

	return 0;
}
223
dsi_pll_28nm_vco_unprepare(struct clk_hw * hw)224 static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
225 {
226 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
227
228 DBG("id=%d", pll_28nm->phy->id);
229
230 if (unlikely(!pll_28nm->phy->pll_on))
231 return;
232
233 dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
234
235 pll_28nm->phy->pll_on = false;
236 }
237
dsi_pll_28nm_clk_round_rate(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate)238 static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
239 unsigned long rate, unsigned long *parent_rate)
240 {
241 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
242
243 if (rate < pll_28nm->phy->cfg->min_pll_rate)
244 return pll_28nm->phy->cfg->min_pll_rate;
245 else if (rate > pll_28nm->phy->cfg->max_pll_rate)
246 return pll_28nm->phy->cfg->max_pll_rate;
247 else
248 return rate;
249 }
250
/* VCO clock ops: prepare/unprepare gate the PLL, set_rate programs it */
static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};
259
260 /*
 * Custom byte clock divider clk_ops
262 *
263 * This clock is the entry point to configuring the PLL. The user (dsi host)
264 * will set this clock's rate to the desired byte clock rate. The VCO lock
265 * frequency is a multiple of the byte clock rate. The multiplication factor
266 * (shown as F in the diagram above) is a function of the byte clock rate.
267 *
268 * This custom divider clock ensures that its parent (VCO) is set to the
269 * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
270 * accordingly
271 */
/* Recover the clk_bytediv from its embedded clk_hw */
#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
273
clk_bytediv_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)274 static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
275 unsigned long parent_rate)
276 {
277 struct clk_bytediv *bytediv = to_clk_bytediv(hw);
278 unsigned int div;
279
280 div = dsi_phy_read(bytediv->reg) & 0xff;
281
282 return parent_rate / (div + 1);
283 }
284
285 /* find multiplication factor(wrt byte clock) at which the VCO should be set */
/* find multiplication factor(wrt byte clock) at which the VCO should be set */
static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
{
	/* convert to the bit clock rate in MHz (bit clk = 8 * byte clk) */
	unsigned long bit_mhz = (byte_clk_rate * 8) / 1000000;

	/* faster bit clocks need a smaller VCO multiplier */
	if (bit_mhz >= 600)
		return 8;
	if (bit_mhz >= 250)
		return 16;
	if (bit_mhz >= 125)
		return 32;

	return 64;
}
302
/*
 * clk_ops.round_rate: choose the VCO multiplication factor for the
 * requested byte clock, round the corresponding VCO rate through the
 * parent, and report the byte clock that parent rate yields.
 */
static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	unsigned int factor = get_vco_mul_factor(rate);

	*prate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * factor);

	return *prate / factor;
}
316
/*
 * clk_ops.set_rate: program the byte clock postdivider (DIV2) so that
 * the byte clock is VCO / factor.
 */
static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_bytediv *bytediv = to_clk_bytediv(hw);
	u32 val;
	unsigned int factor;

	factor = get_vco_mul_factor(rate);

	val = dsi_phy_read(bytediv->reg);
	/*
	 * NOTE(review): ORs the new divider in without clearing the low
	 * byte first, so bits of a previous larger divider would survive --
	 * presumably the register is still at its reset value here; confirm
	 * before changing.
	 */
	val |= (factor - 1) & 0xff;
	dsi_phy_write(bytediv->reg, val);

	return 0;
}
332
/* Byte clock divider (DIV2) ops -- the entry point for rate setting */
static const struct clk_ops clk_bytediv_ops = {
	.round_rate = clk_bytediv_round_rate,
	.set_rate = clk_bytediv_set_rate,
	.recalc_rate = clk_bytediv_recalc_rate,
};
339
340 /*
341 * PLL Callbacks
342 */
dsi_28nm_pll_save_state(struct msm_dsi_phy * phy)343 static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
344 {
345 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
346 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
347 void __iomem *base = pll_28nm->phy->pll_base;
348
349 cached_state->postdiv3 =
350 dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
351 cached_state->postdiv2 =
352 dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
353 cached_state->postdiv1 =
354 dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
355
356 cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
357 }
358
dsi_28nm_pll_restore_state(struct msm_dsi_phy * phy)359 static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
360 {
361 struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
362 struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
363 void __iomem *base = pll_28nm->phy->pll_base;
364 int ret;
365
366 ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
367 cached_state->vco_rate, 0);
368 if (ret) {
369 DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
370 "restore vco rate failed. ret=%d\n", ret);
371 return ret;
372 }
373
374 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
375 cached_state->postdiv3);
376 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
377 cached_state->postdiv2);
378 dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
379 cached_state->postdiv1);
380
381 return 0;
382 }
383
/*
 * Register the PLL clock tree with the common clock framework:
 * VCO ("dsi%dvco_clk") -> custom bytediv (DIV2, "dsi%dpllbyte") and
 * VCO -> generic divider (DIV3, "dsi%dpll").
 *
 * Returns 0 on success or a negative errno.
 */
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
	char clk_name[32];
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref",
		},
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_28nm_vco,
	};
	struct device *dev = &pll_28nm->phy->pdev->dev;
	struct clk_hw *hw;
	struct clk_bytediv *bytediv;
	struct clk_init_data bytediv_init = { };
	int ret;

	DBG("%d", pll_28nm->phy->id);

	bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
	if (!bytediv)
		return -ENOMEM;

	/* clk_name is reused for each registration -- presumably the clk
	 * core copies the name string; verify against clk_register().
	 */
	snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
	vco_init.name = clk_name;

	pll_28nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
	if (ret)
		return ret;

	/* prepare and register bytediv */
	bytediv->hw.init = &bytediv_init;
	bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;

	/* byte/pixel clock names use id + 1; the VCO name uses the raw id */
	snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);

	bytediv_init.name = clk_name;
	bytediv_init.ops = &clk_bytediv_ops;
	bytediv_init.flags = CLK_SET_RATE_PARENT;
	bytediv_init.parent_hws = (const struct clk_hw*[]){
		&pll_28nm->clk_hw,
	};
	bytediv_init.num_parents = 1;

	/* DIV2 */
	ret = devm_clk_hw_register(dev, &bytediv->hw);
	if (ret)
		return ret;
	provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;

	snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
	/* DIV3 */
	hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
			REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
			0, 8, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	return 0;
}
448
/*
 * .pll_init callback: allocate the per-PHY PLL state, register its clocks
 * and hand the VCO clk_hw back to the PHY core.
 */
static int dsi_pll_28nm_8960_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_28nm *pll_28nm;
	int ret;

	if (!pdev)
		return -ENODEV;

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return -ENOMEM;

	pll_28nm->phy = phy;

	ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_28nm->clk_hw;

	return 0;
}
474
/* Write the precalculated D-PHY timing values to the TIMING_CTRL registers */
static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
		struct msm_dsi_dphy_timing *timing)
{
	void __iomem *base = phy->base;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
		DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
		DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
		DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
	/* no computed timing parameter for TIMING_CTRL_3; written as 0 */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
		DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
		DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
		DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
		DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
		DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
		DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
		DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
		DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
505
/* First-stage programming of the PHY regulator block (MISC register space) */
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
			0x100);
}
517
/*
 * Second-stage regulator programming, applied later in the enable
 * sequence (after the strength/ctrl registers, before calibration).
 */
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
}
528
/*
 * Run PHY calibration: configure the calibration block, pulse the
 * hardware trigger, then poll CAL_STATUS until the busy bit clears.
 * NOTE(review): silently gives up after ~5000 polls; no error reported.
 */
static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;
	u32 status;
	int i = 5000;	/* poll budget, 1us delay per iteration */

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
			0x3);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);

	/* pulse the hardware trigger, holding it high for 5-6 ms */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
	usleep_range(5000, 6000);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);

	do {
		status = dsi_phy_read(base +
				REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);

		if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
			break;

		udelay(1);
	} while (--i > 0);
}
558
/* Program the four data lanes and the clock lane with fixed defaults */
static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	int i;

	/* data lanes 0-3 share an identical configuration */
	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
				0x00);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
				0x01);
		dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
				0x66);
	}

	/* clock lane */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
}
583
/*
 * .enable callback: compute the D-PHY timings for the requested clocks,
 * then bring the PHY up (regulators, strength/ctrl registers,
 * calibration, lane config, BIST setup, timing registers).
 *
 * Returns 0 on success, -EINVAL if the timing calculation fails.
 */
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
				struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;

	DBG("");

	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n",
			__func__);
		return -EINVAL;
	}

	dsi_28nm_phy_regulator_init(phy);

	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);

	/* strength control */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);

	/* phy ctrl */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);

	dsi_28nm_phy_regulator_ctrl(phy);

	dsi_28nm_phy_calibration(phy);

	dsi_28nm_phy_lane_config(phy);

	/* BIST setup: CTRL_4 is raised, CTRL_1/CTRL_0 set, then CTRL_4 cleared */
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
	dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);

	dsi_28nm_dphy_set_timing(phy, timing);

	return 0;
}
629
/* .disable callback: clear PHY_CTRL_0 to shut the PHY down */
static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);

	/*
	 * Wait for the registers writes to complete in order to
	 * ensure that the phy is completely disabled
	 */
	wmb();
}
640
/* Supplies required by this PHY */
static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
	{ .supply = "vddio", .init_load_uA = 100000 },	/* 1.8 V */
};
644
/* Configuration exported to the PHY core for the 28nm (8960/A family) PHY */
const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_8960_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_8960_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,	/* 600 MHz */
	.max_pll_rate = VCO_MAX_RATE,	/* 1.2 GHz */
	.io_start = { 0x4700300, 0x5800300 },	/* two PHY instances */
	.num_dsi_phy = 2,
};
661