/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/iopoll.h>

#include "dsi_phy.h"
#include "dsi.xml.h"

dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy * phy)11 static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
12 {
13 void __iomem *base = phy->base;
14 u32 data = 0;
15
16 data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
17 mb(); /* make sure read happened */
18
19 return (data & BIT(0));
20 }
21
/*
 * Enable or disable LPRX + CDRX on the physical lane that maps to
 * logical data lane 0 (0x3 sets both enable bits, 0 clears them).
 */
static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */

	/*
	 * LPRX and CDRX need to enabled only for physical data lane
	 * corresponding to the logical data lane 0
	 */
	dsi_phy_write(phy->lane_base +
		      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0),
		      enable ? 0x3 : 0);
}

dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy * phy)39 static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
40 {
41 int i;
42 u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
43 void __iomem *lane_base = phy->lane_base;
44
45 /* Strength ctrl settings */
46 for (i = 0; i < 5; i++) {
47 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
48 0x55);
49 /*
50 * Disable LPRX and CDRX for all lanes. And later on, it will
51 * be only enabled for the physical data lane corresponding
52 * to the logical data lane 0
53 */
54 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
55 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
56 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
57 0x88);
58 }
59
60 dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
61
62 /* other settings */
63 for (i = 0; i < 5; i++) {
64 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
65 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
66 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
67 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
68 i == 4 ? 0x80 : 0x0);
69 dsi_phy_write(lane_base +
70 REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
71 dsi_phy_write(lane_base +
72 REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
73 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
74 tx_dctrl[i]);
75 }
76
77 /* Toggle BIT 0 to release freeze I/0 */
78 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
80 }
81
/*
 * dsi_10nm_phy_enable() - power up and program the 10nm DSI PHY.
 *
 * @phy:        PHY instance; common registers at phy->base
 * @src_pll_id: source PLL index -- not referenced in this function
 *              (PLL selection happens via msm_dsi_pll_set_usecase())
 * @clk_req:    clock request used to derive the D-PHY timing values
 *
 * Sequence: compute v3 D-PHY timings, wait for REFGEN ready, release
 * digital/PLL power-down, program common-block config and timing
 * registers, power up the lanes, then apply per-lane settings.
 *
 * Returns 0 on success, -EINVAL if timing calculation fails or REFGEN
 * never becomes ready, or the error from msm_dsi_pll_set_usecase().
 */
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	/* Fill phy->timing from the requested clock rates */
	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		dev_err(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	/* Warn only -- configuration proceeds regardless */
	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY (PHY_STATUS bit 0), polling every 5us up to 1ms */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings -- one computed value per TIMING_CTRL register */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
	if (ret) {
		dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}

/*
 * dsi_10nm_phy_disable() - PHY disable hook; intentionally a no-op.
 *
 * NOTE(review): no registers are touched on disable here -- presumably
 * regulators/clocks are released by the common dsi_phy code. Confirm
 * whether lanes/LDO should be explicitly powered down in this hook.
 */
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
}

dsi_10nm_phy_init(struct msm_dsi_phy * phy)192 static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
193 {
194 struct platform_device *pdev = phy->pdev;
195
196 phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
197 "DSI_PHY_LANE");
198 if (IS_ERR(phy->lane_base)) {
199 dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
200 __func__);
201 return -ENOMEM;
202 }
203
204 return 0;
205 }
206
/*
 * Configuration table for the 10nm DSI PHY.
 *
 * NOTE(review): io_start addresses look like the two DSI PHY blocks of
 * an SDM845-class SoC -- confirm against the SoC's register map.
 */
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.type = MSM_DSI_PHY_10NM,
	/* indexed by [phy id][src pll id]; semantics defined by dsi_phy core */
	.src_pll_truthtable = { {false, false}, {true, false} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			/* supply name plus two limits -- NOTE(review):
			 * presumably load (uA) figures; verify against
			 * struct dsi_reg_entry */
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.init = dsi_10nm_phy_init,
	},
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};
