1 /*
2  * HiSilicon Hixxxx UFS Driver
3  *
4  * Copyright (c) 2016-2017 Linaro Ltd.
5  * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
6  *
7  * Released under the GPLv2 only.
8  * SPDX-License-Identifier: GPL-2.0
9  */
10 
11 #include <linux/time.h>
12 #include <linux/of.h>
13 #include <linux/of_address.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/platform_device.h>
16 #include <linux/reset.h>
17 
18 #include "ufshcd.h"
19 #include "ufshcd-pltfrm.h"
20 #include "unipro.h"
21 #include "ufs-hisi.h"
22 #include "ufshci.h"
23 
ufs_hisi_check_hibern8(struct ufs_hba * hba)24 static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
25 {
26 	int err = 0;
27 	u32 tx_fsm_val_0 = 0;
28 	u32 tx_fsm_val_1 = 0;
29 	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
30 
31 	do {
32 		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
33 				      &tx_fsm_val_0);
34 		err |= ufshcd_dme_get(hba,
35 		    UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
36 		if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
37 			tx_fsm_val_1 == TX_FSM_HIBERN8))
38 			break;
39 
40 		/* sleep for max. 200us */
41 		usleep_range(100, 200);
42 	} while (time_before(jiffies, timeout));
43 
44 	/*
45 	 * we might have scheduled out for long during polling so
46 	 * check the state again.
47 	 */
48 	if (time_after(jiffies, timeout)) {
49 		err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
50 				     &tx_fsm_val_0);
51 		err |= ufshcd_dme_get(hba,
52 		 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
53 	}
54 
55 	if (err) {
56 		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
57 			__func__, err);
58 	} else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
59 			 tx_fsm_val_1 != TX_FSM_HIBERN8) {
60 		err = -1;
61 		dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
62 			__func__, tx_fsm_val_0, tx_fsm_val_1);
63 	}
64 
65 	return err;
66 }
67 
/*
 * ufs_hi3660_clk_init - route the M-PHY reference clock to the abb source
 * @hba: host controller instance
 *
 * Gates the reference clock, switches its source and isolation, then
 * re-enables it.  Register write order is significant.
 */
static void ufs_hi3660_clk_init(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	/* gate the M-PHY reference clock before touching its source */
	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
	/*
	 * NOTE(review): reads back the bit just cleared and waits 1ms only
	 * if the clear has not yet taken effect -- looks like a settle
	 * delay, confirm against the SoC manual.
	 */
	if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
		mdelay(1);
	/* use abb clk */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
	/* open mphy ref clk */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
}
81 
/*
 * ufs_hi3660_soc_init - power up and un-isolate the UFS controller and PHY
 * @hba: host controller instance
 *
 * Runs the Hi3660 power-on sequence: assert reset, power up the power
 * switch, release isolation, pulse the device reset, then deassert the
 * controller reset.  The write order and the delays between steps are
 * significant; do not reorder.
 */
static void ufs_hi3660_soc_init(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
	u32 reg;

	/* hold the controller in reset while powering up (reset optional) */
	if (!IS_ERR(host->rst))
		reset_control_assert(host->rst);

	/* HC_PSW powerup */
	ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
	udelay(10);
	/* notify PWR ready */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
	/*
	 * NOTE(review): UFS_DEVICE_RESET_CTRL appears to use a masked-write
	 * layout (mask half | value half); value 0 here asserts the device
	 * reset, the later write with BIT_UFS_DEVICE_RESET releases it --
	 * confirm against the SoC manual.
	 */
	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
		UFS_DEVICE_RESET_CTRL);

	reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
	reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
	/* set cfg clk freq */
	ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
	/* set ref clk freq */
	ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
	/* bypass ufs clk gate (restored in link_startup_post_change) */
	ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
						 CLOCK_GATE_BYPASS);
	ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);

	/* open psw clk */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
	/* disable ufshc iso */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
	/* disable phy iso */
	ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
	/* notice iso disable */
	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);

	/* disable lp_reset_n */
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
	mdelay(1);

	/* release the UFS device reset */
	ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
		UFS_DEVICE_RESET_CTRL);

	/* give the device time to come out of reset */
	msleep(20);

	/*
	 * enable the fix of linereset recovery,
	 * and enable rx_reset/tx_rest beat
	 * enable ref_clk_en override(bit5) &
	 * override value = 1(bit4), with mask
	 */
	ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);

	if (!IS_ERR(host->rst))
		reset_control_deassert(host->rst);
}
138 
/*
 * ufs_hisi_link_startup_pre_change - M-PHY/UniPro tuning before link startup
 * @hba: host controller instance
 *
 * Programs vendor M-PHY and UniPro attributes via DME accesses, verifies the
 * TX lanes reach HIBERN8, sets the HCLK divider and disables auto-hibernate.
 * The attribute values and their order come from the vendor bring-up recipe.
 *
 * NOTE(review): return codes of the individual ufshcd_dme_set() calls are
 * deliberately ignored; only the HIBERN8 check result is propagated --
 * confirm this matches the vendor's intent.
 *
 * Returns 0 on success or the error from ufs_hisi_check_hibern8().
 */
static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
{
	int err;
	uint32_t value;
	uint32_t reg;

	/* Unipro VS_mphy_disable */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
	/* PA_HSSeries */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
	/* MPHY CBRATESEL */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
	/* MPHY CBOVRCTRL2 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
	/* MPHY CBOVRCTRL3 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
	/* Unipro VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
	/* MPHY RXOVRCTRL4 rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
	/* MPHY RXOVRCTRL4 rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
	/* MPHY RXOVRCTRL5 rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
	/* MPHY RXOVRCTRL5 rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
	/* MPHY RXSQCONTROL rx0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
	/* MPHY RXSQCONTROL rx1 */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
	/* Unipro VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

	/* MPHY CBRATESEL companion attribute, then latch via VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

	/* Tactive RX */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
	/* Tactive RX */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);

	/* Gear3 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
	/* Gear3 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
	/* Gear2 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
	/* Gear2 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
	/* Gear1 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
	/* Gear1 Synclength */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
	/* Thibernate Tx */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
	/* Thibernate Tx */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);

	/* latch the changes via VS_MphyCfgUpdt */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
	/* Unipro VS_mphy_disable: verify the earlier write of 0x1 stuck */
	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
	if (value != 0x1)
		dev_info(hba->dev,
		    "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);

	/* Unipro VS_mphy_disable: re-enable the M-PHY */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
	err = ufs_hisi_check_hibern8(hba);
	if (err)
		dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");

	/* program the controller clock divider */
	ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);

	/* disable auto H8 */
	reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	reg = reg & (~UFS_AHIT_AH8ITV_MASK);
	ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* Unipro PA_Local_TX_LCC_Enable */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
	/* close Unipro VS_Mk2ExtnSupport */
	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
	if (value != 0) {
		/* Ensure close success */
		dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
	}

	return err;
}
229 
/*
 * ufs_hisi_link_startup_post_change - tuning applied after link startup
 * @hba: host controller instance
 *
 * Programs UniPro data-link thresholds, restores the UFS clock gating that
 * ufs_hi3660_soc_init() bypassed, and arms a received-symbol counter.
 * Always returns 0.
 */
static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	/* Unipro DL_AFC0CreditThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
	/* Unipro DL_TC0OutAckThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
	/* Unipro DL_TC0TXFCThreshold */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);

	/* not bypass ufs clk gate */
	ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
						CLOCK_GATE_BYPASS);
	ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
						UFS_SYSCTRL);

	/* select received symbol cnt */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
	 /* reset counter0 and enable */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);

	return 0;
}
254 
ufs_hi3660_link_startup_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)255 static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
256 					  enum ufs_notify_change_status status)
257 {
258 	int err = 0;
259 
260 	switch (status) {
261 	case PRE_CHANGE:
262 		err = ufs_hisi_link_startup_pre_change(hba);
263 		break;
264 	case POST_CHANGE:
265 		err = ufs_hisi_link_startup_post_change(hba);
266 		break;
267 	default:
268 		break;
269 	}
270 
271 	return err;
272 }
273 
/*
 * Host-side link capability limits, filled in by ufs_hisi_set_dev_cap()
 * and negotiated against the device's capabilities in
 * ufs_hisi_get_pwr_dev_param().
 */
struct ufs_hisi_dev_params {
	u32 pwm_rx_gear; /* pwm rx gear to work in */
	u32 pwm_tx_gear; /* pwm tx gear to work in */
	u32 hs_rx_gear;  /* hs rx gear to work in */
	u32 hs_tx_gear;  /* hs tx gear to work in */
	u32 rx_lanes;    /* number of rx lanes */
	u32 tx_lanes;    /* number of tx lanes */
	u32 rx_pwr_pwm;  /* rx pwm working pwr */
	u32 tx_pwr_pwm;  /* tx pwm working pwr */
	u32 rx_pwr_hs;   /* rx hs working pwr */
	u32 tx_pwr_hs;   /* tx hs working pwr */
	u32 hs_rate;     /* rate A/B to work in HS */
	u32 desired_working_mode; /* FAST (HS) or slow (PWM) preference */
};
288 
ufs_hisi_get_pwr_dev_param(struct ufs_hisi_dev_params * hisi_param,struct ufs_pa_layer_attr * dev_max,struct ufs_pa_layer_attr * agreed_pwr)289 static int ufs_hisi_get_pwr_dev_param(
290 				    struct ufs_hisi_dev_params *hisi_param,
291 				    struct ufs_pa_layer_attr *dev_max,
292 				    struct ufs_pa_layer_attr *agreed_pwr)
293 {
294 	int min_hisi_gear;
295 	int min_dev_gear;
296 	bool is_dev_sup_hs = false;
297 	bool is_hisi_max_hs = false;
298 
299 	if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
300 		is_dev_sup_hs = true;
301 
302 	if (hisi_param->desired_working_mode == FAST) {
303 		is_hisi_max_hs = true;
304 		min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
305 				       hisi_param->hs_tx_gear);
306 	} else {
307 		min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
308 				       hisi_param->pwm_tx_gear);
309 	}
310 
311 	/*
312 	 * device doesn't support HS but
313 	 * hisi_param->desired_working_mode is HS,
314 	 * thus device and hisi_param don't agree
315 	 */
316 	if (!is_dev_sup_hs && is_hisi_max_hs) {
317 		pr_err("%s: device not support HS\n", __func__);
318 		return -ENOTSUPP;
319 	} else if (is_dev_sup_hs && is_hisi_max_hs) {
320 		/*
321 		 * since device supports HS, it supports FAST_MODE.
322 		 * since hisi_param->desired_working_mode is also HS
323 		 * then final decision (FAST/FASTAUTO) is done according
324 		 * to hisi_params as it is the restricting factor
325 		 */
326 		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
327 			hisi_param->rx_pwr_hs;
328 	} else {
329 		/*
330 		 * here hisi_param->desired_working_mode is PWM.
331 		 * it doesn't matter whether device supports HS or PWM,
332 		 * in both cases hisi_param->desired_working_mode will
333 		 * determine the mode
334 		 */
335 		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
336 			hisi_param->rx_pwr_pwm;
337 	}
338 
339 	/*
340 	 * we would like tx to work in the minimum number of lanes
341 	 * between device capability and vendor preferences.
342 	 * the same decision will be made for rx
343 	 */
344 	agreed_pwr->lane_tx =
345 		min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
346 	agreed_pwr->lane_rx =
347 		min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
348 
349 	/* device maximum gear is the minimum between device rx and tx gears */
350 	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
351 
352 	/*
353 	 * if both device capabilities and vendor pre-defined preferences are
354 	 * both HS or both PWM then set the minimum gear to be the chosen
355 	 * working gear.
356 	 * if one is PWM and one is HS then the one that is PWM get to decide
357 	 * what is the gear, as it is the one that also decided previously what
358 	 * pwr the device will be configured to.
359 	 */
360 	if ((is_dev_sup_hs && is_hisi_max_hs) ||
361 	    (!is_dev_sup_hs && !is_hisi_max_hs))
362 		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
363 			min_t(u32, min_dev_gear, min_hisi_gear);
364 	else
365 		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
366 
367 	agreed_pwr->hs_rate = hisi_param->hs_rate;
368 
369 	pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
370 		agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
371 		agreed_pwr->hs_rate);
372 	return 0;
373 }
374 
ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params * hisi_param)375 static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
376 {
377 	hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
378 	hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
379 	hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX;
380 	hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX;
381 	hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX;
382 	hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX;
383 	hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM;
384 	hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM;
385 	hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS;
386 	hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS;
387 	hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE;
388 	hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE;
389 }
390 
/*
 * ufs_hisi_pwr_change_pre_change - UniPro timeouts before a power mode change
 * @hba: host controller instance
 *
 * Programs PA_PWRModeUserData0-5 and the matching DME protection/replay/AFC
 * request timeouts for both traffic classes.  Values come from the vendor
 * bring-up recipe; ufshcd_dme_set() return codes are not checked here.
 */
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
	/* update */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
	/* PA_TxSkip */
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
	/*PA_PWRModeUserData0 = 8191, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
	/*PA_PWRModeUserData1 = 65535, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
	/*PA_PWRModeUserData2 = 32767, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
	/*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
	/*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
	/*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
	/*PA_PWRModeUserData3 = 8191, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
	/*PA_PWRModeUserData4 = 65535, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
	/*PA_PWRModeUserData5 = 32767, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
	/*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
	/*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
	/*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
}
422 
ufs_hi3660_pwr_change_notify(struct ufs_hba * hba,enum ufs_notify_change_status status,struct ufs_pa_layer_attr * dev_max_params,struct ufs_pa_layer_attr * dev_req_params)423 static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
424 				       enum ufs_notify_change_status status,
425 				       struct ufs_pa_layer_attr *dev_max_params,
426 				       struct ufs_pa_layer_attr *dev_req_params)
427 {
428 	struct ufs_hisi_dev_params ufs_hisi_cap;
429 	int ret = 0;
430 
431 	if (!dev_req_params) {
432 		dev_err(hba->dev,
433 			    "%s: incoming dev_req_params is NULL\n", __func__);
434 		ret = -EINVAL;
435 		goto out;
436 	}
437 
438 	switch (status) {
439 	case PRE_CHANGE:
440 		ufs_hisi_set_dev_cap(&ufs_hisi_cap);
441 		ret = ufs_hisi_get_pwr_dev_param(
442 			&ufs_hisi_cap, dev_max_params, dev_req_params);
443 		if (ret) {
444 			dev_err(hba->dev,
445 			    "%s: failed to determine capabilities\n", __func__);
446 			goto out;
447 		}
448 
449 		ufs_hisi_pwr_change_pre_change(hba);
450 		break;
451 	case POST_CHANGE:
452 		break;
453 	default:
454 		ret = -EINVAL;
455 		break;
456 	}
457 out:
458 	return ret;
459 }
460 
ufs_hisi_suspend(struct ufs_hba * hba,enum ufs_pm_op pm_op)461 static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
462 {
463 	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
464 
465 	if (ufshcd_is_runtime_pm(pm_op))
466 		return 0;
467 
468 	if (host->in_suspend) {
469 		WARN_ON(1);
470 		return 0;
471 	}
472 
473 	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
474 	udelay(10);
475 	/* set ref_dig_clk override of PHY PCS to 0 */
476 	ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
477 
478 	host->in_suspend = true;
479 
480 	return 0;
481 }
482 
/*
 * ufs_hisi_resume - system resume: re-enable the M-PHY reference clock
 * @hba: host controller instance
 * @pm_op: PM operation (unused here; suspend only gates on runtime PM)
 *
 * Undoes ufs_hisi_suspend() in reverse order: restore the ref_dig_clk
 * override first, then re-enable the reference clock.  Always returns 0.
 */
static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_hisi_host *host = ufshcd_get_variant(hba);

	/* nothing to undo if suspend never ran */
	if (!host->in_suspend)
		return 0;

	/* set ref_dig_clk override of PHY PCS to 1 */
	ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
	udelay(10);
	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);

	host->in_suspend = false;
	return 0;
}
498 
ufs_hisi_get_resource(struct ufs_hisi_host * host)499 static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
500 {
501 	struct resource *mem_res;
502 	struct device *dev = host->hba->dev;
503 	struct platform_device *pdev = to_platform_device(dev);
504 
505 	/* get resource of ufs sys ctrl */
506 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
507 	host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
508 	if (IS_ERR(host->ufs_sys_ctrl))
509 		return PTR_ERR(host->ufs_sys_ctrl);
510 
511 	return 0;
512 }
513 
/*
 * ufs_hisi_set_pm_lvl - choose default runtime/system PM levels
 * @hba: host controller instance
 *
 * NOTE(review): UFS_PM_LVL_1 (runtime) and UFS_PM_LVL_3 (system) map to
 * specific device/link power states in the core driver -- confirm they
 * match this platform's power budget.
 */
static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
{
	hba->rpm_lvl = UFS_PM_LVL_1;
	hba->spm_lvl = UFS_PM_LVL_3;
}
519 
520 /**
521  * ufs_hisi_init_common
522  * @hba: host controller instance
523  */
ufs_hisi_init_common(struct ufs_hba * hba)524 static int ufs_hisi_init_common(struct ufs_hba *hba)
525 {
526 	int err = 0;
527 	struct device *dev = hba->dev;
528 	struct ufs_hisi_host *host;
529 
530 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
531 	if (!host)
532 		return -ENOMEM;
533 
534 	host->hba = hba;
535 	ufshcd_set_variant(hba, host);
536 
537 	host->rst  = devm_reset_control_get(dev, "rst");
538 
539 	ufs_hisi_set_pm_lvl(hba);
540 
541 	err = ufs_hisi_get_resource(host);
542 	if (err) {
543 		ufshcd_set_variant(hba, NULL);
544 		return err;
545 	}
546 
547 	return 0;
548 }
549 
ufs_hi3660_init(struct ufs_hba * hba)550 static int ufs_hi3660_init(struct ufs_hba *hba)
551 {
552 	int ret = 0;
553 	struct device *dev = hba->dev;
554 
555 	ret = ufs_hisi_init_common(hba);
556 	if (ret) {
557 		dev_err(dev, "%s: ufs common init fail\n", __func__);
558 		return ret;
559 	}
560 
561 	ufs_hi3660_clk_init(hba);
562 
563 	ufs_hi3660_soc_init(hba);
564 
565 	return 0;
566 }
567 
/* Hi3660-specific hooks plugged into the core UFS host driver. */
static struct ufs_hba_variant_ops ufs_hba_hisi_vops = {
	.name = "hi3660",
	.init = ufs_hi3660_init,
	.link_startup_notify = ufs_hi3660_link_startup_notify,
	.pwr_change_notify = ufs_hi3660_pwr_change_notify,
	.suspend = ufs_hisi_suspend,
	.resume = ufs_hisi_resume,
};
576 
/*
 * ufs_hisi_probe - bind the platform device to the UFS platform core
 * @pdev: platform device
 *
 * All real setup happens in the variant ops invoked by ufshcd_pltfrm_init().
 */
static int ufs_hisi_probe(struct platform_device *pdev)
{
	return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops);
}
581 
/*
 * ufs_hisi_remove - unbind: tear down the UFS host controller
 * @pdev: platform device being removed
 */
static int ufs_hisi_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	/* unregister the host controller set up by ufshcd_pltfrm_init() */
	ufshcd_remove(hba);
	return 0;
}
589 
/* Device-tree compatibles this driver binds to. */
static const struct of_device_id ufs_hisi_of_match[] = {
	{ .compatible = "hisilicon,hi3660-ufs" },
	{},
};

MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
596 
/* PM hooks delegate to the common UFS platform glue layer. */
static const struct dev_pm_ops ufs_hisi_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};
604 
/* Platform driver registration and module metadata. */
static struct platform_driver ufs_hisi_pltform = {
	.probe	= ufs_hisi_probe,
	.remove	= ufs_hisi_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-hisi",
		.pm	= &ufs_hisi_pm_ops,
		.of_match_table = of_match_ptr(ufs_hisi_of_match),
	},
};
module_platform_driver(ufs_hisi_pltform);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ufshcd-hisi");
MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
620