// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME              32
#define VDDA_PHY_MIN_UV            1000000
#define VDDA_PHY_MAX_UV            1000000
#define VDDA_PLL_MIN_UV            1800000
#define VDDA_PLL_MAX_UV            1800000
#define VDDP_REF_CLK_MIN_UV        1200000
#define VDDP_REF_CLK_MAX_UV        1200000

int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
			   struct ufs_qcom_phy_calibration *tbl_A,
			   int tbl_size_A,
			   struct ufs_qcom_phy_calibration *tbl_B,
			   int tbl_size_B, bool is_rate_B)
{
	int i;
	int ret = 0;

	if (!tbl_A) {
		dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < tbl_size_A; i++)
		writel_relaxed(tbl_A[i].cfg_value,
			       ufs_qcom_phy->mmio + tbl_A[i].reg_offset);

	/*
	 * In case we would like to work in rate B, we need to override
	 * the registers that were configured from the rate A table with
	 * the registers of the rate B table.
	 */
	if (is_rate_B) {
		if (!tbl_B) {
			dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
				__func__);
			ret = -EINVAL;
			goto out;
		}

		for (i = 0; i < tbl_size_B; i++)
			writel_relaxed(tbl_B[i].cfg_value,
				ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
	}

	/* flush buffered writes */
	mb();

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
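
/*
 * Illustrative example (not part of this driver): a device-specific PHY
 * driver would typically wrap ufs_qcom_phy_calibrate() in its ->calibrate()
 * hook, passing its own register tables. The function and table names below
 * are hypothetical:
 *
 *	static int foo_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 *				     bool is_rate_B)
 *	{
 *		return ufs_qcom_phy_calibrate(ufs_qcom_phy,
 *				foo_calibration_table_rate_A,
 *				ARRAY_SIZE(foo_calibration_table_rate_A),
 *				foo_calibration_table_rate_B,
 *				ARRAY_SIZE(foo_calibration_table_rate_B),
 *				is_rate_B);
 *	}
 */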

/*
 * This assumes the phy structure stored in generic_phy's drvdata embeds a
 * struct ufs_qcom_phy. In order to function properly it's crucial to keep
 * the embedded struct "struct ufs_qcom_phy common_cfg" as the first member
 * of that device-specific structure.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);

static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
			   struct ufs_qcom_phy *phy_common)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
	phy_common->mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR(phy_common->mmio)) {
		err = PTR_ERR(phy_common->mmio);
		phy_common->mmio = NULL;
		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
			__func__, err);
		return err;
	}

	/* "dev_ref_clk_ctrl_mem" is an optional resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dev_ref_clk_ctrl_mem");
	phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR(phy_common->dev_ref_clk_ctrl_mmio))
		phy_common->dev_ref_clk_ctrl_mmio = NULL;

	return 0;
}

struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
				struct ufs_qcom_phy *common_cfg,
				const struct phy_ops *ufs_qcom_phy_gen_ops,
				struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		generic_phy = NULL;
		goto out;
	}

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
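
/*
 * Illustrative example (not part of this driver): a device-specific probe
 * would typically allocate its own phy structure (with "struct ufs_qcom_phy
 * common_cfg" as the first member, see get_ufs_qcom_phy() above), call
 * ufs_qcom_phy_generic_probe() with its ops tables and then attach the
 * private structure with phy_set_drvdata(). The ops names here are
 * hypothetical:
 *
 *	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
 *						 &foo_phy_gen_ops,
 *						 &foo_phy_specific_ops);
 *	if (generic_phy)
 *		phy_set_drvdata(generic_phy, phy);
 */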

static int ufs_qcom_phy_get_reset(struct ufs_qcom_phy *phy_common)
{
	struct reset_control *reset;

	if (phy_common->ufs_reset)
		return 0;

	reset = devm_reset_control_get_exclusive_by_index(phy_common->dev, 0);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	phy_common->ufs_reset = reset;
	return 0;
}

static int __ufs_qcom_phy_clk_get(struct device *dev,
			 const char *name, struct clk **clk_out, bool err_print)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		if (err_print)
			dev_err(dev, "failed to get %s err %d", name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_phy_clk_get(struct device *dev,
			 const char *name, struct clk **clk_out)
{
	return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}

int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
	int err;

	if (of_device_is_compatible(phy_common->dev->of_node,
				"qcom,msm8996-ufs-phy-qmp-14nm"))
		goto skip_txrx_clk;

	err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
				   &phy_common->tx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
				   &phy_common->rx_iface_clk);
	if (err)
		goto out;

skip_txrx_clk:
	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
				   &phy_common->ref_clk_src);
	if (err)
		goto out;

	/*
	 * "ref_clk_parent" is optional, hence don't abort init if it's not
	 * found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
				   &phy_common->ref_clk_parent, false);

	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
				   &phy_common->ref_clk);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
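
/*
 * Illustrative devicetree snippet (an assumption, for reference only)
 * showing the clock names ufs_qcom_phy_init_clks() looks up; the actual
 * clock providers are board-specific:
 *
 *	clock-names = "tx_iface_clk", "rx_iface_clk", "ref_clk_src",
 *		      "ref_clk_parent", "ref_clk";
 */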

static int ufs_qcom_phy_init_vreg(struct device *dev,
				  struct ufs_qcom_phy_vreg *vreg,
				  const char *name)
{
	int err = 0;
	char prop_name[MAX_PROP_NAME];

	vreg->name = name;
	vreg->reg = devm_regulator_get(dev, name);
	if (IS_ERR(vreg->reg)) {
		err = PTR_ERR(vreg->reg);
		dev_err(dev, "failed to get %s, %d\n", name, err);
		goto out;
	}

	if (dev->of_node) {
		snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
		err = of_property_read_u32(dev->of_node,
					prop_name, &vreg->max_uA);
		if (err && err != -EINVAL) {
			dev_err(dev, "%s: failed to read %s\n",
					__func__, prop_name);
			goto out;
		} else if (err == -EINVAL || !vreg->max_uA) {
			if (regulator_count_voltages(vreg->reg) > 0) {
				dev_err(dev, "%s: %s is mandatory\n",
						__func__, prop_name);
				err = -EINVAL;
				goto out;
			}
			err = 0;
		}
	}

	if (!strcmp(name, "vdda-pll")) {
		vreg->max_uV = VDDA_PLL_MAX_UV;
		vreg->min_uV = VDDA_PLL_MIN_UV;
	} else if (!strcmp(name, "vdda-phy")) {
		vreg->max_uV = VDDA_PHY_MAX_UV;
		vreg->min_uV = VDDA_PHY_MIN_UV;
	} else if (!strcmp(name, "vddp-ref-clk")) {
		vreg->max_uV = VDDP_REF_CLK_MAX_UV;
		vreg->min_uV = VDDP_REF_CLK_MIN_UV;
	}

out:
	return err;
}

int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
{
	int err;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
		"vdda-pll");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
		"vdda-phy");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
				     "vddp-ref-clk");

out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
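
/*
 * Illustrative devicetree properties (an assumption, for reference only)
 * matching the names ufs_qcom_phy_init_vregulators() requests and the
 * "%s-max-microamp" load property read in ufs_qcom_phy_init_vreg(); the
 * supply phandles and load values below are hypothetical and
 * board-specific:
 *
 *	vdda-phy-supply = <&foo_ldo1>;
 *	vdda-phy-max-microamp = <18380>;
 *	vdda-pll-supply = <&foo_ldo2>;
 *	vdda-pll-max-microamp = <9440>;
 */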

static int ufs_qcom_phy_cfg_vreg(struct device *dev,
			  struct ufs_qcom_phy_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg = vreg->reg;
	const char *name = vreg->name;
	int min_uV;
	int uA_load;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
					__func__, name, ret);
			goto out;
		}
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(reg, uA_load);
		if (ret >= 0) {
			/*
			 * regulator_set_load() historically returned the
			 * new regulator mode on success, so treat any
			 * non-negative return as success.
			 */
			ret = 0;
		} else {
			dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
					__func__, name, uA_load, ret);
			goto out;
		}
	}
out:
	return ret;
}

static int ufs_qcom_phy_enable_vreg(struct device *dev,
			     struct ufs_qcom_phy_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
	if (ret) {
		dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	ret = regulator_enable(vreg->reg);
	if (ret) {
		dev_err(dev, "%s: enable failed, err=%d\n",
				__func__, ret);
		goto out;
	}

	vreg->enabled = true;
out:
	return ret;
}

static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * The reference clock is propagated in a daisy-chained manner from
	 * the source to the phy, so ungate it at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
				__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is an optional clock, hence make sure that the
	 * clk reference is available before trying to enable the clock.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
					__func__, ret);
			goto out_disable_src;
		}
	}

	ret = clk_prepare_enable(phy->ref_clk);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
				__func__, ret);
		goto out_disable_parent;
	}

	phy->is_ref_clk_enabled = true;
	goto out;

out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}

static int ufs_qcom_phy_disable_vreg(struct device *dev,
			      struct ufs_qcom_phy_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled)
		goto out;

	ret = regulator_disable(vreg->reg);
	if (!ret) {
		/* ignore errors on applying disable config */
		ufs_qcom_phy_cfg_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}

static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
	if (phy->is_ref_clk_enabled) {
		clk_disable_unprepare(phy->ref_clk);
		/*
		 * "ref_clk_parent" is an optional clock, hence make sure that
		 * the clk reference is available before trying to disable the
		 * clock.
		 */
		if (phy->ref_clk_parent)
			clk_disable_unprepare(phy->ref_clk_parent);
		clk_disable_unprepare(phy->ref_clk_src);
		phy->is_ref_clk_enabled = false;
	}
}

/* Turn ON M-PHY RMMI interface clocks */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_iface_clk_enabled)
		goto out;

	ret = clk_prepare_enable(phy->tx_iface_clk);
	if (ret) {
		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
				__func__, ret);
		goto out;
	}
	ret = clk_prepare_enable(phy->rx_iface_clk);
	if (ret) {
		clk_disable_unprepare(phy->tx_iface_clk);
		dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, also disabling tx_iface_clk\n",
				__func__, ret);
		goto out;
	}
	phy->is_iface_clk_enabled = true;

out:
	return ret;
}

/* Turn OFF M-PHY RMMI interface clocks */
static void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
	if (phy->is_iface_clk_enabled) {
		clk_disable_unprepare(phy->tx_iface_clk);
		clk_disable_unprepare(phy->rx_iface_clk);
		phy->is_iface_clk_enabled = false;
	}
}

static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy)
{
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
		dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
	}

	return ret;
}

int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
		dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
							       tx_lanes);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);

void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
					  u8 major, u16 minor, u16 step)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	ufs_qcom_phy->host_ctrl_rev_major = major;
	ufs_qcom_phy->host_ctrl_rev_minor = minor;
	ufs_qcom_phy->host_ctrl_rev_step = step;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);

static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy)
{
	if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
		dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
			__func__);
		return -ENOTSUPP;
	}

	return ufs_qcom_phy->phy_spec_ops->
			is_physical_coding_sublayer_ready(ufs_qcom_phy);
}

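/*
 * Power on the PHY. The sequence mirrors the code below: acquire and assert
 * the UFS reset, calibrate for the requested rate, deassert the reset,
 * start the SerDes and wait for the PCS to become ready, and only then
 * enable the regulators and clocks (vdda-phy, vdda-pll, the RMMI interface
 * clocks, the reference clocks and, if present, the vddp-ref-clk pad rail).
 */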
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	bool is_rate_B = false;
	int err;

	err = ufs_qcom_phy_get_reset(phy_common);
	if (err)
		return err;

	err = reset_control_assert(phy_common->ufs_reset);
	if (err)
		return err;

	if (phy_common->mode == PHY_MODE_UFS_HS_B)
		is_rate_B = true;

	err = phy_common->phy_spec_ops->calibrate(phy_common, is_rate_B);
	if (err)
		return err;

	err = reset_control_deassert(phy_common->ufs_reset);
	if (err) {
		dev_err(dev, "Failed to deassert UFS PHY reset\n");
		return err;
	}

	err = ufs_qcom_phy_start_serdes(phy_common);
	if (err)
		return err;

	err = ufs_qcom_phy_is_pcs_ready(phy_common);
	if (err)
		return err;

	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables the ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_iface_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	err = ufs_qcom_phy_enable_ref_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_iface_clk;
	}

	/* enable the device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(dev,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	goto out;

out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(phy_common);
out_disable_iface_clk:
	ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);

int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

	phy_common->phy_spec_ops->power_control(phy_common, false);

	if (phy_common->vddp_ref_clk.reg)
		ufs_qcom_phy_disable_vreg(phy_common->dev,
					  &phy_common->vddp_ref_clk);
	ufs_qcom_phy_disable_ref_clk(phy_common);
	ufs_qcom_phy_disable_iface_clk(phy_common);

	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
	reset_control_assert(phy_common->ufs_reset);
	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);

MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>");
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
MODULE_LICENSE("GPL v2");