// SPDX-License-Identifier: GPL-2.0+
/*
 * DaVinci MDIO Module driver
 *
 * Copyright (C) 2010 Texas Instruments.
 *
 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
 *
 * Copyright (C) 2009 Texas Instruments.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mdio-bitbang.h>
#include <linux/sys_soc.h>

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups.  Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT		100 /* msecs */

#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f

#define DEF_OUT_FREQ		2200000		/* 2.2 MHz */

struct davinci_mdio_of_param {
	int autosuspend_delay_ms;
	bool manual_mode;
};

struct davinci_mdio_regs {
	u32	version;
	u32	control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)
#define CONTROL_MAX_DIV		(0xffff)
#define CONTROL_CLKDIV		GENMASK(15, 0)

#define MDIO_MAN_MDCLK_O	BIT(2)
#define MDIO_MAN_OE		BIT(1)
#define MDIO_MAN_PIN		BIT(0)
#define MDIO_MANUALMODE		BIT(31)

#define MDIO_PIN               0


	u32	alive;
	u32	link;
	u32	linkintraw;
	u32	linkintmasked;
	u32	__reserved_0[2];
	u32	userintraw;
	u32	userintmasked;
	u32	userintmaskset;
	u32	userintmaskclr;
	u32	manualif;
	u32	poll;
	u32	__reserved_1[18];

	struct {
		u32	access;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
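/*
 * Note: the PHY register address (bits 25:21) and PHY address (bits 20:16)
 * fields of the access register are composed inline by davinci_mdio_read()
 * and davinci_mdio_write() below; the 16-bit data value occupies bits 15:0.
 */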

		u32	physel;
	}	user[];
};

static const struct mdio_platform_data default_pdata = {
	.bus_freq = DEF_OUT_FREQ,
};

struct davinci_mdio_data {
	struct mdio_platform_data pdata;
	struct mdiobb_ctrl bb_ctrl;
	struct davinci_mdio_regs __iomem *regs;
	struct clk	*clk;
	struct device	*dev;
	struct mii_bus	*bus;
	bool            active_in_suspend;
	unsigned long	access_time; /* jiffies */
	/* Indicates that the driver shouldn't modify phy_mask when the
	 * MDIO bus is registered from DT.
	 */
	bool		skip_scan;
	u32		clk_div;
	bool		manual_mode;
};

static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
{
	u32 mdio_in, div, mdio_out_khz, access_time;

	mdio_in = clk_get_rate(data->clk);
	div = (mdio_in / data->pdata.bus_freq) - 1;
	if (div > CONTROL_MAX_DIV)
		div = CONTROL_MAX_DIV;

	data->clk_div = div;
	/*
	 * One mdio transaction consists of:
	 *	32 bits of preamble
	 *	32 bits of transferred data
	 *	24 bits of bus yield (not needed unless shared?)
	 */
	mdio_out_khz = mdio_in / (1000 * (div + 1));
	access_time  = (88 * 1000) / mdio_out_khz;
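
	/*
	 * Worked example (assuming a hypothetical 220 MHz functional clock
	 * and the default 2.2 MHz bus_freq): div = 99, mdio_out_khz = 2200,
	 * so one 88-bit frame takes access_time = 40 us.
	 */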

	/*
	 * In the worst case, we could be kicking off a user-access immediately
	 * after the mdio bus scan state-machine triggered its own read.  If
	 * so, our request could get deferred by one access cycle.  We
	 * defensively allow for 4 access cycles.
	 */
	data->access_time = usecs_to_jiffies(access_time * 4);
	if (!data->access_time)
		data->access_time = 1;
}

static void davinci_mdio_enable(struct davinci_mdio_data *data)
{
	/* set enable and clock divider */
	writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}

static void davinci_mdio_disable(struct davinci_mdio_data *data)
{
	u32 reg;

	/* Disable MDIO state machine */
	reg = readl(&data->regs->control);

	reg &= ~CONTROL_CLKDIV;
	reg |= data->clk_div;

	reg &= ~CONTROL_ENABLE;
	writel(reg, &data->regs->control);
}

static void davinci_mdio_enable_manual_mode(struct davinci_mdio_data *data)
{
	u32 reg;
	/* set manual mode */
	reg = readl(&data->regs->poll);
	reg |= MDIO_MANUALMODE;
	writel(reg, &data->regs->poll);
}

static void davinci_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	struct davinci_mdio_data *data;
	u32 reg;

	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
	reg = readl(&data->regs->manualif);

	if (level)
		reg |= MDIO_MAN_MDCLK_O;
	else
		reg &= ~MDIO_MAN_MDCLK_O;

	writel(reg, &data->regs->manualif);
}

static void davinci_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	struct davinci_mdio_data *data;
	u32 reg;

	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
	reg = readl(&data->regs->manualif);

	if (output)
		reg |= MDIO_MAN_OE;
	else
		reg &= ~MDIO_MAN_OE;

	writel(reg, &data->regs->manualif);
}

static void davinci_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	struct davinci_mdio_data *data;
	u32 reg;

	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
	reg = readl(&data->regs->manualif);

	if (value)
		reg |= MDIO_MAN_PIN;
	else
		reg &= ~MDIO_MAN_PIN;

	writel(reg, &data->regs->manualif);
}

static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct davinci_mdio_data *data;
	unsigned long reg;

	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
	reg = readl(&data->regs->manualif);
	return test_bit(MDIO_PIN, &reg);
}

static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
	int ret;

	ret = pm_runtime_resume_and_get(bus->parent);
	if (ret < 0)
		return ret;

	ret = mdiobb_read(bus, phy, reg);

	pm_runtime_mark_last_busy(bus->parent);
	pm_runtime_put_autosuspend(bus->parent);

	return ret;
}

static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
				u16 val)
{
	int ret;

	ret = pm_runtime_resume_and_get(bus->parent);
	if (ret < 0)
		return ret;

	ret = mdiobb_write(bus, phy, reg, val);

	pm_runtime_mark_last_busy(bus->parent);
	pm_runtime_put_autosuspend(bus->parent);

	return ret;
}

static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
{
	u32 phy_mask, ver;
	int ret;

	ret = pm_runtime_resume_and_get(data->dev);
	if (ret < 0)
		return ret;

	if (data->manual_mode) {
		davinci_mdio_disable(data);
		davinci_mdio_enable_manual_mode(data);
	}

	/* wait for scan logic to settle */
	msleep(PHY_MAX_ADDR * data->access_time);

	/* dump hardware version info */
	ver = readl(&data->regs->version);
	dev_info(data->dev,
		 "davinci mdio revision %d.%d, bus freq %ld\n",
		 (ver >> 8) & 0xff, ver & 0xff,
		 data->pdata.bus_freq);

	if (data->skip_scan)
		goto done;

	/* get phy mask from the alive register */
	phy_mask = readl(&data->regs->alive);
	if (phy_mask) {
		/* restrict mdio bus to live phys only */
		dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
		phy_mask = ~phy_mask;
	} else {
		/* desperately scan all phys */
		dev_warn(data->dev, "no live phy, scanning all\n");
		phy_mask = 0;
	}
	data->bus->phy_mask = phy_mask;
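	/*
	 * Example: with PHYs responding at addresses 0 and 1 the alive
	 * register reads 0x3, so phy_mask ends up as 0xfffffffc and only
	 * those two addresses are probed by the MDIO core.
	 */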

done:
	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return 0;
}

static int davinci_mdio_reset(struct mii_bus *bus)
{
	struct davinci_mdio_data *data = bus->priv;

	return davinci_mdio_common_reset(data);
}

static int davinci_mdiobb_reset(struct mii_bus *bus)
{
	struct mdiobb_ctrl *ctrl = bus->priv;
	struct davinci_mdio_data *data;

	data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);

	return davinci_mdio_common_reset(data);
}

/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
	struct davinci_mdio_regs __iomem *regs = data->regs;
	unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
	u32 reg;

	while (time_after(timeout, jiffies)) {
		reg = readl(&regs->user[0].access);
		if ((reg & USERACCESS_GO) == 0)
			return 0;

		reg = readl(&regs->control);
		if ((reg & CONTROL_IDLE) == 0) {
			usleep_range(100, 200);
			continue;
		}

		/*
		 * An emac soft_reset may have clobbered the mdio controller's
		 * state machine.  We need to reset and retry the current
		 * operation
		 */
		dev_warn(data->dev, "resetting idled controller\n");
		davinci_mdio_enable(data);
		return -EAGAIN;
	}

	reg = readl(&regs->user[0].access);
	if ((reg & USERACCESS_GO) == 0)
		return 0;

	dev_err(data->dev, "timed out waiting for user access\n");
	return -ETIMEDOUT;
}

/* wait until hardware state machine is idle */
static inline int wait_for_idle(struct davinci_mdio_data *data)
{
	struct davinci_mdio_regs __iomem *regs = data->regs;
	u32 val, ret;

	ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
				 0, MDIO_TIMEOUT * 1000);
	if (ret)
		dev_err(data->dev, "timed out waiting for idle\n");

	return ret;
}

static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(data->dev);
	if (ret < 0)
		return ret;

	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));

	while (1) {
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		reg = readl(&data->regs->user[0].access);
		ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);
	return ret;
}

static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
			      int phy_reg, u16 phy_data)
{
	struct davinci_mdio_data *data = bus->priv;
	u32 reg;
	int ret;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(data->dev);
	if (ret < 0)
		return ret;

	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
		   (phy_id << 16) | (phy_data & USERACCESS_DATA));

	while (1) {
		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			break;

		writel(reg, &data->regs->user[0].access);

		ret = wait_for_user_access(data);
		if (ret == -EAGAIN)
			continue;
		break;
	}

	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return ret;
}

static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "bus_freq", &prop)) {
		dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
		return -EINVAL;
	}
	data->bus_freq = prop;

	return 0;
}

struct k3_mdio_soc_data {
	bool manual_mode;
};

static const struct k3_mdio_soc_data am65_mdio_soc_data = {
	.manual_mode = true,
};

static const struct soc_device_attribute k3_mdio_socinfo[] = {
	{ .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
	{ .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
	{ .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
	{ .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
	{ .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
	{ .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
	{ .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
	{ .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
	{ .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
	{ .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
	{ /* sentinel */ },
};
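
/*
 * The SoC revisions listed above are affected by an MDIO interface erratum
 * (believed to be TI errata i2329); on these parts the frame state machine
 * is avoided and the bus is bit-banged through the MANUALIF register
 * instead (see davinci_mdio_probe() below).
 */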

#if IS_ENABLED(CONFIG_OF)
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
	.autosuspend_delay_ms = 100,
};

static const struct of_device_id davinci_mdio_of_mtable[] = {
	{ .compatible = "ti,davinci_mdio", },
	{ .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif

static const struct mdiobb_ops davinci_mdiobb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = davinci_set_mdc,
	.set_mdio_dir = davinci_set_mdio_dir,
	.set_mdio_data = davinci_set_mdio_data,
	.get_mdio_data = davinci_get_mdio_data,
};

static int davinci_mdio_probe(struct platform_device *pdev)
{
	struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device *dev = &pdev->dev;
	struct davinci_mdio_data *data;
	struct resource *res;
	struct phy_device *phy;
	int ret, addr;
	int autosuspend_delay_ms = -1;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->manual_mode = false;
	data->bb_ctrl.ops = &davinci_mdiobb_ops;

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		const struct soc_device_attribute *soc_match_data;

		soc_match_data = soc_device_match(k3_mdio_socinfo);
		if (soc_match_data && soc_match_data->data) {
			const struct k3_mdio_soc_data *socdata =
						soc_match_data->data;

			data->manual_mode = socdata->manual_mode;
		}
	}

	if (data->manual_mode)
		data->bus = alloc_mdio_bitbang(&data->bb_ctrl);
	else
		data->bus = devm_mdiobus_alloc(dev);

	if (!data->bus) {
		dev_err(dev, "failed to alloc mii bus\n");
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		const struct davinci_mdio_of_param *of_mdio_data;

		ret = davinci_mdio_probe_dt(&data->pdata, pdev);
		if (ret)
			return ret;
		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);

		of_mdio_data = of_device_get_match_data(&pdev->dev);
		if (of_mdio_data) {
			autosuspend_delay_ms =
					of_mdio_data->autosuspend_delay_ms;
		}
	} else {
		data->pdata = pdata ? (*pdata) : default_pdata;
		snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
			 pdev->name, pdev->id);
	}

	data->bus->name		= dev_name(dev);

	if (data->manual_mode) {
		data->bus->read		= davinci_mdiobb_read;
		data->bus->write	= davinci_mdiobb_write;
		data->bus->reset	= davinci_mdiobb_reset;

		dev_info(dev, "Configuring MDIO in manual mode\n");
	} else {
		data->bus->read		= davinci_mdio_read;
		data->bus->write	= davinci_mdio_write;
		data->bus->reset	= davinci_mdio_reset;
		data->bus->priv		= data;
	}
	data->bus->parent	= dev;

	data->clk = devm_clk_get(dev, "fck");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "failed to get device clock\n");
		return PTR_ERR(data->clk);
	}

	dev_set_drvdata(dev, data);
	data->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	data->regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!data->regs)
		return -ENOMEM;

	davinci_mdio_init_clk(data);

	pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Register the mii bus.
	 * Create PHYs from DT only if PHY child nodes are explicitly defined,
	 * to preserve backward compatibility with DTs that assume the DaVinci
	 * MDIO driver will always scan the bus to detect PHYs.
	 */
	if (dev->of_node && of_get_child_count(dev->of_node))
		data->skip_scan = true;

	ret = of_mdiobus_register(data->bus, dev->of_node);
	if (ret)
		goto bail_out;

	/* scan and dump the bus */
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		phy = mdiobus_get_phy(data->bus, addr);
		if (phy) {
			dev_info(dev, "phy[%d]: device %s, driver %s\n",
				 phy->mdio.addr, phydev_name(phy),
				 phy->drv ? phy->drv->name : "unknown");
		}
	}

	return 0;

bail_out:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int davinci_mdio_remove(struct platform_device *pdev)
{
	struct davinci_mdio_data *data = platform_get_drvdata(pdev);

	if (data->bus) {
		mdiobus_unregister(data->bus);

		if (data->manual_mode)
			free_mdio_bitbang(data->bus);
	}

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int davinci_mdio_runtime_suspend(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);
	u32 ctrl;

	/* shutdown the scan state machine */
	ctrl = readl(&data->regs->control);
	ctrl &= ~CONTROL_ENABLE;
	writel(ctrl, &data->regs->control);

	if (!data->manual_mode)
		wait_for_idle(data);

	return 0;
}

static int davinci_mdio_runtime_resume(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);

	if (data->manual_mode) {
		davinci_mdio_disable(data);
		davinci_mdio_enable_manual_mode(data);
	} else {
		davinci_mdio_enable(data);
	}
	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int davinci_mdio_suspend(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);
	int ret = 0;

	data->active_in_suspend = !pm_runtime_status_suspended(dev);
	if (data->active_in_suspend)
		ret = pm_runtime_force_suspend(dev);
	if (ret < 0)
		return ret;

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int davinci_mdio_resume(struct device *dev)
{
	struct davinci_mdio_data *data = dev_get_drvdata(dev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	if (data->active_in_suspend)
		pm_runtime_force_resume(dev);

	return 0;
}
#endif

static const struct dev_pm_ops davinci_mdio_pm_ops = {
	SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
			   davinci_mdio_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};

static struct platform_driver davinci_mdio_driver = {
	.driver = {
		.name	 = "davinci_mdio",
		.pm	 = &davinci_mdio_pm_ops,
		.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
	},
	.probe = davinci_mdio_probe,
	.remove = davinci_mdio_remove,
};

static int __init davinci_mdio_init(void)
{
	return platform_driver_register(&davinci_mdio_driver);
}
device_initcall(davinci_mdio_init);

static void __exit davinci_mdio_exit(void)
{
	platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DaVinci MDIO driver");