/*
 * Copyright (c) 2021 IP-Logix Inc.
 * Copyright 2022 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT ethernet_phy

#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/mdio.h>
#include <zephyr/net/phy.h>
#include <zephyr/net/mii.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(phy_mii, CONFIG_PHY_LOG_LEVEL);

struct phy_mii_dev_config {
	uint8_t phy_addr;
	bool no_reset;
	bool fixed;
	int fixed_speed;
	const struct device * const mdio;
};

struct phy_mii_dev_data {
	const struct device *dev;
	phy_callback_t cb;
	void *cb_data;
	struct k_work_delayable monitor_work;
	struct phy_link_state state;
	struct k_sem sem;
	bool gigabit_supported;
};

/* Offset to align capabilities bits of 1000BASE-T Control and Status regs */
#define MII_1KSTSR_OFFSET 2

#define MII_INVALID_PHY_ID UINT32_MAX

static int phy_mii_get_link_state(const struct device *dev,
				  struct phy_link_state *state);

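/* Read a PHY register over the MDIO bus. Returns -ENOTSUP for fixed-link
 * instances that have no MDIO bus attached.
 */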
static inline int phy_mii_reg_read(const struct device *dev, uint16_t reg_addr,
				   uint16_t *value)
{
	const struct phy_mii_dev_config *const cfg = dev->config;

	/* With no MDIO bus (fixed-link), register reads are not supported */
	if (cfg->mdio == NULL) {
		return -ENOTSUP;
	}
	return mdio_read(cfg->mdio, cfg->phy_addr, reg_addr, value);
}

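/* Write a PHY register over the MDIO bus. Returns -ENOTSUP for fixed-link
 * instances that have no MDIO bus attached.
 */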
static inline int phy_mii_reg_write(const struct device *dev, uint16_t reg_addr,
				    uint16_t value)
{
	const struct phy_mii_dev_config *const cfg = dev->config;

	/* With no MDIO bus (fixed-link), register writes are not supported */
	if (cfg->mdio == NULL) {
		return -ENOTSUP;
	}
	return mdio_write(cfg->mdio, cfg->phy_addr, reg_addr, value);
}

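/* Check the BMSR extended-status bit and, if set, the extended status
 * register for 1000BASE-T ability. Register read failures are treated as
 * "no gigabit support".
 */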
static bool is_gigabit_supported(const struct device *dev)
{
	uint16_t bmsr_reg;
	uint16_t estat_reg;

	if (phy_mii_reg_read(dev, MII_BMSR, &bmsr_reg) < 0) {
		return false;
	}

	if (bmsr_reg & MII_BMSR_EXTEND_STATUS) {
		if (phy_mii_reg_read(dev, MII_ESTAT, &estat_reg) < 0) {
			return false;
		}

		if (estat_reg & (MII_ESTAT_1000BASE_T_HALF
				 | MII_ESTAT_1000BASE_T_FULL)) {
			return true;
		}
	}

	return false;
}

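/* Issue a software reset through BMCR and poll until the PHY clears the
 * self-clearing reset bit.
 */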
static int reset(const struct device *dev)
{
	uint32_t timeout = 12U;
	uint16_t value;

	/* Issue a soft reset */
	if (phy_mii_reg_write(dev, MII_BMCR, MII_BMCR_RESET) < 0) {
		return -EIO;
	}

	/* Wait up to 0.6 s for the reset sequence to finish. According to
	 * IEEE 802.3, Section 2, Subsection 22.2.4.1.1 a PHY reset may take
	 * up to 0.5 s.
	 */
	do {
		if (timeout-- == 0U) {
			return -ETIMEDOUT;
		}

		k_sleep(K_MSEC(50));

		if (phy_mii_reg_read(dev, MII_BMCR, &value) < 0) {
			return -EIO;
		}
	} while (value & MII_BMCR_RESET);

	return 0;
}

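/* Assemble the 32-bit PHY identifier from the PHYID1/PHYID2 registers. */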
static int get_id(const struct device *dev, uint32_t *phy_id)
{
	uint16_t value;

	if (phy_mii_reg_read(dev, MII_PHYID1R, &value) < 0) {
		return -EIO;
	}

	*phy_id = value << 16;

	if (phy_mii_reg_read(dev, MII_PHYID2R, &value) < 0) {
		return -EIO;
	}

	*phy_id |= value;

	return 0;
}

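/* Refresh the cached link state. Returns 0 when the state changed,
 * -EAGAIN when it is unchanged, and a negative errno on failure. When the
 * link comes up, auto-negotiation is (re)started and the resolved speed and
 * duplex are derived from the local and link partner advertisements.
 */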
static int update_link_state(const struct device *dev)
{
	const struct phy_mii_dev_config *const cfg = dev->config;
	struct phy_mii_dev_data *const data = dev->data;
	bool link_up;

	uint16_t anar_reg = 0;
	uint16_t bmcr_reg = 0;
	uint16_t bmsr_reg = 0;
	uint16_t anlpar_reg = 0;
	uint16_t c1kt_reg = 0;
	uint16_t s1kt_reg = 0;
	uint32_t timeout = CONFIG_PHY_AUTONEG_TIMEOUT_MS / 100;

	if (phy_mii_reg_read(dev, MII_BMSR, &bmsr_reg) < 0) {
		return -EIO;
	}

	link_up = bmsr_reg & MII_BMSR_LINK_STATUS;

	/* If there is no change in link state don't proceed. */
	if (link_up == data->state.is_up) {
		return -EAGAIN;
	}

	data->state.is_up = link_up;

	/* If link is down, there is nothing more to be done */
	if (data->state.is_up == false) {
		LOG_INF("PHY (%d) is down", cfg->phy_addr);
		return 0;
	}

	/**
	 * Perform auto-negotiation sequence.
	 */
	LOG_DBG("PHY (%d) Starting MII PHY auto-negotiate sequence",
		cfg->phy_addr);

	/* Read PHY default advertising parameters */
	if (phy_mii_reg_read(dev, MII_ANAR, &anar_reg) < 0) {
		return -EIO;
	}

	/* Configure and start auto-negotiation process */
	if (phy_mii_reg_read(dev, MII_BMCR, &bmcr_reg) < 0) {
		return -EIO;
	}

	bmcr_reg |= MII_BMCR_AUTONEG_ENABLE | MII_BMCR_AUTONEG_RESTART;
	bmcr_reg &= ~MII_BMCR_ISOLATE;  /* Don't isolate the PHY */

	if (phy_mii_reg_write(dev, MII_BMCR, bmcr_reg) < 0) {
		return -EIO;
	}

	/* Wait for the auto-negotiation process to complete */
	do {
		if (timeout-- == 0U) {
			LOG_DBG("PHY (%d) auto-negotiate timed out",
				cfg->phy_addr);
			return -ETIMEDOUT;
		}

		k_sleep(K_MSEC(100));

		/* On some PHY chips the BMSR bits are latched, so the first
		 * read may report stale status.
		 */
		if (phy_mii_reg_read(dev, MII_BMSR, &bmsr_reg) < 0) {
			return -EIO;
		}

		/* The second read clears the latched bits and gives the current status */
		if (phy_mii_reg_read(dev, MII_BMSR, &bmsr_reg) < 0) {
			return -EIO;
		}
	} while (!(bmsr_reg & MII_BMSR_AUTONEG_COMPLETE));

	LOG_DBG("PHY (%d) auto-negotiate sequence completed",
		cfg->phy_addr);

	/** Read peer device capability */
	if (phy_mii_reg_read(dev, MII_ANLPAR, &anlpar_reg) < 0) {
		return -EIO;
	}

	if (data->gigabit_supported) {
		if (phy_mii_reg_read(dev, MII_1KTCR, &c1kt_reg) < 0) {
			return -EIO;
		}
		if (phy_mii_reg_read(dev, MII_1KSTSR, &s1kt_reg) < 0) {
			return -EIO;
		}
		s1kt_reg = (uint16_t)(s1kt_reg >> MII_1KSTSR_OFFSET);
	}

	if (data->gigabit_supported &&
	    ((c1kt_reg & s1kt_reg) & MII_ADVERTISE_1000_FULL)) {
		data->state.speed = LINK_FULL_1000BASE_T;
	} else if (data->gigabit_supported &&
		   ((c1kt_reg & s1kt_reg) & MII_ADVERTISE_1000_HALF)) {
		data->state.speed = LINK_HALF_1000BASE_T;
	} else if ((anar_reg & anlpar_reg) & MII_ADVERTISE_100_FULL) {
		data->state.speed = LINK_FULL_100BASE_T;
	} else if ((anar_reg & anlpar_reg) & MII_ADVERTISE_100_HALF) {
		data->state.speed = LINK_HALF_100BASE_T;
	} else if ((anar_reg & anlpar_reg) & MII_ADVERTISE_10_FULL) {
		data->state.speed = LINK_FULL_10BASE_T;
	} else {
		data->state.speed = LINK_HALF_10BASE_T;
	}

	LOG_INF("PHY (%d) Link speed %s Mb, %s duplex",
		cfg->phy_addr,
		PHY_LINK_IS_SPEED_1000M(data->state.speed) ? "1000" :
		(PHY_LINK_IS_SPEED_100M(data->state.speed) ? "100" : "10"),
		PHY_LINK_IS_FULL_DUPLEX(data->state.speed) ? "full" : "half");

	return 0;
}

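/* Invoke the registered link callback, if any, with the current link state. */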
static void invoke_link_cb(const struct device *dev)
{
	struct phy_mii_dev_data *const data = dev->data;
	struct phy_link_state state;

	if (data->cb == NULL) {
		return;
	}

	phy_mii_get_link_state(dev, &state);

	data->cb(data->dev, &state, data->cb_data);
}

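/* Periodic work handler: poll the PHY, invoke the link callback on state
 * changes and reschedule itself every CONFIG_PHY_MONITOR_PERIOD milliseconds.
 */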
static void monitor_work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct phy_mii_dev_data *const data =
		CONTAINER_OF(dwork, struct phy_mii_dev_data, monitor_work);
	const struct device *dev = data->dev;
	int rc;

	k_sem_take(&data->sem, K_FOREVER);

	rc = update_link_state(dev);

	k_sem_give(&data->sem);

	/* If link state has changed and a callback is set, invoke callback */
	if (rc == 0) {
		invoke_link_cb(dev);
	}

	/* Submit delayed work */
	k_work_reschedule(&data->monitor_work,
			  K_MSEC(CONFIG_PHY_MONITOR_PERIOD));
}

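/* .read callback of the PHY driver API. Reads into a 16-bit temporary so the
 * upper half of *data is well defined.
 */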
static int phy_mii_read(const struct device *dev, uint16_t reg_addr,
			uint32_t *data)
{
	uint16_t value = 0;
	int ret;

	ret = phy_mii_reg_read(dev, reg_addr, &value);
	*data = value;

	return ret;
}

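/* .write callback of the PHY driver API: write a single 16-bit PHY register. */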
static int phy_mii_write(const struct device *dev, uint16_t reg_addr,
			 uint32_t data)
{
	return phy_mii_reg_write(dev, reg_addr, (uint16_t)data);
}

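/* Update the advertisement registers (ANAR and, when gigabit is supported,
 * 1KTCR) according to adv_speeds and enable auto-negotiation.
 */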
static int phy_mii_cfg_link(const struct device *dev,
			    enum phy_link_speed adv_speeds)
{
	struct phy_mii_dev_data *const data = dev->data;
	uint16_t anar_reg;
	uint16_t bmcr_reg;
	uint16_t c1kt_reg;

	if (phy_mii_reg_read(dev, MII_ANAR, &anar_reg) < 0) {
		return -EIO;
	}

	if (phy_mii_reg_read(dev, MII_BMCR, &bmcr_reg) < 0) {
		return -EIO;
	}

	if (data->gigabit_supported) {
		if (phy_mii_reg_read(dev, MII_1KTCR, &c1kt_reg) < 0) {
			return -EIO;
		}
	}

	if (adv_speeds & LINK_FULL_10BASE_T) {
		anar_reg |= MII_ADVERTISE_10_FULL;
	} else {
		anar_reg &= ~MII_ADVERTISE_10_FULL;
	}

	if (adv_speeds & LINK_HALF_10BASE_T) {
		anar_reg |= MII_ADVERTISE_10_HALF;
	} else {
		anar_reg &= ~MII_ADVERTISE_10_HALF;
	}

	if (adv_speeds & LINK_FULL_100BASE_T) {
		anar_reg |= MII_ADVERTISE_100_FULL;
	} else {
		anar_reg &= ~MII_ADVERTISE_100_FULL;
	}

	if (adv_speeds & LINK_HALF_100BASE_T) {
		anar_reg |= MII_ADVERTISE_100_HALF;
	} else {
		anar_reg &= ~MII_ADVERTISE_100_HALF;
	}

	if (data->gigabit_supported) {
		if (adv_speeds & LINK_FULL_1000BASE_T) {
			c1kt_reg |= MII_ADVERTISE_1000_FULL;
		} else {
			c1kt_reg &= ~MII_ADVERTISE_1000_FULL;
		}

		if (adv_speeds & LINK_HALF_1000BASE_T) {
			c1kt_reg |= MII_ADVERTISE_1000_HALF;
		} else {
			c1kt_reg &= ~MII_ADVERTISE_1000_HALF;
		}

		if (phy_mii_reg_write(dev, MII_1KTCR, c1kt_reg) < 0) {
			return -EIO;
		}
	}

	bmcr_reg |= MII_BMCR_AUTONEG_ENABLE;

	if (phy_mii_reg_write(dev, MII_ANAR, anar_reg) < 0) {
		return -EIO;
	}

	if (phy_mii_reg_write(dev, MII_BMCR, bmcr_reg) < 0) {
		return -EIO;
	}

	return 0;
}

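/* Copy the cached link state to the caller under the state semaphore. */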
static int phy_mii_get_link_state(const struct device *dev,
				  struct phy_link_state *state)
{
	struct phy_mii_dev_data *const data = dev->data;

	k_sem_take(&data->sem, K_FOREVER);

	memcpy(state, &data->state, sizeof(struct phy_link_state));

	k_sem_give(&data->sem);

	return 0;
}

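/* Register a link state change callback and report the current state
 * immediately.
 */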
static int phy_mii_link_cb_set(const struct device *dev, phy_callback_t cb,
			       void *user_data)
{
	struct phy_mii_dev_data *const data = dev->data;

	data->cb = cb;
	data->cb_data = user_data;

	/**
	 * Immediately invoke the callback to notify the caller of the
	 * current link status.
	 */
	invoke_link_cb(dev);

	return 0;
}

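/* Driver init: for fixed links just record the configured speed and mark the
 * link up; otherwise reset the PHY, probe its ID, detect gigabit support,
 * advertise all speeds and start the periodic link monitor.
 */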
static int phy_mii_initialize(const struct device *dev)
{
	const struct phy_mii_dev_config *const cfg = dev->config;
	struct phy_mii_dev_data *const data = dev->data;
	uint32_t phy_id;

	k_sem_init(&data->sem, 1, 1);

	data->dev = dev;
	data->cb = NULL;

	/**
	 * If this is a *fixed* link then we don't need to communicate
	 * with a PHY. We set the link parameters as configured
	 * and set link state to up.
	 */
	if (cfg->fixed) {
		static const int speed_to_phy_link_speed[] = {
			LINK_HALF_10BASE_T,
			LINK_FULL_10BASE_T,
			LINK_HALF_100BASE_T,
			LINK_FULL_100BASE_T,
			LINK_HALF_1000BASE_T,
			LINK_FULL_1000BASE_T,
		};

		data->state.speed = speed_to_phy_link_speed[cfg->fixed_speed];
		data->state.is_up = true;
	} else {
		data->state.is_up = false;

		mdio_bus_enable(cfg->mdio);

		if (cfg->no_reset == false) {
			reset(dev);
		}

		if (get_id(dev, &phy_id) == 0) {
			if (phy_id == MII_INVALID_PHY_ID) {
				LOG_ERR("No PHY found at address %d",
					cfg->phy_addr);

				return -EINVAL;
			}

			LOG_INF("PHY (%d) ID %X", cfg->phy_addr, phy_id);
		}

		data->gigabit_supported = is_gigabit_supported(dev);

		/* Advertise all speeds */
		phy_mii_cfg_link(dev, LINK_HALF_10BASE_T |
				      LINK_FULL_10BASE_T |
				      LINK_HALF_100BASE_T |
				      LINK_FULL_100BASE_T |
				      LINK_HALF_1000BASE_T |
				      LINK_FULL_1000BASE_T);

		k_work_init_delayable(&data->monitor_work,
				      monitor_work_handler);

		monitor_work_handler(&data->monitor_work.work);
	}

	return 0;
}

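/* Per-instance config, data and device objects for every enabled
 * "ethernet_phy" devicetree node. Fixed-link instances get a NULL MDIO bus
 * pointer, which makes the register accessors above return -ENOTSUP.
 */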
#define IS_FIXED_LINK(n) DT_INST_NODE_HAS_PROP(n, fixed_link)

static DEVICE_API(ethphy, phy_mii_driver_api) = {
	.get_link = phy_mii_get_link_state,
	.cfg_link = phy_mii_cfg_link,
	.link_cb_set = phy_mii_link_cb_set,
	.read = phy_mii_read,
	.write = phy_mii_write,
};

#define PHY_MII_CONFIG(n)						\
static const struct phy_mii_dev_config phy_mii_dev_config_##n = {	\
	.phy_addr = DT_INST_REG_ADDR(n),				\
	.no_reset = DT_INST_PROP(n, no_reset),				\
	.fixed = IS_FIXED_LINK(n),					\
	.fixed_speed = DT_INST_ENUM_IDX_OR(n, fixed_link, 0),		\
	.mdio = UTIL_AND(UTIL_NOT(IS_FIXED_LINK(n)),			\
			 DEVICE_DT_GET(DT_INST_BUS(n)))			\
};

#define PHY_MII_DEVICE(n)						\
PHY_MII_CONFIG(n);							\
static struct phy_mii_dev_data phy_mii_dev_data_##n;			\
DEVICE_DT_INST_DEFINE(n,						\
		      &phy_mii_initialize,				\
		      NULL,						\
		      &phy_mii_dev_data_##n,				\
		      &phy_mii_dev_config_##n, POST_KERNEL,		\
		      CONFIG_PHY_INIT_PRIORITY,				\
		      &phy_mii_driver_api);

DT_INST_FOREACH_STATUS_OKAY(PHY_MII_DEVICE)