/* MCUX Ethernet Driver
 *
 * Copyright (c) 2016-2017 ARM Ltd
 * Copyright (c) 2016 Linaro Ltd
 * Copyright (c) 2018 Intel Corporation
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_kinetis_ethernet

/* Driver Limitations:
 *
 * There is no statistics collection for either normal operation or
 * error behaviour.
 */

#define LOG_MODULE_NAME eth_mcux
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#define RING_ID 0

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <zephyr/device.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <ethernet/eth_stats.h>
#include <zephyr/pm/device.h>
#include <zephyr/irq.h>

#if defined(CONFIG_PTP_CLOCK_MCUX)
#include <zephyr/drivers/ptp_clock.h>
#endif

#if defined(CONFIG_NET_DSA)
#include <zephyr/net/dsa.h>
#endif

#include "fsl_enet.h"
#include "fsl_phy.h"
#include "fsl_phyksz8081.h"
#include "fsl_enet_mdio.h"
#if defined(CONFIG_NET_POWER_MANAGEMENT)
#include "fsl_clock.h"
#include <zephyr/drivers/clock_control.h>
#endif
#include <zephyr/devicetree.h>
#include <zephyr/drivers/gpio.h>
#if defined(CONFIG_PINCTRL)
#include <zephyr/drivers/pinctrl.h>
#endif

#include "eth.h"

#define PHY_OMS_OVERRIDE_REG 0x16U /* The PHY Operation Mode Strap Override register. */
#define PHY_OMS_STATUS_REG 0x17U /* The PHY Operation Mode Strap Status register. */

#define PHY_OMS_NANDTREE_MASK 0x0020U /* The PHY NAND Tree Strap-In Override/Status mask. */
#define PHY_OMS_FACTORY_MODE_MASK 0x8000U /* The factory mode Override/Status mask. */

/* Defines the PHY KSZ8081 vendor defined registers. */
#define PHY_CONTROL1_REG 0x1EU /* The PHY control one register. */
#define PHY_CONTROL2_REG 0x1FU /* The PHY control two register. */

/* Defines the PHY KSZ8081 ID number. */
#define PHY_CONTROL_ID1 0x22U /* The PHY ID1 */

/* Defines the mask flag of operation mode in control registers */
#define PHY_CTL2_REMOTELOOP_MASK 0x0004U /* The PHY remote loopback mask. */
#define PHY_CTL2_REFCLK_SELECT_MASK 0x0080U /* The PHY RMII reference clock select. */
#define PHY_CTL1_10HALFDUPLEX_MASK 0x0001U /* The PHY 10M half duplex mask. */
#define PHY_CTL1_100HALFDUPLEX_MASK 0x0002U /* The PHY 100M half duplex mask. */
#define PHY_CTL1_10FULLDUPLEX_MASK 0x0005U /* The PHY 10M full duplex mask. */
#define PHY_CTL1_100FULLDUPLEX_MASK 0x0006U /* The PHY 100M full duplex mask. */
#define PHY_CTL1_SPEEDUPLX_MASK 0x0007U /* The PHY speed and duplex mask. */
#define PHY_CTL1_ENERGYDETECT_MASK 0x10U /* The PHY signal present on rx differential pair. */
#define PHY_CTL1_LINKUP_MASK 0x100U /* The PHY link up. */
#define PHY_LINK_READY_MASK (PHY_CTL1_ENERGYDETECT_MASK | PHY_CTL1_LINKUP_MASK)

/* Defines the timeout macro. */
#define PHY_READID_TIMEOUT_COUNT 1000U

/* Define RX and TX thread stack sizes */
#define ETH_MCUX_RX_THREAD_STACK_SIZE 1600
#define ETH_MCUX_TX_THREAD_STACK_SIZE 1600

#define FREESCALE_OUI_B0 0x00
#define FREESCALE_OUI_B1 0x04
#define FREESCALE_OUI_B2 0x9f

#define ETH_MCUX_FIXED_LINK_NODE \
	DT_CHILD(DT_NODELABEL(enet), fixed_link)
#define ETH_MCUX_FIXED_LINK \
	DT_NODE_EXISTS(ETH_MCUX_FIXED_LINK_NODE)
#define ETH_MCUX_FIXED_LINK_SPEED \
	DT_PROP(ETH_MCUX_FIXED_LINK_NODE, speed)
#define ETH_MCUX_FIXED_LINK_FULL_DUPLEX \
	DT_PROP(ETH_MCUX_FIXED_LINK_NODE, full_duplex)

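/* States of the PHY management state machine. Transitions are driven by
 * eth_mcux_phy_event(), which runs from the phy_work item (submitted
 * from the MII interrupt) and from delayed_phy_work (periodic polling).
 */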
enum eth_mcux_phy_state {
	eth_mcux_phy_state_initial,
	eth_mcux_phy_state_reset,
	eth_mcux_phy_state_autoneg,
	eth_mcux_phy_state_restart,
	eth_mcux_phy_state_read_status,
	eth_mcux_phy_state_read_duplex,
	eth_mcux_phy_state_wait,
	eth_mcux_phy_state_closing
};

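/* MDIO register access hooks handed to the MCUX PHY driver. */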
struct _phy_resource {
	mdioWrite write;
	mdioRead read;
};

#if defined(CONFIG_NET_POWER_MANAGEMENT)
extern uint32_t ENET_GetInstance(ENET_Type *base);
static const clock_ip_name_t enet_clocks[] = ENET_CLOCKS;
#endif

static void eth_mcux_init(const struct device *dev);

#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
static const char *phy_state_name(enum eth_mcux_phy_state state)
{
	static const char * const name[] = {
		"initial",
		"reset",
		"autoneg",
		"restart",
		"read-status",
		"read-duplex",
		"wait",
		"closing"
	};

	return name[state];
}
#endif

static const char *eth_name(ENET_Type *base)
{
	switch ((int)base) {
	case DT_INST_REG_ADDR(0):
		return "ETH_0";
#if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay)
	case DT_INST_REG_ADDR(1):
		return "ETH_1";
#endif
	default:
		return "unknown";
	}
}

struct eth_context {
	ENET_Type *base;
	void (*config_func)(void);
	/* If VLAN is enabled, there can be multiple VLAN interfaces related to
	 * this physical device. In that case, this pointer value is not really
	 * used for anything.
	 */
	struct net_if *iface;
#if defined(CONFIG_NET_POWER_MANAGEMENT)
	clock_ip_name_t clock;
	const struct device *clock_dev;
#endif
	enet_handle_t enet_handle;
#if defined(CONFIG_PTP_CLOCK_MCUX)
	const struct device *ptp_clock;
	enet_ptp_config_t ptp_config;
	double clk_ratio;
	struct k_mutex ptp_mutex;
	struct k_sem ptp_ts_sem;
#endif
	struct k_sem tx_buf_sem;
	phy_handle_t *phy_handle;
	struct _phy_resource *phy_config;
	struct k_sem rx_thread_sem;
	enum eth_mcux_phy_state phy_state;
	bool enabled;
	bool link_up;
	uint32_t phy_addr;
	uint32_t rx_irq_num;
	uint32_t tx_irq_num;
	phy_duplex_t phy_duplex;
	phy_speed_t phy_speed;
	uint8_t mac_addr[6];
	void (*generate_mac)(uint8_t *);
	struct k_work phy_work;
	struct k_work_delayable delayed_phy_work;

	K_KERNEL_STACK_MEMBER(rx_thread_stack, ETH_MCUX_RX_THREAD_STACK_SIZE);
	struct k_thread rx_thread;

	/* TODO: FIXME. This Ethernet frame sized buffer is used for
	 * interfacing with MCUX. How it works is that hardware uses
	 * DMA scatter buffers to receive a frame, and then the public
	 * MCUX call gathers them into this buffer (there's no other
	 * public interface). All this happens only for this driver
	 * to scatter this buffer again into Zephyr fragment buffers.
	 * This is not efficient, but a proper resolution of this issue
	 * depends on the introduction of zero-copy networking support
	 * in Zephyr, and adding the needed interface to MCUX (or
	 * bypassing it and writing a more complex driver working
	 * directly with hardware).
	 *
	 * Note that we do not copy the FCS into this buffer, thus the
	 * size is 1514 bytes.
	 */
	struct k_mutex tx_frame_buf_mutex;
	struct k_mutex rx_frame_buf_mutex;
	uint8_t *tx_frame_buf; /* Max MTU + ethernet header */
	uint8_t *rx_frame_buf; /* Max MTU + ethernet header */
#if defined(CONFIG_PINCTRL)
	const struct pinctrl_dev_config *pincfg;
#endif
#if defined(CONFIG_ETH_MCUX_PHY_RESET)
	const struct gpio_dt_spec int_gpio;
	const struct gpio_dt_spec reset_gpio;
#endif
};

/* Use ENET_FRAME_MAX_VLANFRAMELEN for VLAN frame size
 * Use ENET_FRAME_MAX_FRAMELEN for Ethernet frame size
 */
#if defined(CONFIG_NET_VLAN)
#if !defined(ENET_FRAME_MAX_VLANFRAMELEN)
#define ENET_FRAME_MAX_VLANFRAMELEN (ENET_FRAME_MAX_FRAMELEN + 4)
#endif
#define ETH_MCUX_BUFFER_SIZE \
	ROUND_UP(ENET_FRAME_MAX_VLANFRAMELEN, ENET_BUFF_ALIGNMENT)
#else
#define ETH_MCUX_BUFFER_SIZE \
	ROUND_UP(ENET_FRAME_MAX_FRAMELEN, ENET_BUFF_ALIGNMENT)
#endif /* CONFIG_NET_VLAN */

#ifdef CONFIG_SOC_FAMILY_KINETIS
#if defined(CONFIG_NET_POWER_MANAGEMENT)
static void eth_mcux_phy_enter_reset(struct eth_context *context);
void eth_mcux_phy_stop(struct eth_context *context);

static int eth_mcux_device_pm_action(const struct device *dev,
				     enum pm_device_action action)
{
	struct eth_context *eth_ctx = dev->data;
	int ret = 0;

	if (!device_is_ready(eth_ctx->clock_dev)) {
		LOG_ERR("No CLOCK dev");

		ret = -EIO;
		goto out;
	}

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		LOG_DBG("Suspending");

		ret = net_if_suspend(eth_ctx->iface);
		if (ret == -EBUSY) {
			goto out;
		}

		eth_mcux_phy_enter_reset(eth_ctx);
		eth_mcux_phy_stop(eth_ctx);

		ENET_Reset(eth_ctx->base);
		ENET_Deinit(eth_ctx->base);
		clock_control_off(eth_ctx->clock_dev,
				  (clock_control_subsys_t)eth_ctx->clock);
		break;
	case PM_DEVICE_ACTION_RESUME:
		LOG_DBG("Resuming");

		clock_control_on(eth_ctx->clock_dev,
				 (clock_control_subsys_t)eth_ctx->clock);
		eth_mcux_init(dev);
		net_if_resume(eth_ctx->iface);
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

out:

	return ret;
}
#endif /* CONFIG_NET_POWER_MANAGEMENT */
#endif /* CONFIG_SOC_FAMILY_KINETIS */

#if ETH_MCUX_FIXED_LINK
static void eth_mcux_get_phy_params(phy_duplex_t *p_phy_duplex,
				    phy_speed_t *p_phy_speed)
{
	*p_phy_duplex = kPHY_HalfDuplex;
#if ETH_MCUX_FIXED_LINK_FULL_DUPLEX
	*p_phy_duplex = kPHY_FullDuplex;
#endif

	*p_phy_speed = kPHY_Speed10M;
#if ETH_MCUX_FIXED_LINK_SPEED == 100
	*p_phy_speed = kPHY_Speed100M;
#endif
}
#else

static void eth_mcux_decode_duplex_and_speed(uint32_t status,
					     phy_duplex_t *p_phy_duplex,
					     phy_speed_t *p_phy_speed)
{
	switch (status & PHY_CTL1_SPEEDUPLX_MASK) {
	case PHY_CTL1_10FULLDUPLEX_MASK:
		*p_phy_duplex = kPHY_FullDuplex;
		*p_phy_speed = kPHY_Speed10M;
		break;
	case PHY_CTL1_100FULLDUPLEX_MASK:
		*p_phy_duplex = kPHY_FullDuplex;
		*p_phy_speed = kPHY_Speed100M;
		break;
	case PHY_CTL1_100HALFDUPLEX_MASK:
		*p_phy_duplex = kPHY_HalfDuplex;
		*p_phy_speed = kPHY_Speed100M;
		break;
	case PHY_CTL1_10HALFDUPLEX_MASK:
		*p_phy_duplex = kPHY_HalfDuplex;
		*p_phy_speed = kPHY_Speed10M;
		break;
	}
}
#endif /* ETH_MCUX_FIXED_LINK */

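/* Resolve the interface for a frame: pick the VLAN interface matching
 * the tag when VLAN is enabled, otherwise fall back to the main iface.
 */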
static inline struct net_if *get_iface(struct eth_context *ctx, uint16_t vlan_tag)
{
#if defined(CONFIG_NET_VLAN)
	struct net_if *iface;

	iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);
	if (!iface) {
		return ctx->iface;
	}

	return iface;
#else
	ARG_UNUSED(vlan_tag);

	return ctx->iface;
#endif
}

static void eth_mcux_phy_enter_reset(struct eth_context *context)
{
	/* Reset the PHY. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	ENET_StartSMIWrite(context->base, context->phy_addr,
			   PHY_BASICCONTROL_REG,
			   kENET_MiiWriteValidFrame,
			   PHY_BCTL_RESET_MASK);
#endif
	context->phy_state = eth_mcux_phy_state_reset;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	k_work_submit(&context->phy_work);
#endif
}

static void eth_mcux_phy_start(struct eth_context *context)
{
#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
	LOG_DBG("%s phy_state=%s", eth_name(context->base),
		phy_state_name(context->phy_state));
#endif

	context->enabled = true;

	switch (context->phy_state) {
	case eth_mcux_phy_state_initial:
		context->phy_handle->phyAddr = context->phy_addr;
		ENET_ActiveRead(context->base);
		/* Reset the PHY. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		ENET_StartSMIWrite(context->base, context->phy_addr,
				   PHY_BASICCONTROL_REG,
				   kENET_MiiWriteValidFrame,
				   PHY_BCTL_RESET_MASK);
#else
		/*
		 * Without SMI communication, wait for the iface to be
		 * brought up by the network core.
		 */
		k_work_submit(&context->phy_work);
		break;
#endif
#if defined(CONFIG_SOC_SERIES_IMX_RT)
		context->phy_state = eth_mcux_phy_state_initial;
#else
		context->phy_state = eth_mcux_phy_state_reset;
#endif
		break;
	case eth_mcux_phy_state_reset:
		eth_mcux_phy_enter_reset(context);
		break;
	case eth_mcux_phy_state_autoneg:
	case eth_mcux_phy_state_restart:
	case eth_mcux_phy_state_read_status:
	case eth_mcux_phy_state_read_duplex:
	case eth_mcux_phy_state_wait:
	case eth_mcux_phy_state_closing:
		break;
	}
}

void eth_mcux_phy_stop(struct eth_context *context)
{
#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
	LOG_DBG("%s phy_state=%s", eth_name(context->base),
		phy_state_name(context->phy_state));
#endif

	context->enabled = false;

	switch (context->phy_state) {
	case eth_mcux_phy_state_initial:
	case eth_mcux_phy_state_reset:
	case eth_mcux_phy_state_autoneg:
	case eth_mcux_phy_state_restart:
	case eth_mcux_phy_state_read_status:
	case eth_mcux_phy_state_read_duplex:
		/* Do nothing, let the current communication complete
		 * then deal with shutdown.
		 */
		context->phy_state = eth_mcux_phy_state_closing;
		break;
	case eth_mcux_phy_state_wait:
		k_work_cancel_delayable(&context->delayed_phy_work);
		/* @todo, actually power down the PHY ? */
		context->phy_state = eth_mcux_phy_state_initial;
		break;
	case eth_mcux_phy_state_closing:
		/* We are already going down. */
		break;
	}
}

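/* Advance the PHY state machine by one step. Each SMI read/write is
 * started asynchronously here; the completion raises the MII interrupt,
 * which resubmits phy_work and re-enters this function in the next state.
 */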
static void eth_mcux_phy_event(struct eth_context *context)
{
#if !(defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK)
	uint32_t status;
#endif
	bool link_up;
#if defined(CONFIG_SOC_SERIES_IMX_RT)
	status_t res;
	uint16_t ctrl2;
#endif
	phy_duplex_t phy_duplex = kPHY_FullDuplex;
	phy_speed_t phy_speed = kPHY_Speed100M;

#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
	LOG_DBG("%s phy_state=%s", eth_name(context->base),
		phy_state_name(context->phy_state));
#endif
	switch (context->phy_state) {
	case eth_mcux_phy_state_initial:
#if defined(CONFIG_SOC_SERIES_IMX_RT)
		ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK);
		res = PHY_Read(context->phy_handle, PHY_CONTROL2_REG, &ctrl2);
		ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK);
		if (res != kStatus_Success) {
			LOG_WRN("Reading PHY reg failed (status 0x%x)", res);
			k_work_submit(&context->phy_work);
		} else {
			ctrl2 |= PHY_CTL2_REFCLK_SELECT_MASK;
			ENET_StartSMIWrite(context->base, context->phy_addr,
					   PHY_CONTROL2_REG,
					   kENET_MiiWriteValidFrame,
					   ctrl2);
		}
		context->phy_state = eth_mcux_phy_state_reset;
#endif /* CONFIG_SOC_SERIES_IMX_RT */
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		/*
		 * When the iface is available, proceed with the eth link
		 * setup; otherwise reschedule eth_mcux_phy_event and
		 * check again after 1 ms.
		 */
		if (context->iface) {
			context->phy_state = eth_mcux_phy_state_reset;
		}

		k_work_reschedule(&context->delayed_phy_work, K_MSEC(1));
#endif
		break;
	case eth_mcux_phy_state_closing:
		if (context->enabled) {
			eth_mcux_phy_enter_reset(context);
		} else {
			/* @todo, actually power down the PHY ? */
			context->phy_state = eth_mcux_phy_state_initial;
		}
		break;
	case eth_mcux_phy_state_reset:
		/* Setup PHY autonegotiation. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		ENET_StartSMIWrite(context->base, context->phy_addr,
				   PHY_AUTONEG_ADVERTISE_REG,
				   kENET_MiiWriteValidFrame,
				   (PHY_100BASETX_FULLDUPLEX_MASK |
				    PHY_100BASETX_HALFDUPLEX_MASK |
				    PHY_10BASETX_FULLDUPLEX_MASK |
				    PHY_10BASETX_HALFDUPLEX_MASK |
				    PHY_IEEE802_3_SELECTOR_MASK));
#endif
		context->phy_state = eth_mcux_phy_state_autoneg;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		k_work_submit(&context->phy_work);
#endif
		break;
	case eth_mcux_phy_state_autoneg:
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		/* Setup PHY autonegotiation. */
		ENET_StartSMIWrite(context->base, context->phy_addr,
				   PHY_BASICCONTROL_REG,
				   kENET_MiiWriteValidFrame,
				   (PHY_BCTL_AUTONEG_MASK |
				    PHY_BCTL_RESTART_AUTONEG_MASK));
#endif
		context->phy_state = eth_mcux_phy_state_restart;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		k_work_submit(&context->phy_work);
#endif
		break;
	case eth_mcux_phy_state_wait:
	case eth_mcux_phy_state_restart:
		/* Start reading the PHY basic status. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		ENET_StartSMIRead(context->base, context->phy_addr,
				  PHY_BASICSTATUS_REG,
				  kENET_MiiReadValidFrame);
#endif
		context->phy_state = eth_mcux_phy_state_read_status;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		k_work_submit(&context->phy_work);
#endif
		break;
	case eth_mcux_phy_state_read_status:
		/* PHY Basic status is available. */
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK
		link_up = true;
#else
		status = ENET_ReadSMIData(context->base);
		link_up = status & PHY_BSTATUS_LINKSTATUS_MASK;
#endif
		if (link_up && !context->link_up && context->iface != NULL) {
			/* Start reading the PHY control register. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
			ENET_StartSMIRead(context->base, context->phy_addr,
					  PHY_CONTROL1_REG,
					  kENET_MiiReadValidFrame);
#endif
			context->link_up = link_up;
			context->phy_state = eth_mcux_phy_state_read_duplex;
			net_eth_carrier_on(context->iface);
			k_msleep(1);
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
			k_work_submit(&context->phy_work);
#endif
		} else if (!link_up && context->link_up && context->iface != NULL) {
			LOG_INF("%s link down", eth_name(context->base));
			context->link_up = link_up;
			k_work_reschedule(&context->delayed_phy_work,
					  K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS));
			context->phy_state = eth_mcux_phy_state_wait;
			net_eth_carrier_off(context->iface);
		} else {
			k_work_reschedule(&context->delayed_phy_work,
					  K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS));
			context->phy_state = eth_mcux_phy_state_wait;
		}

		break;
	case eth_mcux_phy_state_read_duplex:
		/* PHY control register is available. */
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK
		eth_mcux_get_phy_params(&phy_duplex, &phy_speed);
		LOG_INF("%s - Fixed Link", eth_name(context->base));
#else
		status = ENET_ReadSMIData(context->base);
		eth_mcux_decode_duplex_and_speed(status,
						 &phy_duplex,
						 &phy_speed);
#endif
		if (phy_speed != context->phy_speed ||
		    phy_duplex != context->phy_duplex) {
			context->phy_speed = phy_speed;
			context->phy_duplex = phy_duplex;
			ENET_SetMII(context->base,
				    (enet_mii_speed_t) phy_speed,
				    (enet_mii_duplex_t) phy_duplex);
		}

		LOG_INF("%s enabled %sM %s-duplex mode.",
			eth_name(context->base),
			(phy_speed ? "100" : "10"),
			(phy_duplex ? "full" : "half"));
		k_work_reschedule(&context->delayed_phy_work,
				  K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS));
		context->phy_state = eth_mcux_phy_state_wait;
		break;
	}
}

static void eth_mcux_phy_work(struct k_work *item)
{
	struct eth_context *context =
		CONTAINER_OF(item, struct eth_context, phy_work);

	eth_mcux_phy_event(context);
}

static void eth_mcux_delayed_phy_work(struct k_work *item)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(item);
	struct eth_context *context =
		CONTAINER_OF(dwork, struct eth_context, delayed_phy_work);

	eth_mcux_phy_event(context);
}

static void eth_mcux_phy_setup(struct eth_context *context)
{
#if defined(CONFIG_SOC_SERIES_IMX_RT)
	status_t res;
	uint16_t oms_override;

	/* Disable MII interrupts to prevent triggering PHY events. */
	ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK);

	res = PHY_Read(context->phy_handle,
		       PHY_OMS_OVERRIDE_REG, &oms_override);
	if (res != kStatus_Success) {
		LOG_WRN("Reading PHY reg failed (status 0x%x)", res);
	} else {
		/* Based on strap-in pins the PHY can be in factory test mode.
		 * Force normal operation.
		 */
		oms_override &= ~PHY_OMS_FACTORY_MODE_MASK;

		/* Prevent PHY entering NAND Tree mode override. */
		if (oms_override & PHY_OMS_NANDTREE_MASK) {
			oms_override &= ~PHY_OMS_NANDTREE_MASK;
		}

		res = PHY_Write(context->phy_handle,
				PHY_OMS_OVERRIDE_REG, oms_override);
		if (res != kStatus_Success) {
			LOG_WRN("Writing PHY reg failed (status 0x%x)", res);
		}
	}

	ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK);
#endif
}

#if defined(CONFIG_PTP_CLOCK_MCUX)

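/* Identify PTP (IEEE 1588) frames by their EtherType and raise their
 * priority to NET_PRIORITY_CA. The return value tells the caller
 * whether the frame needs a hardware timestamp.
 */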
static bool eth_get_ptp_data(struct net_if *iface, struct net_pkt *pkt)
{
	int eth_hlen;

#if defined(CONFIG_NET_VLAN)
	struct net_eth_vlan_hdr *hdr_vlan;
	struct ethernet_context *eth_ctx;
	bool vlan_enabled = false;

	eth_ctx = net_if_l2_data(iface);
	if (net_eth_is_vlan_enabled(eth_ctx, iface)) {
		hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
		vlan_enabled = true;

		if (ntohs(hdr_vlan->type) != NET_ETH_PTYPE_PTP) {
			return false;
		}

		eth_hlen = sizeof(struct net_eth_vlan_hdr);
	} else
#endif
	{
		if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) {
			return false;
		}

		eth_hlen = sizeof(struct net_eth_hdr);
	}

	net_pkt_set_priority(pkt, NET_PRIORITY_CA);

	return true;
}
#endif /* CONFIG_PTP_CLOCK_MCUX */

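/* Transmit path: copy the packet into the contiguous tx_frame_buf and
 * hand it to the ENET DMA. tx_buf_sem throttles submissions to the
 * number of TX buffer descriptors; it is given back from the TX ISR.
 */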
static int eth_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_context *context = dev->data;
	uint16_t total_len = net_pkt_get_len(pkt);
	status_t status;

#if defined(CONFIG_PTP_CLOCK_MCUX)
	bool timestamped_frame;
#endif

	/* Wait for a TX buffer descriptor to be available */
	k_sem_take(&context->tx_buf_sem, K_FOREVER);

	k_mutex_lock(&context->tx_frame_buf_mutex, K_FOREVER);

	if (net_pkt_read(pkt, context->tx_frame_buf, total_len)) {
		k_mutex_unlock(&context->tx_frame_buf_mutex);
		return -EIO;
	}

#if defined(CONFIG_PTP_CLOCK_MCUX)
	timestamped_frame = eth_get_ptp_data(net_pkt_iface(pkt), pkt);
	if (timestamped_frame) {
		status = ENET_SendFrame(context->base, &context->enet_handle,
					context->tx_frame_buf, total_len,
					RING_ID, true, pkt);
		if (!status) {
			net_pkt_ref(pkt);
			/*
			 * Network stack will modify the packet upon return,
			 * so wait for the packet to be timestamped,
			 * which will occur within the TX ISR, before
			 * returning
			 */
			k_sem_take(&context->ptp_ts_sem, K_FOREVER);
		}

	} else
#endif
	{
		status = ENET_SendFrame(context->base, &context->enet_handle,
					context->tx_frame_buf, total_len,
					RING_ID, false, NULL);
	}

	if (status) {
		LOG_ERR("ENET_SendFrame error: %d", (int)status);
		k_mutex_unlock(&context->tx_frame_buf_mutex);
		ENET_ReclaimTxDescriptor(context->base,
					 &context->enet_handle, RING_ID);
		return -1;
	}

	k_mutex_unlock(&context->tx_frame_buf_mutex);

	return 0;
}

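/* Receive path: query the frame size, allocate a net_pkt, gather the
 * DMA scatter buffers into rx_frame_buf via ENET_ReadFrame() and copy
 * the result into the packet. Returns 1 while frames remain, 0 when
 * the RX ring is empty, and -EIO on error.
 */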
static int eth_rx(struct eth_context *context)
{
	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
	uint32_t frame_length = 0U;
	struct net_if *iface;
	struct net_pkt *pkt;
	status_t status;
	uint32_t ts;

#if defined(CONFIG_PTP_CLOCK_MCUX)
	enet_ptp_time_t ptpTimeData;
#endif

	status = ENET_GetRxFrameSize(&context->enet_handle,
				     (uint32_t *)&frame_length, RING_ID);
	if (status == kStatus_ENET_RxFrameEmpty) {
		return 0;
	} else if (status == kStatus_ENET_RxFrameError) {
		enet_data_error_stats_t error_stats;

		LOG_ERR("ENET_GetRxFrameSize return: %d", (int)status);

		ENET_GetRxErrBeforeReadFrame(&context->enet_handle,
					     &error_stats, RING_ID);
		goto flush;
	}

	if (frame_length > NET_ETH_MAX_FRAME_SIZE) {
		LOG_ERR("frame too large (%d)", frame_length);
		goto flush;
	}

	/* Using root iface. It will be updated in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(context->iface, frame_length,
					   AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		goto flush;
	}

	/* The RX frame buffer may be accessed from multiple threads,
	 * so protect it with a mutex.
	 */
	k_mutex_lock(&context->rx_frame_buf_mutex, K_FOREVER);

	status = ENET_ReadFrame(context->base, &context->enet_handle,
				context->rx_frame_buf, frame_length, RING_ID, &ts);
	if (status) {
		LOG_ERR("ENET_ReadFrame failed: %d", (int)status);
		net_pkt_unref(pkt);

		k_mutex_unlock(&context->rx_frame_buf_mutex);
		goto error;
	}

	if (net_pkt_write(pkt, context->rx_frame_buf, frame_length)) {
		LOG_ERR("Unable to write frame into the pkt");
		net_pkt_unref(pkt);
		k_mutex_unlock(&context->rx_frame_buf_mutex);
		goto error;
	}

	k_mutex_unlock(&context->rx_frame_buf_mutex);

#if defined(CONFIG_NET_VLAN)
	{
		struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);

		if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
			struct net_eth_vlan_hdr *hdr_vlan =
				(struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);

			net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci));
			vlan_tag = net_pkt_vlan_tag(pkt);

#if CONFIG_NET_TC_RX_COUNT > 1
			{
				enum net_priority prio;

				prio = net_vlan2priority(
						net_pkt_vlan_priority(pkt));
				net_pkt_set_priority(pkt, prio);
			}
#endif
		}
	}
#endif /* CONFIG_NET_VLAN */

	/*
	 * Use MAC timestamp
	 */
#if defined(CONFIG_PTP_CLOCK_MCUX)
	k_mutex_lock(&context->ptp_mutex, K_FOREVER);
	if (eth_get_ptp_data(get_iface(context, vlan_tag), pkt)) {
		ENET_Ptp1588GetTimer(context->base, &context->enet_handle,
				     &ptpTimeData);
		/* If the nanosecond counter wrapped between capturing the
		 * timestamp in the Rx BD and reading the timer here,
		 * subtract one second so the reconstructed Rx timestamp
		 * stays accurate.
		 */
		if (ptpTimeData.nanosecond < ts) {
			ptpTimeData.second--;
		}

		pkt->timestamp.nanosecond = ts;
		pkt->timestamp.second = ptpTimeData.second;
	} else {
		/* Invalid value. */
		pkt->timestamp.nanosecond = UINT32_MAX;
		pkt->timestamp.second = UINT64_MAX;
	}
	k_mutex_unlock(&context->ptp_mutex);
#endif /* CONFIG_PTP_CLOCK_MCUX */

	iface = get_iface(context, vlan_tag);
#if defined(CONFIG_NET_DSA)
	iface = dsa_net_recv(iface, &pkt);
#endif
	if (net_recv_data(iface, pkt) < 0) {
		net_pkt_unref(pkt);
		goto error;
	}

	return 1;
flush:
	/* Flush the current read buffer. This operation can
	 * only report failure if there is no frame to flush,
	 * which cannot happen in this context.
	 */
	status = ENET_ReadFrame(context->base, &context->enet_handle, NULL,
				0, RING_ID, NULL);
	__ASSERT_NO_MSG(status == kStatus_Success);
error:
	eth_stats_update_errors_rx(get_iface(context, vlan_tag));
	return -EIO;
}

#if defined(CONFIG_PTP_CLOCK_MCUX) && defined(CONFIG_NET_L2_PTP)
static inline void ts_register_tx_event(struct eth_context *context,
					enet_frame_info_t *frameinfo)
{
	struct net_pkt *pkt;

	pkt = frameinfo->context;
	if (pkt && atomic_get(&pkt->atomic_ref) > 0) {
		if (eth_get_ptp_data(net_pkt_iface(pkt), pkt)) {
			if (frameinfo->isTsAvail) {
				k_mutex_lock(&context->ptp_mutex, K_FOREVER);

				pkt->timestamp.nanosecond =
					frameinfo->timeStamp.nanosecond;
				pkt->timestamp.second =
					frameinfo->timeStamp.second;

				net_if_add_tx_timestamp(pkt);
				k_sem_give(&context->ptp_ts_sem);
				k_mutex_unlock(&context->ptp_mutex);
			}
		}

		net_pkt_unref(pkt);
	} else {
		if (IS_ENABLED(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) && pkt) {
			LOG_ERR("pkt %p already freed", pkt);
		}
	}
}
#endif /* CONFIG_PTP_CLOCK_MCUX && CONFIG_NET_L2_PTP */

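/* ENET event callback, invoked from the MCUX IRQ handlers. RX work is
 * deferred to the RX thread via rx_thread_sem; TX completions release
 * tx_buf_sem and, with PTP enabled, register the TX timestamp.
 */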
static void eth_callback(ENET_Type *base, enet_handle_t *handle,
#if FSL_FEATURE_ENET_QUEUE > 1
			 uint32_t ringId,
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
			 enet_event_t event, enet_frame_info_t *frameinfo, void *param)
{
	struct eth_context *context = param;

	switch (event) {
	case kENET_RxEvent:
		k_sem_give(&context->rx_thread_sem);
		break;
	case kENET_TxEvent:
#if defined(CONFIG_PTP_CLOCK_MCUX) && defined(CONFIG_NET_L2_PTP)
		/* Register event */
		ts_register_tx_event(context, frameinfo);
#endif /* CONFIG_PTP_CLOCK_MCUX && CONFIG_NET_L2_PTP */
		/* Free the TX buffer. */
		k_sem_give(&context->tx_buf_sem);
		break;
	case kENET_ErrEvent:
		/* Error event: BABR/BABT/EBERR/LC/RL/UN/PLR. */
		break;
	case kENET_WakeUpEvent:
		/* Wake up from sleep mode event. */
		break;
	case kENET_TimeStampEvent:
		/* Time stamp event. */
		/* Reset periodic timer to default value. */
		context->base->ATPER = NSEC_PER_SEC;
		break;
	case kENET_TimeStampAvailEvent:
		/* Time stamp available event. */
		break;
	}
}

static void eth_rx_thread(void *arg1, void *unused1, void *unused2)
{
	struct eth_context *context = (struct eth_context *)arg1;

	while (1) {
		if (k_sem_take(&context->rx_thread_sem, K_FOREVER) == 0) {
			while (eth_rx(context) == 1) {
				;
			}
			/* enable the IRQ for RX */
			ENET_EnableInterrupts(context->base,
					      kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
		}
	}
}

#if defined(CONFIG_ETH_MCUX_PHY_RESET)
static int eth_phy_reset(const struct device *dev)
{
	int err;
	struct eth_context *context = dev->data;

	/* pull up the ENET_INT before RESET. */
	err = gpio_pin_configure_dt(&context->int_gpio, GPIO_OUTPUT_ACTIVE);
	if (err) {
		return err;
	}

	return gpio_pin_configure_dt(&context->reset_gpio, GPIO_OUTPUT_INACTIVE);
}

static int eth_phy_init(const struct device *dev)
{
	struct eth_context *context = dev->data;

	/* RESET PHY chip. */
	k_busy_wait(USEC_PER_MSEC * 500);
	return gpio_pin_set_dt(&context->reset_gpio, 1);
}
#endif

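/* One-time ENET block setup: determine the module clock frequency,
 * build the ENET configuration (interrupts, RMII mode, optional
 * checksum offload and VLAN handling), initialize the MCUX driver and
 * kick off the PHY state machine.
 */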
static void eth_mcux_init(const struct device *dev)
{
	struct eth_context *context = dev->data;
	const enet_buffer_config_t *buffer_config = dev->config;
	enet_config_t enet_config;
	uint32_t sys_clock;
#if defined(CONFIG_PTP_CLOCK_MCUX)
	uint8_t ptp_multicast[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
	uint8_t ptp_peer_multicast[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x0E };
#endif
#if defined(CONFIG_MDNS_RESPONDER) || defined(CONFIG_MDNS_RESOLVER)
	/* standard multicast MAC address */
	uint8_t mdns_multicast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
#endif

	context->phy_state = eth_mcux_phy_state_initial;
	context->phy_handle->ops = &phyksz8081_ops;

#if defined(CONFIG_SOC_SERIES_IMX_RT10XX)
#if DT_NODE_HAS_STATUS(DT_NODELABEL(enet), okay)
	sys_clock = CLOCK_GetFreq(kCLOCK_IpgClk);
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(enet2), okay)
	sys_clock = CLOCK_GetFreq(kCLOCK_EnetPll1Clk);
#endif
#elif defined(CONFIG_SOC_SERIES_IMX_RT11XX)
	sys_clock = CLOCK_GetRootClockFreq(kCLOCK_Root_Bus);
#else
	sys_clock = CLOCK_GetFreq(kCLOCK_CoreSysClk);
#endif

	ENET_GetDefaultConfig(&enet_config);
	enet_config.interrupt |= kENET_RxFrameInterrupt;
	enet_config.interrupt |= kENET_TxFrameInterrupt;
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	enet_config.interrupt |= kENET_MiiInterrupt;
#endif
	enet_config.miiMode = kENET_RmiiMode;
	enet_config.callback = eth_callback;
	enet_config.userData = context;

	if (IS_ENABLED(CONFIG_ETH_MCUX_PROMISCUOUS_MODE)) {
		enet_config.macSpecialConfig |= kENET_ControlPromiscuousEnable;
	}

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		enet_config.macSpecialConfig |= kENET_ControlVLANTagEnable;
	}

	if (IS_ENABLED(CONFIG_ETH_MCUX_HW_ACCELERATION)) {
		enet_config.txAccelerConfig |=
			kENET_TxAccelIpCheckEnabled |
			kENET_TxAccelProtoCheckEnabled;
		enet_config.rxAccelerConfig |=
			kENET_RxAccelIpCheckEnabled |
			kENET_RxAccelProtoCheckEnabled;
	}

	ENET_Init(context->base,
		  &context->enet_handle,
		  &enet_config,
		  buffer_config,
		  context->mac_addr,
		  sys_clock);

#if defined(CONFIG_PTP_CLOCK_MCUX)
	ENET_AddMulticastGroup(context->base, ptp_multicast);
	ENET_AddMulticastGroup(context->base, ptp_peer_multicast);

	/* only for ERRATA_2579 */
	context->ptp_config.channel = kENET_PtpTimerChannel3;
	context->ptp_config.ptp1588ClockSrc_Hz =
		CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ;
	context->clk_ratio = 1.0;

	ENET_Ptp1588SetChannelMode(context->base, kENET_PtpTimerChannel3,
				   kENET_PtpChannelPulseHighonCompare, true);
	ENET_Ptp1588Configure(context->base, &context->enet_handle,
			      &context->ptp_config);
#endif

#if defined(CONFIG_MDNS_RESPONDER) || defined(CONFIG_MDNS_RESOLVER)
	ENET_AddMulticastGroup(context->base, mdns_multicast);
#endif

#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	ENET_SetSMI(context->base, sys_clock, false);
#endif

	/* handle PHY setup after SMI initialization */
	eth_mcux_phy_setup(context);

#if defined(CONFIG_PTP_CLOCK_MCUX)
	/* Enable reclaim of tx descriptors that will have the tx timestamp */
	ENET_SetTxReclaim(&context->enet_handle, true, 0);
#endif

	eth_mcux_phy_start(context);
}

static int eth_init(const struct device *dev)
{
	struct eth_context *context = dev->data;
#if defined(CONFIG_PINCTRL)
	int err;

	err = pinctrl_apply_state(context->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
#endif /* CONFIG_PINCTRL */

#if defined(CONFIG_NET_POWER_MANAGEMENT)
	const uint32_t inst = ENET_GetInstance(context->base);

	context->clock = enet_clocks[inst];
#endif

#if defined(CONFIG_ETH_MCUX_PHY_RESET)
	eth_phy_reset(dev);
	eth_phy_init(dev);
#endif

#if defined(CONFIG_PTP_CLOCK_MCUX)
	k_mutex_init(&context->ptp_mutex);
	k_sem_init(&context->ptp_ts_sem, 0, 1);
#endif
	k_mutex_init(&context->rx_frame_buf_mutex);
	k_mutex_init(&context->tx_frame_buf_mutex);

	k_sem_init(&context->rx_thread_sem, 0, CONFIG_ETH_MCUX_RX_BUFFERS);
	k_sem_init(&context->tx_buf_sem,
		   CONFIG_ETH_MCUX_TX_BUFFERS, CONFIG_ETH_MCUX_TX_BUFFERS);
	k_work_init(&context->phy_work, eth_mcux_phy_work);
	k_work_init_delayable(&context->delayed_phy_work,
			      eth_mcux_delayed_phy_work);

	/* Start the RX interrupt-poll thread */
	k_thread_create(&context->rx_thread, context->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(context->rx_thread_stack),
			eth_rx_thread, (void *) context, NULL, NULL,
			K_PRIO_COOP(2),
			0, K_NO_WAIT);
	k_thread_name_set(&context->rx_thread, "mcux_eth_rx");

	if (context->generate_mac) {
		context->generate_mac(context->mac_addr);
	}

	eth_mcux_init(dev);

	LOG_DBG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x",
		dev->name,
		context->mac_addr[0], context->mac_addr[1],
		context->mac_addr[2], context->mac_addr[3],
		context->mac_addr[4], context->mac_addr[5]);

	return 0;
}

#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
static void net_if_mcast_cb(struct net_if *iface,
			    const struct net_addr *addr,
			    bool is_joined)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_context *context = dev->data;
	struct net_eth_addr mac_addr;

	if (IS_ENABLED(CONFIG_NET_IPV4) && addr->family == AF_INET) {
		net_eth_ipv4_mcast_to_mac_addr(&addr->in_addr, &mac_addr);
	} else if (IS_ENABLED(CONFIG_NET_IPV6) && addr->family == AF_INET6) {
		net_eth_ipv6_mcast_to_mac_addr(&addr->in6_addr, &mac_addr);
	} else {
		return;
	}

	if (is_joined) {
		ENET_AddMulticastGroup(context->base, mac_addr.addr);
	} else {
		ENET_LeaveMulticastGroup(context->base, mac_addr.addr);
	}
}
#endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */

static void eth_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_context *context = dev->data;

#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
	static struct net_if_mcast_monitor mon;

	net_if_mcast_mon_register(&mon, iface, net_if_mcast_cb);
#endif /* CONFIG_NET_NATIVE_IPV4 || CONFIG_NET_NATIVE_IPV6 */

	net_if_set_link_addr(iface, context->mac_addr,
			     sizeof(context->mac_addr),
			     NET_LINK_ETHERNET);

	/* For VLAN, this value is only used to get the correct L2 driver.
	 * The iface pointer in context should contain the main interface
	 * if the VLANs are enabled.
	 */
	if (context->iface == NULL) {
		context->iface = iface;
	}

#if defined(CONFIG_NET_DSA)
	dsa_register_master_tx(iface, &eth_tx);
#endif
	ethernet_init(iface);
	net_if_carrier_off(iface);

	context->config_func();
}

static enum ethernet_hw_caps eth_mcux_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return ETHERNET_HW_VLAN | ETHERNET_LINK_10BASE_T |
#if defined(CONFIG_PTP_CLOCK_MCUX)
		ETHERNET_PTP |
#endif
#if defined(CONFIG_NET_DSA)
		ETHERNET_DSA_MASTER_PORT |
#endif
#if defined(CONFIG_ETH_MCUX_HW_ACCELERATION)
		ETHERNET_HW_TX_CHKSUM_OFFLOAD |
		ETHERNET_HW_RX_CHKSUM_OFFLOAD |
#endif
		ETHERNET_AUTO_NEGOTIATION_SET |
		ETHERNET_LINK_100BASE_T;
}

static int eth_mcux_set_config(const struct device *dev,
			       enum ethernet_config_type type,
			       const struct ethernet_config *config)
{
	struct eth_context *context = dev->data;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(context->mac_addr,
		       config->mac_address.addr,
		       sizeof(context->mac_addr));
		ENET_SetMacAddr(context->base, context->mac_addr);
		net_if_set_link_addr(context->iface, context->mac_addr,
				     sizeof(context->mac_addr),
				     NET_LINK_ETHERNET);
		LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
			dev->name,
			context->mac_addr[0], context->mac_addr[1],
			context->mac_addr[2], context->mac_addr[3],
			context->mac_addr[4], context->mac_addr[5]);
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

#if defined(CONFIG_PTP_CLOCK_MCUX)
static const struct device *eth_mcux_get_ptp_clock(const struct device *dev)
{
	struct eth_context *context = dev->data;

	return context->ptp_clock;
}
#endif

static const struct ethernet_api api_funcs = {
	.iface_api.init = eth_iface_init,
#if defined(CONFIG_PTP_CLOCK_MCUX)
	.get_ptp_clock = eth_mcux_get_ptp_clock,
#endif
	.get_capabilities = eth_mcux_get_capabilities,
	.set_config = eth_mcux_set_config,
#if defined(CONFIG_NET_DSA)
	.send = dsa_tx,
#else
	.send = eth_tx,
#endif
};

#if defined(CONFIG_PTP_CLOCK_MCUX)
static void eth_mcux_ptp_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
	unsigned int irq_lock_key = irq_lock();
	enet_ptp_timer_channel_t channel;

	/* clear channel */
	for (channel = kENET_PtpTimerChannel1; channel <= kENET_PtpTimerChannel4; channel++) {
		if (ENET_Ptp1588GetChannelStatus(context->base, channel)) {
			ENET_Ptp1588ClearChannelStatus(context->base, channel);
		}
	}
	ENET_TimeStampIRQHandler(context->base, &context->enet_handle);
	irq_unlock(irq_lock_key);
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, common) || DT_INST_IRQ_HAS_NAME(1, common)
static void eth_mcux_common_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
	uint32_t EIR = ENET_GetInterruptStatus(context->base);
	unsigned int irq_lock_key = irq_lock();

	if (EIR & (kENET_RxBufferInterrupt | kENET_RxFrameInterrupt)) {
		/* disable the IRQ for RX */
		context->rx_irq_num++;
#if FSL_FEATURE_ENET_QUEUE > 1
		/* Only use ring 0 in this driver */
		ENET_ReceiveIRQHandler(context->base, &context->enet_handle, 0);
#else
		ENET_ReceiveIRQHandler(context->base, &context->enet_handle);
#endif
		ENET_DisableInterrupts(context->base, kENET_RxFrameInterrupt |
				       kENET_RxBufferInterrupt);
	}

	if (EIR & kENET_TxFrameInterrupt) {
#if FSL_FEATURE_ENET_QUEUE > 1
		ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0);
#else
		ENET_TransmitIRQHandler(context->base, &context->enet_handle);
#endif
	}

	if (EIR & kENET_TxBufferInterrupt) {
		ENET_ClearInterruptStatus(context->base, kENET_TxBufferInterrupt);
		ENET_DisableInterrupts(context->base, kENET_TxBufferInterrupt);
	}

	if (EIR & ENET_EIR_MII_MASK) {
		k_work_submit(&context->phy_work);
		ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt);
	}
#if defined(CONFIG_PTP_CLOCK_MCUX)
	if (EIR & ENET_TS_INTERRUPT) {
		ENET_TimeStampIRQHandler(context->base, &context->enet_handle);
	}
#endif
	irq_unlock(irq_lock_key);
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, rx) || DT_INST_IRQ_HAS_NAME(1, rx)
static void eth_mcux_rx_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;

	ENET_DisableInterrupts(context->base, kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
	ENET_ReceiveIRQHandler(context->base, &context->enet_handle);
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, tx) || DT_INST_IRQ_HAS_NAME(1, tx)
static void eth_mcux_tx_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
#if FSL_FEATURE_ENET_QUEUE > 1
	ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0);
#else
	ENET_TransmitIRQHandler(context->base, &context->enet_handle);
#endif
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, err) || DT_INST_IRQ_HAS_NAME(1, err)
static void eth_mcux_err_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
	uint32_t pending = ENET_GetInterruptStatus(context->base);

	if (pending & ENET_EIR_MII_MASK) {
		k_work_submit(&context->phy_work);
		ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt);
	}
}
#endif

#if defined(CONFIG_SOC_SERIES_IMX_RT10XX)
#define ETH_MCUX_UNIQUE_ID (OCOTP->CFG1 ^ OCOTP->CFG2)
#elif defined(CONFIG_SOC_SERIES_IMX_RT11XX)
#define ETH_MCUX_UNIQUE_ID (OCOTP->FUSEN[40].FUSE)
#elif defined(CONFIG_SOC_SERIES_KINETIS_K6X)
#define ETH_MCUX_UNIQUE_ID (SIM->UIDH ^ SIM->UIDMH ^ SIM->UIDML ^ SIM->UIDL)
#else
#error "Unsupported SOC"
#endif

#define ETH_MCUX_NONE

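/* Connect and enable one named IRQ for instance n, but only when the
 * devicetree actually defines that interrupt; otherwise the macro
 * expands to nothing.
 */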
#define ETH_MCUX_IRQ_INIT(n, name) \
	do { \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, name, irq), \
			    DT_INST_IRQ_BY_NAME(n, name, priority), \
			    eth_mcux_##name##_isr, \
			    DEVICE_DT_INST_GET(n), \
			    0); \
		irq_enable(DT_INST_IRQ_BY_NAME(n, name, irq)); \
	} while (false)

#define ETH_MCUX_IRQ(n, name) \
	COND_CODE_1(DT_INST_IRQ_HAS_NAME(n, name), \
		    (ETH_MCUX_IRQ_INIT(n, name)), \
		    (ETH_MCUX_NONE))

#if defined(CONFIG_PTP_CLOCK_MCUX)
#define PTP_INST_NODEID(n) DT_INST_CHILD(n, ptp)

#define ETH_MCUX_IRQ_PTP_INIT(n) \
	do { \
		IRQ_CONNECT(DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, irq), \
			    DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, priority), \
			    eth_mcux_ptp_isr, \
			    DEVICE_DT_INST_GET(n), \
			    0); \
		irq_enable(DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, irq)); \
	} while (false)

#define ETH_MCUX_IRQ_PTP(n) \
	COND_CODE_1(DT_NODE_HAS_STATUS(PTP_INST_NODEID(n), okay), \
		    (ETH_MCUX_IRQ_PTP_INIT(n)), \
		    (ETH_MCUX_NONE))

#define ETH_MCUX_PTP_FRAMEINFO_ARRAY(n) \
	static enet_frame_info_t \
		eth##n##_tx_frameinfo_array[CONFIG_ETH_MCUX_TX_BUFFERS];

#define ETH_MCUX_PTP_FRAMEINFO(n) \
	.txFrameInfo = eth##n##_tx_frameinfo_array,
#else
#define ETH_MCUX_IRQ_PTP(n)

#define ETH_MCUX_PTP_FRAMEINFO_ARRAY(n)

#define ETH_MCUX_PTP_FRAMEINFO(n) \
	.txFrameInfo = NULL,
#endif

#define ETH_MCUX_GENERATE_MAC_RANDOM(n) \
	static void generate_eth##n##_mac(uint8_t *mac_addr) \
	{ \
		gen_random_mac(mac_addr, \
			       FREESCALE_OUI_B0, \
			       FREESCALE_OUI_B1, \
			       FREESCALE_OUI_B2); \
	}

#define ETH_MCUX_GENERATE_MAC_UNIQUE(n) \
	static void generate_eth##n##_mac(uint8_t *mac_addr) \
	{ \
		uint32_t id = ETH_MCUX_UNIQUE_ID; \
		\
		mac_addr[0] = FREESCALE_OUI_B0; \
		mac_addr[0] |= 0x02; /* force LAA bit */ \
		mac_addr[1] = FREESCALE_OUI_B1; \
		mac_addr[2] = FREESCALE_OUI_B2; \
		mac_addr[3] = id >> 8; \
		mac_addr[4] = id >> 16; \
		mac_addr[5] = id >> 0; \
		mac_addr[5] += n; \
	}

#define ETH_MCUX_GENERATE_MAC(n) \
	COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), \
		    (ETH_MCUX_GENERATE_MAC_RANDOM(n)), \
		    (ETH_MCUX_GENERATE_MAC_UNIQUE(n)))

#define ETH_MCUX_MAC_ADDR_LOCAL(n) \
	.mac_addr = DT_INST_PROP(n, local_mac_address), \
	.generate_mac = NULL,

#define ETH_MCUX_MAC_ADDR_GENERATE(n) \
	.mac_addr = {0}, \
	.generate_mac = generate_eth##n##_mac,

#define ETH_MCUX_MAC_ADDR(n) \
	COND_CODE_1(ETH_MCUX_MAC_ADDR_TO_BOOL(n), \
		    (ETH_MCUX_MAC_ADDR_LOCAL(n)), \
		    (ETH_MCUX_MAC_ADDR_GENERATE(n)))

#ifdef CONFIG_SOC_FAMILY_KINETIS
#define ETH_MCUX_POWER_INIT(n) \
	.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),

#define ETH_MCUX_POWER(n) \
	COND_CODE_1(CONFIG_NET_POWER_MANAGEMENT, \
		    (ETH_MCUX_POWER_INIT(n)), \
		    (ETH_MCUX_NONE))
#define ETH_MCUX_PM_DEVICE_INIT(n) \
	PM_DEVICE_DT_INST_DEFINE(n, eth_mcux_device_pm_action);
#define ETH_MCUX_PM_DEVICE_GET(n) PM_DEVICE_DT_INST_GET(n)
#else
#define ETH_MCUX_POWER(n)
#define ETH_MCUX_PM_DEVICE_INIT(n)
#define ETH_MCUX_PM_DEVICE_GET(n) NULL
#endif /* CONFIG_SOC_FAMILY_KINETIS */

#define ETH_MCUX_GEN_MAC(n) \
	COND_CODE_0(ETH_MCUX_MAC_ADDR_TO_BOOL(n), \
		    (ETH_MCUX_GENERATE_MAC(n)), \
		    (ETH_MCUX_NONE))

/*
 * In the below code we explicitly define
 * ETH_MCUX_MAC_ADDR_TO_BOOL_0 for the '0' instance of the enet driver.
 *
 * For instance N one shall add a definition for ETH_MCUX_MAC_ADDR_TO_BOOL_N.
 */
#if (NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))) == 0
#define ETH_MCUX_MAC_ADDR_TO_BOOL_0 0
#else
#define ETH_MCUX_MAC_ADDR_TO_BOOL_0 1
#endif
#define ETH_MCUX_MAC_ADDR_TO_BOOL(n) ETH_MCUX_MAC_ADDR_TO_BOOL_##n

#if defined(CONFIG_PINCTRL)
#define ETH_MCUX_PINCTRL_DEFINE(n) PINCTRL_DT_INST_DEFINE(n);
#define ETH_MCUX_PINCTRL_INIT(n) .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),
#else
#define ETH_MCUX_PINCTRL_DEFINE(n)
#define ETH_MCUX_PINCTRL_INIT(n)
#endif

#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay) && \
	CONFIG_ETH_MCUX_USE_DTCM_FOR_DMA_BUFFER
/* Use DTCM for hardware DMA buffers */
#define _mcux_dma_desc __dtcm_bss_section
#define _mcux_dma_buffer __dtcm_noinit_section
#define _mcux_driver_buffer __dtcm_noinit_section
#elif defined(CONFIG_NOCACHE_MEMORY)
#define _mcux_dma_desc __nocache
#define _mcux_dma_buffer __nocache
#define _mcux_driver_buffer
#else
#define _mcux_dma_desc
#define _mcux_dma_buffer
#define _mcux_driver_buffer
#endif

#if defined(CONFIG_ETH_MCUX_PHY_RESET)
#define ETH_MCUX_PHY_GPIOS(n) \
	.int_gpio = GPIO_DT_SPEC_INST_GET(n, int_gpios), \
	.reset_gpio = GPIO_DT_SPEC_INST_GET(n, reset_gpios),
#else
#define ETH_MCUX_PHY_GPIOS(n)
#endif

#define ETH_MCUX_INIT(n) \
	ETH_MCUX_GEN_MAC(n) \
	\
	ETH_MCUX_PINCTRL_DEFINE(n) \
	\
	static void eth##n##_config_func(void); \
	static _mcux_driver_buffer uint8_t \
		tx_enet_frame_##n##_buf[NET_ETH_MAX_FRAME_SIZE]; \
	static _mcux_driver_buffer uint8_t \
		rx_enet_frame_##n##_buf[NET_ETH_MAX_FRAME_SIZE]; \
	static status_t _MDIO_Write(uint8_t phyAddr, uint8_t regAddr, uint16_t data) \
	{ \
		return ENET_MDIOWrite((ENET_Type *)DT_INST_REG_ADDR(n), phyAddr, regAddr, data); \
	}; \
	\
	static status_t _MDIO_Read(uint8_t phyAddr, uint8_t regAddr, uint16_t *pData) \
	{ \
		return ENET_MDIORead((ENET_Type *)DT_INST_REG_ADDR(n), phyAddr, regAddr, pData); \
	}; \
	\
	static struct _phy_resource eth##n##_phy_resource = { \
		.read = _MDIO_Read, \
		.write = _MDIO_Write \
	}; \
	static phy_handle_t eth##n##_phy_handle = { \
		.resource = (void *)&eth##n##_phy_resource \
	}; \
	static struct _phy_resource eth##n##_phy_config; \
	\
	static struct eth_context eth##n##_context = { \
		.base = (ENET_Type *)DT_INST_REG_ADDR(n), \
		.config_func = eth##n##_config_func, \
		.phy_config = &eth##n##_phy_config, \
		.phy_addr = DT_INST_PROP(n, phy_addr), \
		.phy_duplex = kPHY_FullDuplex, \
		.phy_speed = kPHY_Speed100M, \
		.phy_handle = &eth##n##_phy_handle, \
		.tx_frame_buf = tx_enet_frame_##n##_buf, \
		.rx_frame_buf = rx_enet_frame_##n##_buf, \
		ETH_MCUX_PINCTRL_INIT(n) \
		ETH_MCUX_PHY_GPIOS(n) \
		ETH_MCUX_MAC_ADDR(n) \
		ETH_MCUX_POWER(n) \
	}; \
	\
	static __aligned(ENET_BUFF_ALIGNMENT) \
	_mcux_dma_desc \
	enet_rx_bd_struct_t \
	eth##n##_rx_buffer_desc[CONFIG_ETH_MCUX_RX_BUFFERS]; \
	\
	static __aligned(ENET_BUFF_ALIGNMENT) \
	_mcux_dma_desc \
	enet_tx_bd_struct_t \
	eth##n##_tx_buffer_desc[CONFIG_ETH_MCUX_TX_BUFFERS]; \
	\
	static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \
	_mcux_dma_buffer \
	eth##n##_rx_buffer[CONFIG_ETH_MCUX_RX_BUFFERS] \
			  [ETH_MCUX_BUFFER_SIZE]; \
	\
	static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \
	_mcux_dma_buffer \
	eth##n##_tx_buffer[CONFIG_ETH_MCUX_TX_BUFFERS] \
			  [ETH_MCUX_BUFFER_SIZE]; \
	\
	ETH_MCUX_PTP_FRAMEINFO_ARRAY(n) \
	\
	static const enet_buffer_config_t eth##n##_buffer_config = { \
		.rxBdNumber = CONFIG_ETH_MCUX_RX_BUFFERS, \
		.txBdNumber = CONFIG_ETH_MCUX_TX_BUFFERS, \
		.rxBuffSizeAlign = ETH_MCUX_BUFFER_SIZE, \
		.txBuffSizeAlign = ETH_MCUX_BUFFER_SIZE, \
		.rxBdStartAddrAlign = eth##n##_rx_buffer_desc, \
		.txBdStartAddrAlign = eth##n##_tx_buffer_desc, \
		.rxBufferAlign = eth##n##_rx_buffer[0], \
		.txBufferAlign = eth##n##_tx_buffer[0], \
		.rxMaintainEnable = true, \
		.txMaintainEnable = true, \
		ETH_MCUX_PTP_FRAMEINFO(n) \
	}; \
	\
	ETH_MCUX_PM_DEVICE_INIT(n) \
	\
	ETH_NET_DEVICE_DT_INST_DEFINE(n, \
				      eth_init, \
				      ETH_MCUX_PM_DEVICE_GET(n), \
				      &eth##n##_context, \
				      &eth##n##_buffer_config, \
				      CONFIG_ETH_INIT_PRIORITY, \
				      &api_funcs, \
				      NET_ETH_MTU); \
	\
	static void eth##n##_config_func(void) \
	{ \
		ETH_MCUX_IRQ(n, rx); \
		ETH_MCUX_IRQ(n, tx); \
		ETH_MCUX_IRQ(n, err); \
		ETH_MCUX_IRQ(n, common); \
		ETH_MCUX_IRQ_PTP(n); \
	}

DT_INST_FOREACH_STATUS_OKAY(ETH_MCUX_INIT)

#if defined(CONFIG_PTP_CLOCK_MCUX)
struct ptp_context {
	struct eth_context *eth_context;
#if defined(CONFIG_PINCTRL)
	const struct pinctrl_dev_config *pincfg;
#endif /* CONFIG_PINCTRL */
};

#if defined(CONFIG_PINCTRL)
#define ETH_MCUX_PTP_PINCTRL_DEFINE(n) PINCTRL_DT_DEFINE(n);
#define ETH_MCUX_PTP_PINCTRL_INIT(n) .pincfg = PINCTRL_DT_DEV_CONFIG_GET(n),
#else
#define ETH_MCUX_PTP_PINCTRL_DEFINE(n)
#define ETH_MCUX_PTP_PINCTRL_INIT(n)
#endif /* CONFIG_PINCTRL */

ETH_MCUX_PTP_PINCTRL_DEFINE(DT_NODELABEL(ptp))

static struct ptp_context ptp_mcux_0_context = {
	ETH_MCUX_PTP_PINCTRL_INIT(DT_NODELABEL(ptp))
};

static int ptp_clock_mcux_set(const struct device *dev,
			      struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	enet_ptp_time_t enet_time;

	enet_time.second = tm->second;
	enet_time.nanosecond = tm->nanosecond;

	ENET_Ptp1588SetTimer(context->base, &context->enet_handle, &enet_time);
	return 0;
}

static int ptp_clock_mcux_get(const struct device *dev,
			      struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	enet_ptp_time_t enet_time;

	ENET_Ptp1588GetTimer(context->base, &context->enet_handle, &enet_time);

	tm->second = enet_time.second;
	tm->nanosecond = enet_time.nanosecond;
	return 0;
}

static int ptp_clock_mcux_adjust(const struct device *dev, int increment)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	int key, ret;

	ARG_UNUSED(dev);

	if ((increment <= (int32_t)(-NSEC_PER_SEC)) ||
	    (increment >= (int32_t)NSEC_PER_SEC)) {
		ret = -EINVAL;
	} else {
		key = irq_lock();
		if (context->base->ATPER != NSEC_PER_SEC) {
			ret = -EBUSY;
		} else {
			/* Seconds counter is handled by software. Change the
			 * period of one software second to adjust the clock.
			 */
			context->base->ATPER = NSEC_PER_SEC - increment;
			ret = 0;
		}
		irq_unlock(key);
	}

	return ret;
}

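/* Rate adjustment: every `mul` timer ticks the counter advances by
 * `corr` nanoseconds (hw_inc +/- 1) instead of the nominal hw_inc,
 * nudging the clock rate by roughly 1/(hw_inc * mul). The cumulative
 * ratio is kept in clk_ratio so successive adjustments compose.
 */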
static int ptp_clock_mcux_rate_adjust(const struct device *dev, double ratio)
{
	const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ;
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	int corr;
	int32_t mul;
	double val;

	/* No change needed. */
	if ((ratio > 1.0 && ratio - 1.0 < 0.00000001) ||
	    (ratio < 1.0 && 1.0 - ratio < 0.00000001)) {
		return 0;
	}

	ratio *= context->clk_ratio;

	/* Limit possible ratio. */
	if ((ratio > 1.0 + 1.0 / (2 * hw_inc)) ||
	    (ratio < 1.0 - 1.0 / (2 * hw_inc))) {
		return -EINVAL;
	}

	/* Save new ratio. */
	context->clk_ratio = ratio;

	if (ratio < 1.0) {
		corr = hw_inc - 1;
		val = 1.0 / (hw_inc * (1.0 - ratio));
	} else if (ratio > 1.0) {
		corr = hw_inc + 1;
		val = 1.0 / (hw_inc * (ratio - 1.0));
	} else {
		val = 0;
		corr = hw_inc;
	}

	if (val >= INT32_MAX) {
		/* Value is too high.
		 * It is not possible to adjust the rate of the clock.
		 */
		mul = 0;
	} else {
		mul = val;
	}

	k_mutex_lock(&context->ptp_mutex, K_FOREVER);
	ENET_Ptp1588AdjustTimer(context->base, corr, mul);
	k_mutex_unlock(&context->ptp_mutex);

	return 0;
}

static const struct ptp_clock_driver_api api = {
	.set = ptp_clock_mcux_set,
	.get = ptp_clock_mcux_get,
	.adjust = ptp_clock_mcux_adjust,
	.rate_adjust = ptp_clock_mcux_rate_adjust,
};

static int ptp_mcux_init(const struct device *port)
{
	const struct device *const eth_dev = DEVICE_DT_GET(DT_NODELABEL(enet));
	struct eth_context *context = eth_dev->data;
	struct ptp_context *ptp_context = port->data;
#if defined(CONFIG_PINCTRL)
	int err;

	err = pinctrl_apply_state(ptp_context->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
#endif /* CONFIG_PINCTRL */

	context->ptp_clock = port;
	ptp_context->eth_context = context;

	return 0;
}

DEVICE_DEFINE(mcux_ptp_clock_0, PTP_CLOCK_NAME, ptp_mcux_init,
	      NULL, &ptp_mcux_0_context, NULL, POST_KERNEL,
	      CONFIG_ETH_MCUX_PTP_CLOCK_INIT_PRIO, &api);

#endif /* CONFIG_PTP_CLOCK_MCUX */