/* MCUX Ethernet Driver
 *
 * Copyright (c) 2016-2017 ARM Ltd
 * Copyright (c) 2016 Linaro Ltd
 * Copyright (c) 2018 Intel Corporation
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_kinetis_ethernet

/* Driver Limitations:
 *
 * There is no statistics collection for either normal operation or
 * error behaviour.
 */

#define LOG_MODULE_NAME eth_mcux
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
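/* Only the first ring/queue is used by this driver (see the IRQ handlers). */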
#define RING_ID 0

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <zephyr/device.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <ethernet/eth_stats.h>
#include <zephyr/pm/device.h>
#include <zephyr/irq.h>

#if defined(CONFIG_PTP_CLOCK_MCUX)
#include <zephyr/drivers/ptp_clock.h>
#endif

#if defined(CONFIG_NET_DSA)
#include <zephyr/net/dsa.h>
#endif

#include "fsl_enet.h"
#include "fsl_phy.h"
#include "fsl_phyksz8081.h"
#include "fsl_enet_mdio.h"
#if defined(CONFIG_NET_POWER_MANAGEMENT)
#include "fsl_clock.h"
#include <zephyr/drivers/clock_control.h>
#endif
#include <zephyr/devicetree.h>
#include <zephyr/drivers/gpio.h>
#if defined(CONFIG_PINCTRL)
#include <zephyr/drivers/pinctrl.h>
#endif

#include "../eth.h"

#define PHY_OMS_OVERRIDE_REG 0x16U /* The PHY Operation Mode Strap Override register. */
#define PHY_OMS_STATUS_REG 0x17U /* The PHY Operation Mode Strap Status register. */

#define PHY_OMS_NANDTREE_MASK 0x0020U /* The PHY NAND Tree Strap-In Override/Status mask. */
#define PHY_OMS_FACTORY_MODE_MASK 0x8000U /* The factory mode Override/Status mask. */

/* Defines the PHY KSZ8081 vendor defined registers. */
#define PHY_CONTROL1_REG 0x1EU /* The PHY control one register. */
#define PHY_CONTROL2_REG 0x1FU /* The PHY control two register. */

/* Defines the PHY KSZ8081 ID number. */
#define PHY_CONTROL_ID1 0x22U /* The PHY ID1 */

/* Defines the mask flag of operation mode in control registers */
#define PHY_CTL2_REMOTELOOP_MASK 0x0004U /* The PHY remote loopback mask. */
#define PHY_CTL2_REFCLK_SELECT_MASK 0x0080U /* The PHY RMII reference clock select. */
#define PHY_CTL1_10HALFDUPLEX_MASK 0x0001U /* The PHY 10M half duplex mask. */
#define PHY_CTL1_100HALFDUPLEX_MASK 0x0002U /* The PHY 100M half duplex mask. */
#define PHY_CTL1_10FULLDUPLEX_MASK 0x0005U /* The PHY 10M full duplex mask. */
#define PHY_CTL1_100FULLDUPLEX_MASK 0x0006U /* The PHY 100M full duplex mask. */
#define PHY_CTL1_SPEEDUPLX_MASK 0x0007U /* The PHY speed and duplex mask. */
#define PHY_CTL1_ENERGYDETECT_MASK 0x10U /* The PHY signal present on rx differential pair. */
#define PHY_CTL1_LINKUP_MASK 0x100U /* The PHY link up. */
#define PHY_LINK_READY_MASK (PHY_CTL1_ENERGYDETECT_MASK | PHY_CTL1_LINKUP_MASK)

/* Defines the timeout macro. */
#define PHY_READID_TIMEOUT_COUNT 1000U

/* Define RX and TX thread stack sizes */
#define ETH_MCUX_RX_THREAD_STACK_SIZE 1600
#define ETH_MCUX_TX_THREAD_STACK_SIZE 1600

#define FREESCALE_OUI_B0 0x00
#define FREESCALE_OUI_B1 0x04
#define FREESCALE_OUI_B2 0x9f

#define ETH_MCUX_FIXED_LINK_NODE \
	DT_CHILD(DT_NODELABEL(enet), fixed_link)
#define ETH_MCUX_FIXED_LINK \
	DT_NODE_EXISTS(ETH_MCUX_FIXED_LINK_NODE)
#define ETH_MCUX_FIXED_LINK_SPEED \
	DT_PROP(ETH_MCUX_FIXED_LINK_NODE, speed)
#define ETH_MCUX_FIXED_LINK_FULL_DUPLEX \
	DT_PROP(ETH_MCUX_FIXED_LINK_NODE, full_duplex)

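/* States of the PHY management state machine driven by eth_mcux_phy_event(). */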
enum eth_mcux_phy_state {
	eth_mcux_phy_state_initial,
	eth_mcux_phy_state_reset,
	eth_mcux_phy_state_autoneg,
	eth_mcux_phy_state_restart,
	eth_mcux_phy_state_read_status,
	eth_mcux_phy_state_read_duplex,
	eth_mcux_phy_state_wait,
	eth_mcux_phy_state_closing
};

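/* MDIO register accessors handed to the MCUX PHY framework; each instance is
 * wired to ENET_MDIORead()/ENET_MDIOWrite() in ETH_MCUX_INIT() below.
 */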
struct _phy_resource {
	mdioWrite write;
	mdioRead read;
};

#if defined(CONFIG_NET_POWER_MANAGEMENT)
extern uint32_t ENET_GetInstance(ENET_Type *base);
static const clock_ip_name_t enet_clocks[] = ENET_CLOCKS;
#endif

static void eth_mcux_init(const struct device *dev);

#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
static const char *phy_state_name(enum eth_mcux_phy_state state)
{
	static const char * const name[] = {
		"initial",
		"reset",
		"autoneg",
		"restart",
		"read-status",
		"read-duplex",
		"wait",
		"closing"
	};

	return name[state];
}
#endif

static const char *eth_name(ENET_Type *base)
{
	switch ((int)base) {
	case DT_INST_REG_ADDR(0):
		return "ETH_0";
#if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay)
	case DT_INST_REG_ADDR(1):
		return "ETH_1";
#endif
	default:
		return "unknown";
	}
}

struct eth_context {
	ENET_Type *base;
	void (*config_func)(void);
	struct net_if *iface;
#if defined(CONFIG_NET_POWER_MANAGEMENT)
	clock_ip_name_t clock;
	const struct device *clock_dev;
#endif
	enet_handle_t enet_handle;
#if defined(CONFIG_PTP_CLOCK_MCUX)
	const struct device *ptp_clock;
	enet_ptp_config_t ptp_config;
	double clk_ratio;
	struct k_mutex ptp_mutex;
	struct k_sem ptp_ts_sem;
#endif
	struct k_sem tx_buf_sem;
	phy_handle_t *phy_handle;
	struct _phy_resource *phy_config;
	struct k_sem rx_thread_sem;
	enum eth_mcux_phy_state phy_state;
	bool enabled;
	bool link_up;
	uint32_t phy_addr;
	uint32_t rx_irq_num;
	uint32_t tx_irq_num;
	phy_duplex_t phy_duplex;
	phy_speed_t phy_speed;
	uint8_t mac_addr[6];
	void (*generate_mac)(uint8_t *);
	struct k_work phy_work;
	struct k_work_delayable delayed_phy_work;

	K_KERNEL_STACK_MEMBER(rx_thread_stack, ETH_MCUX_RX_THREAD_STACK_SIZE);
	struct k_thread rx_thread;

	/* TODO: FIXME. This Ethernet frame sized buffer is used for
	 * interfacing with MCUX. How it works is that hardware uses
	 * DMA scatter buffers to receive a frame, and then public
	 * MCUX call gathers them into this buffer (there's no other
	 * public interface). All this happens only for this driver
	 * to scatter this buffer again into Zephyr fragment buffers.
	 * This is not efficient, but proper resolution of this issue
	 * depends on introduction of zero-copy networking support
	 * in Zephyr, and adding needed interface to MCUX (or
	 * bypassing it and writing a more complex driver working
	 * directly with hardware).
	 *
	 * Note that we do not copy the FCS into this buffer, so the
	 * size is 1514 bytes.
	 */
	struct k_mutex tx_frame_buf_mutex;
	struct k_mutex rx_frame_buf_mutex;
	uint8_t *tx_frame_buf; /* Max MTU + ethernet header */
	uint8_t *rx_frame_buf; /* Max MTU + ethernet header */
#if defined(CONFIG_PINCTRL)
	const struct pinctrl_dev_config *pincfg;
#endif
#if defined(CONFIG_ETH_MCUX_PHY_RESET)
	const struct gpio_dt_spec int_gpio;
	const struct gpio_dt_spec reset_gpio;
#endif
};

/* Use ENET_FRAME_MAX_VLANFRAMELEN for VLAN frame size
 * Use ENET_FRAME_MAX_FRAMELEN for Ethernet frame size
 */
#if defined(CONFIG_NET_VLAN)
#if !defined(ENET_FRAME_MAX_VLANFRAMELEN)
#define ENET_FRAME_MAX_VLANFRAMELEN (ENET_FRAME_MAX_FRAMELEN + 4)
#endif
#define ETH_MCUX_BUFFER_SIZE \
	ROUND_UP(ENET_FRAME_MAX_VLANFRAMELEN, ENET_BUFF_ALIGNMENT)
#else
#define ETH_MCUX_BUFFER_SIZE \
	ROUND_UP(ENET_FRAME_MAX_FRAMELEN, ENET_BUFF_ALIGNMENT)
#endif /* CONFIG_NET_VLAN */

#ifdef CONFIG_SOC_FAMILY_KINETIS
#if defined(CONFIG_NET_POWER_MANAGEMENT)
static void eth_mcux_phy_enter_reset(struct eth_context *context);
void eth_mcux_phy_stop(struct eth_context *context);

static int eth_mcux_device_pm_action(const struct device *dev,
				     enum pm_device_action action)
{
	struct eth_context *eth_ctx = dev->data;
	int ret = 0;

	if (!device_is_ready(eth_ctx->clock_dev)) {
		LOG_ERR("No CLOCK dev");

		ret = -EIO;
		goto out;
	}

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		LOG_DBG("Suspending");

		ret = net_if_suspend(eth_ctx->iface);
		if (ret == -EBUSY) {
			goto out;
		}

		eth_mcux_phy_enter_reset(eth_ctx);
		eth_mcux_phy_stop(eth_ctx);

		ENET_Reset(eth_ctx->base);
		ENET_Deinit(eth_ctx->base);
		clock_control_off(eth_ctx->clock_dev,
				  (clock_control_subsys_t)eth_ctx->clock);
		break;
	case PM_DEVICE_ACTION_RESUME:
		LOG_DBG("Resuming");

		clock_control_on(eth_ctx->clock_dev,
				 (clock_control_subsys_t)eth_ctx->clock);
		eth_mcux_init(dev);
		net_if_resume(eth_ctx->iface);
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

out:

	return ret;
}
#endif /* CONFIG_NET_POWER_MANAGEMENT */
#endif /* CONFIG_SOC_FAMILY_KINETIS */

#if ETH_MCUX_FIXED_LINK
static void eth_mcux_get_phy_params(phy_duplex_t *p_phy_duplex,
				    phy_speed_t *p_phy_speed)
{
	*p_phy_duplex = kPHY_HalfDuplex;
#if ETH_MCUX_FIXED_LINK_FULL_DUPLEX
	*p_phy_duplex = kPHY_FullDuplex;
#endif

	*p_phy_speed = kPHY_Speed10M;
#if ETH_MCUX_FIXED_LINK_SPEED == 100
	*p_phy_speed = kPHY_Speed100M;
#endif
}
#else

static void eth_mcux_decode_duplex_and_speed(uint32_t status,
					     phy_duplex_t *p_phy_duplex,
					     phy_speed_t *p_phy_speed)
{
	switch (status & PHY_CTL1_SPEEDUPLX_MASK) {
	case PHY_CTL1_10FULLDUPLEX_MASK:
		*p_phy_duplex = kPHY_FullDuplex;
		*p_phy_speed = kPHY_Speed10M;
		break;
	case PHY_CTL1_100FULLDUPLEX_MASK:
		*p_phy_duplex = kPHY_FullDuplex;
		*p_phy_speed = kPHY_Speed100M;
		break;
	case PHY_CTL1_100HALFDUPLEX_MASK:
		*p_phy_duplex = kPHY_HalfDuplex;
		*p_phy_speed = kPHY_Speed100M;
		break;
	case PHY_CTL1_10HALFDUPLEX_MASK:
		*p_phy_duplex = kPHY_HalfDuplex;
		*p_phy_speed = kPHY_Speed10M;
		break;
	}
}
#endif /* ETH_MCUX_FIXED_LINK */

static inline struct net_if *get_iface(struct eth_context *ctx)
{
	return ctx->iface;
}

static void eth_mcux_phy_enter_reset(struct eth_context *context)
{
	/* Reset the PHY. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	ENET_StartSMIWrite(context->base, context->phy_addr,
			   PHY_BASICCONTROL_REG,
			   kENET_MiiWriteValidFrame,
			   PHY_BCTL_RESET_MASK);
#endif
	context->phy_state = eth_mcux_phy_state_reset;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	k_work_submit(&context->phy_work);
#endif
}

static void eth_mcux_phy_start(struct eth_context *context)
{
#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
	LOG_DBG("%s phy_state=%s", eth_name(context->base),
		phy_state_name(context->phy_state));
#endif

	context->enabled = true;

	switch (context->phy_state) {
	case eth_mcux_phy_state_initial:
		context->phy_handle->phyAddr = context->phy_addr;
		ENET_ActiveRead(context->base);
		/* Reset the PHY. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		ENET_StartSMIWrite(context->base, context->phy_addr,
				   PHY_BASICCONTROL_REG,
				   kENET_MiiWriteValidFrame,
				   PHY_BCTL_RESET_MASK);
#else
		/*
		 * Without SMI communication we need to wait for the
		 * network core to bring the iface up.
		 */
		k_work_submit(&context->phy_work);
		break;
#endif
#if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX)
		context->phy_state = eth_mcux_phy_state_initial;
#else
		context->phy_state = eth_mcux_phy_state_reset;
#endif
		break;
	case eth_mcux_phy_state_reset:
		eth_mcux_phy_enter_reset(context);
		break;
	case eth_mcux_phy_state_autoneg:
	case eth_mcux_phy_state_restart:
	case eth_mcux_phy_state_read_status:
	case eth_mcux_phy_state_read_duplex:
	case eth_mcux_phy_state_wait:
	case eth_mcux_phy_state_closing:
		break;
	}
}

void eth_mcux_phy_stop(struct eth_context *context)
{
#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
	LOG_DBG("%s phy_state=%s", eth_name(context->base),
		phy_state_name(context->phy_state));
#endif

	context->enabled = false;

	switch (context->phy_state) {
	case eth_mcux_phy_state_initial:
	case eth_mcux_phy_state_reset:
	case eth_mcux_phy_state_autoneg:
	case eth_mcux_phy_state_restart:
	case eth_mcux_phy_state_read_status:
	case eth_mcux_phy_state_read_duplex:
		/* Do nothing, let the current communication complete
		 * then deal with shutdown.
		 */
		context->phy_state = eth_mcux_phy_state_closing;
		break;
	case eth_mcux_phy_state_wait:
		k_work_cancel_delayable(&context->delayed_phy_work);
		/* @todo, actually power down the PHY ? */
		context->phy_state = eth_mcux_phy_state_initial;
		break;
	case eth_mcux_phy_state_closing:
		/* We are already going down. */
		break;
	}
}

static void eth_mcux_phy_event(struct eth_context *context)
{
#if !(defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK)
	uint32_t status;
#endif
	bool link_up;
#if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX)
	status_t res;
	uint16_t ctrl2;
#endif
	phy_duplex_t phy_duplex = kPHY_FullDuplex;
	phy_speed_t phy_speed = kPHY_Speed100M;

#if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG)
	LOG_DBG("%s phy_state=%s", eth_name(context->base),
		phy_state_name(context->phy_state));
#endif
	switch (context->phy_state) {
	case eth_mcux_phy_state_initial:
#if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX)
		ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK);
		res = PHY_Read(context->phy_handle, PHY_CONTROL2_REG, &ctrl2);
		ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK);
		if (res != kStatus_Success) {
			LOG_WRN("Reading PHY reg failed (status 0x%x)", res);
			k_work_submit(&context->phy_work);
		} else {
			ctrl2 |= PHY_CTL2_REFCLK_SELECT_MASK;
			ENET_StartSMIWrite(context->base, context->phy_addr,
					   PHY_CONTROL2_REG,
					   kENET_MiiWriteValidFrame,
					   ctrl2);
		}
		context->phy_state = eth_mcux_phy_state_reset;
#endif
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		/*
		 * When the iface is available proceed with the eth link setup,
		 * otherwise reschedule the eth_mcux_phy_event and check after
		 * 1ms
		 */
		if (context->iface) {
			context->phy_state = eth_mcux_phy_state_reset;
		}

		k_work_reschedule(&context->delayed_phy_work, K_MSEC(1));
#endif
		break;
	case eth_mcux_phy_state_closing:
		if (context->enabled) {
			eth_mcux_phy_enter_reset(context);
		} else {
			/* @todo, actually power down the PHY ? */
			context->phy_state = eth_mcux_phy_state_initial;
		}
		break;
	case eth_mcux_phy_state_reset:
		/* Setup PHY autonegotiation. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		ENET_StartSMIWrite(context->base, context->phy_addr,
				   PHY_AUTONEG_ADVERTISE_REG,
				   kENET_MiiWriteValidFrame,
				   (PHY_100BASETX_FULLDUPLEX_MASK |
				    PHY_100BASETX_HALFDUPLEX_MASK |
				    PHY_10BASETX_FULLDUPLEX_MASK |
				    PHY_10BASETX_HALFDUPLEX_MASK |
				    PHY_IEEE802_3_SELECTOR_MASK));
#endif
		context->phy_state = eth_mcux_phy_state_autoneg;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		k_work_submit(&context->phy_work);
#endif
		break;
	case eth_mcux_phy_state_autoneg:
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		/* Setup PHY autonegotiation. */
		ENET_StartSMIWrite(context->base, context->phy_addr,
				   PHY_BASICCONTROL_REG,
				   kENET_MiiWriteValidFrame,
				   (PHY_BCTL_AUTONEG_MASK |
				    PHY_BCTL_RESTART_AUTONEG_MASK));
#endif
		context->phy_state = eth_mcux_phy_state_restart;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		k_work_submit(&context->phy_work);
#endif
		break;
	case eth_mcux_phy_state_wait:
	case eth_mcux_phy_state_restart:
		/* Start reading the PHY basic status. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		ENET_StartSMIRead(context->base, context->phy_addr,
				  PHY_BASICSTATUS_REG,
				  kENET_MiiReadValidFrame);
#endif
		context->phy_state = eth_mcux_phy_state_read_status;
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
		k_work_submit(&context->phy_work);
#endif
		break;
	case eth_mcux_phy_state_read_status:
		/* PHY Basic status is available. */
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK
		link_up = true;
#else
		status = ENET_ReadSMIData(context->base);
		link_up = status & PHY_BSTATUS_LINKSTATUS_MASK;
#endif
		if (link_up && !context->link_up && context->iface != NULL) {
			/* Start reading the PHY control register. */
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
			ENET_StartSMIRead(context->base, context->phy_addr,
					  PHY_CONTROL1_REG,
					  kENET_MiiReadValidFrame);
#endif
			context->link_up = link_up;
			context->phy_state = eth_mcux_phy_state_read_duplex;
			net_eth_carrier_on(context->iface);
			k_msleep(1);
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
			k_work_submit(&context->phy_work);
#endif
		} else if (!link_up && context->link_up && context->iface != NULL) {
			LOG_INF("%s link down", eth_name(context->base));
			context->link_up = link_up;
			k_work_reschedule(&context->delayed_phy_work,
					  K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS));
			context->phy_state = eth_mcux_phy_state_wait;
			net_eth_carrier_off(context->iface);
		} else {
			k_work_reschedule(&context->delayed_phy_work,
					  K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS));
			context->phy_state = eth_mcux_phy_state_wait;
		}

		break;
	case eth_mcux_phy_state_read_duplex:
		/* PHY control register is available. */
#if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK
		eth_mcux_get_phy_params(&phy_duplex, &phy_speed);
		LOG_INF("%s - Fixed Link", eth_name(context->base));
#else
		status = ENET_ReadSMIData(context->base);
		eth_mcux_decode_duplex_and_speed(status,
						 &phy_duplex,
						 &phy_speed);
#endif
		if (phy_speed != context->phy_speed ||
		    phy_duplex != context->phy_duplex) {
			context->phy_speed = phy_speed;
			context->phy_duplex = phy_duplex;
			ENET_SetMII(context->base,
				    (enet_mii_speed_t) phy_speed,
				    (enet_mii_duplex_t) phy_duplex);
		}

		LOG_INF("%s enabled %sM %s-duplex mode.",
			eth_name(context->base),
			(phy_speed ? "100" : "10"),
			(phy_duplex ? "full" : "half"));
		k_work_reschedule(&context->delayed_phy_work,
				  K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS));
		context->phy_state = eth_mcux_phy_state_wait;
		break;
	}
}

static void eth_mcux_phy_work(struct k_work *item)
{
	struct eth_context *context =
		CONTAINER_OF(item, struct eth_context, phy_work);

	eth_mcux_phy_event(context);
}

static void eth_mcux_delayed_phy_work(struct k_work *item)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(item);
	struct eth_context *context =
		CONTAINER_OF(dwork, struct eth_context, delayed_phy_work);

	eth_mcux_phy_event(context);
}

static void eth_mcux_phy_setup(struct eth_context *context)
{
#if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX)
	status_t res;
	uint16_t oms_override;

	/* Disable MII interrupts to prevent triggering PHY events. */
	ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK);

	res = PHY_Read(context->phy_handle,
		       PHY_OMS_OVERRIDE_REG, &oms_override);
	if (res != kStatus_Success) {
		LOG_WRN("Reading PHY reg failed (status 0x%x)", res);
	} else {
		/* Based on strap-in pins the PHY can be in factory test mode.
		 * Force normal operation.
		 */
		oms_override &= ~PHY_OMS_FACTORY_MODE_MASK;

		/* Prevent PHY entering NAND Tree mode override. */
		if (oms_override & PHY_OMS_NANDTREE_MASK) {
			oms_override &= ~PHY_OMS_NANDTREE_MASK;
		}

		res = PHY_Write(context->phy_handle,
				PHY_OMS_OVERRIDE_REG, oms_override);
		if (res != kStatus_Success) {
			LOG_WRN("Writing PHY reg failed (status 0x%x)", res);
		}
	}

	ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK);
#endif
}

#if defined(CONFIG_PTP_CLOCK_MCUX)

static bool eth_get_ptp_data(struct net_if *iface, struct net_pkt *pkt)
{
	int eth_hlen;

	if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) {
		return false;
	}

	eth_hlen = sizeof(struct net_eth_hdr);

	net_pkt_set_priority(pkt, NET_PRIORITY_CA);

	return true;
}
#endif /* CONFIG_PTP_CLOCK_MCUX */

static int eth_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_context *context = dev->data;
	uint16_t total_len = net_pkt_get_len(pkt);
	status_t status;

#if defined(CONFIG_PTP_CLOCK_MCUX)
	bool timestamped_frame;
#endif

	/* Wait for a TX buffer descriptor to be available */
	k_sem_take(&context->tx_buf_sem, K_FOREVER);

	k_mutex_lock(&context->tx_frame_buf_mutex, K_FOREVER);

	if (net_pkt_read(pkt, context->tx_frame_buf, total_len)) {
		k_mutex_unlock(&context->tx_frame_buf_mutex);
		return -EIO;
	}


#if defined(CONFIG_PTP_CLOCK_MCUX)
	timestamped_frame = eth_get_ptp_data(net_pkt_iface(pkt), pkt);
	if (timestamped_frame) {
		status = ENET_SendFrame(context->base, &context->enet_handle,
					context->tx_frame_buf, total_len, RING_ID, true, pkt);
		if (!status) {
			net_pkt_ref(pkt);
			/*
			 * Network stack will modify the packet upon return,
			 * so wait for the packet to be timestamped,
			 * which will occur within the TX ISR, before
			 * returning
			 */
			k_sem_take(&context->ptp_ts_sem, K_FOREVER);
		}

	} else
#endif
	{
		status = ENET_SendFrame(context->base, &context->enet_handle,
					context->tx_frame_buf, total_len, RING_ID, false, NULL);
	}

	if (status) {
		LOG_ERR("ENET_SendFrame error: %d", (int)status);
		k_mutex_unlock(&context->tx_frame_buf_mutex);
		ENET_ReclaimTxDescriptor(context->base,
					 &context->enet_handle, RING_ID);
		return -1;
	}

	k_mutex_unlock(&context->tx_frame_buf_mutex);

	return 0;
}

static int eth_rx(struct eth_context *context)
{
	uint32_t frame_length = 0U;
	struct net_if *iface;
	struct net_pkt *pkt;
	status_t status;
	uint32_t ts;

#if defined(CONFIG_PTP_CLOCK_MCUX)
	enet_ptp_time_t ptpTimeData;
#endif

	status = ENET_GetRxFrameSize(&context->enet_handle,
				     (uint32_t *)&frame_length, RING_ID);
	if (status == kStatus_ENET_RxFrameEmpty) {
		return 0;
	} else if (status == kStatus_ENET_RxFrameError) {
		enet_data_error_stats_t error_stats;

		LOG_ERR("ENET_GetRxFrameSize return: %d", (int)status);

		ENET_GetRxErrBeforeReadFrame(&context->enet_handle,
					     &error_stats, RING_ID);
		goto flush;
	}

	if (frame_length > NET_ETH_MAX_FRAME_SIZE) {
		LOG_ERR("frame too large (%d)", frame_length);
		goto flush;
	}

	/* Using root iface. It will be updated in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(context->iface, frame_length,
					   AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		goto flush;
	}

	/* In case multiple threads access this buffer,
	 * we need to protect it with a mutex.
	 */
	k_mutex_lock(&context->rx_frame_buf_mutex, K_FOREVER);

	status = ENET_ReadFrame(context->base, &context->enet_handle,
				context->rx_frame_buf, frame_length, RING_ID, &ts);
	if (status) {
		LOG_ERR("ENET_ReadFrame failed: %d", (int)status);
		net_pkt_unref(pkt);

		k_mutex_unlock(&context->rx_frame_buf_mutex);
		goto error;
	}

	if (net_pkt_write(pkt, context->rx_frame_buf, frame_length)) {
		LOG_ERR("Unable to write frame into the pkt");
		net_pkt_unref(pkt);
		k_mutex_unlock(&context->rx_frame_buf_mutex);
		goto error;
	}

	k_mutex_unlock(&context->rx_frame_buf_mutex);

	/*
	 * Use MAC timestamp
	 */
#if defined(CONFIG_PTP_CLOCK_MCUX)
	k_mutex_lock(&context->ptp_mutex, K_FOREVER);
	if (eth_get_ptp_data(get_iface(context), pkt)) {
		ENET_Ptp1588GetTimer(context->base, &context->enet_handle,
				     &ptpTimeData);
		/* If latest timestamp reloads after getting from Rx BD,
		 * then second - 1 to make sure the actual Rx timestamp is
		 * accurate
		 */
		if (ptpTimeData.nanosecond < ts) {
			ptpTimeData.second--;
		}

		pkt->timestamp.nanosecond = ts;
		pkt->timestamp.second = ptpTimeData.second;
	} else {
		/* Invalid value. */
		pkt->timestamp.nanosecond = UINT32_MAX;
		pkt->timestamp.second = UINT64_MAX;
	}
	k_mutex_unlock(&context->ptp_mutex);
#endif /* CONFIG_PTP_CLOCK_MCUX */

	iface = get_iface(context);
#if defined(CONFIG_NET_DSA)
	iface = dsa_net_recv(iface, &pkt);
#endif
	if (net_recv_data(iface, pkt) < 0) {
		net_pkt_unref(pkt);
		goto error;
	}

	return 1;
flush:
	/* Flush the current read buffer. This operation can
	 * only report failure if there is no frame to flush,
	 * which cannot happen in this context.
	 */
	status = ENET_ReadFrame(context->base, &context->enet_handle, NULL,
				0, RING_ID, NULL);
	__ASSERT_NO_MSG(status == kStatus_Success);
error:
	eth_stats_update_errors_rx(get_iface(context));
	return -EIO;
}

#if defined(CONFIG_PTP_CLOCK_MCUX) && defined(CONFIG_NET_L2_PTP)
static inline void ts_register_tx_event(struct eth_context *context,
					enet_frame_info_t *frameinfo)
{
	struct net_pkt *pkt;

	pkt = frameinfo->context;
	if (pkt && atomic_get(&pkt->atomic_ref) > 0) {
		if (eth_get_ptp_data(net_pkt_iface(pkt), pkt)) {
			if (frameinfo->isTsAvail) {
				k_mutex_lock(&context->ptp_mutex, K_FOREVER);

				pkt->timestamp.nanosecond =
					frameinfo->timeStamp.nanosecond;
				pkt->timestamp.second =
					frameinfo->timeStamp.second;

				net_if_add_tx_timestamp(pkt);
				k_sem_give(&context->ptp_ts_sem);
				k_mutex_unlock(&context->ptp_mutex);
			}
		}

		net_pkt_unref(pkt);
	} else {
		if (IS_ENABLED(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) && pkt) {
			LOG_ERR("pkt %p already freed", pkt);
		}
	}

}
#endif /* CONFIG_PTP_CLOCK_MCUX && CONFIG_NET_L2_PTP */

static void eth_callback(ENET_Type *base, enet_handle_t *handle,
#if FSL_FEATURE_ENET_QUEUE > 1
			 uint32_t ringId,
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
			 enet_event_t event, enet_frame_info_t *frameinfo, void *param)
{
	struct eth_context *context = param;

	switch (event) {
	case kENET_RxEvent:
		k_sem_give(&context->rx_thread_sem);
		break;
	case kENET_TxEvent:
#if defined(CONFIG_PTP_CLOCK_MCUX) && defined(CONFIG_NET_L2_PTP)
		/* Register event */
		ts_register_tx_event(context, frameinfo);
#endif /* CONFIG_PTP_CLOCK_MCUX && CONFIG_NET_L2_PTP */
		/* Free the TX buffer. */
		k_sem_give(&context->tx_buf_sem);
		break;
	case kENET_ErrEvent:
		/* Error event: BABR/BABT/EBERR/LC/RL/UN/PLR. */
		break;
	case kENET_WakeUpEvent:
		/* Wake up from sleep mode event. */
		break;
	case kENET_TimeStampEvent:
		/* Time stamp event. */
		/* Reset periodic timer to default value. */
		context->base->ATPER = NSEC_PER_SEC;
		break;
	case kENET_TimeStampAvailEvent:
		/* Time stamp available event. */
		break;
	}
}

static void eth_rx_thread(void *arg1, void *unused1, void *unused2)
{
	struct eth_context *context = (struct eth_context *)arg1;

	while (1) {
		if (k_sem_take(&context->rx_thread_sem, K_FOREVER) == 0) {
			while (eth_rx(context) == 1) {
				;
			}
			/* enable the IRQ for RX */
			ENET_EnableInterrupts(context->base,
					      kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
		}
	}
}

#if defined(CONFIG_ETH_MCUX_PHY_RESET)
static int eth_phy_reset(const struct device *dev)
{
	int err;
	struct eth_context *context = dev->data;

	/* pull up the ENET_INT before RESET. */
	err = gpio_pin_configure_dt(&context->int_gpio, GPIO_OUTPUT_ACTIVE);
	if (err) {
		return err;
	}
	return gpio_pin_configure_dt(&context->reset_gpio, GPIO_OUTPUT_INACTIVE);
}

static int eth_phy_init(const struct device *dev)
{
	struct eth_context *context = dev->data;

	/* RESET PHY chip. */
	k_busy_wait(USEC_PER_MSEC * 500);
	return gpio_pin_set_dt(&context->reset_gpio, 1);
}
#endif

static void eth_mcux_init(const struct device *dev)
{
	struct eth_context *context = dev->data;
	const enet_buffer_config_t *buffer_config = dev->config;
	enet_config_t enet_config;
	uint32_t sys_clock;
#if defined(CONFIG_PTP_CLOCK_MCUX)
	uint8_t ptp_multicast[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
	uint8_t ptp_peer_multicast[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x0E };
#endif
#if defined(CONFIG_MDNS_RESPONDER) || defined(CONFIG_MDNS_RESOLVER)
	/* standard multicast MAC address */
	uint8_t mdns_multicast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
#endif

	context->phy_state = eth_mcux_phy_state_initial;
	context->phy_handle->ops = &phyksz8081_ops;

#if defined(CONFIG_SOC_SERIES_IMXRT10XX)
#if DT_NODE_HAS_STATUS(DT_NODELABEL(enet), okay)
	sys_clock = CLOCK_GetFreq(kCLOCK_IpgClk);
#endif
#if DT_NODE_HAS_STATUS(DT_NODELABEL(enet2), okay)
	sys_clock = CLOCK_GetFreq(kCLOCK_EnetPll1Clk);
#endif
#elif defined(CONFIG_SOC_SERIES_IMXRT11XX)
	sys_clock = CLOCK_GetRootClockFreq(kCLOCK_Root_Bus);
#else
	sys_clock = CLOCK_GetFreq(kCLOCK_CoreSysClk);
#endif

	ENET_GetDefaultConfig(&enet_config);
	enet_config.interrupt |= kENET_RxFrameInterrupt;
	enet_config.interrupt |= kENET_TxFrameInterrupt;
#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	enet_config.interrupt |= kENET_MiiInterrupt;
#endif
	enet_config.miiMode = kENET_RmiiMode;
	enet_config.callback = eth_callback;
	enet_config.userData = context;

	if (IS_ENABLED(CONFIG_ETH_MCUX_PROMISCUOUS_MODE)) {
		enet_config.macSpecialConfig |= kENET_ControlPromiscuousEnable;
	}

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		enet_config.macSpecialConfig |= kENET_ControlVLANTagEnable;
	}

	if (IS_ENABLED(CONFIG_ETH_MCUX_HW_ACCELERATION)) {
		enet_config.txAccelerConfig |=
			kENET_TxAccelIpCheckEnabled |
			kENET_TxAccelProtoCheckEnabled;
		enet_config.rxAccelerConfig |=
			kENET_RxAccelIpCheckEnabled |
			kENET_RxAccelProtoCheckEnabled;
	}

	ENET_Init(context->base,
		  &context->enet_handle,
		  &enet_config,
		  buffer_config,
		  context->mac_addr,
		  sys_clock);

#if defined(CONFIG_PTP_CLOCK_MCUX)
	ENET_AddMulticastGroup(context->base, ptp_multicast);
	ENET_AddMulticastGroup(context->base, ptp_peer_multicast);

	/* only for ERRATA_2579 */
	context->ptp_config.channel = kENET_PtpTimerChannel3;
	context->ptp_config.ptp1588ClockSrc_Hz =
		CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ;
	context->clk_ratio = 1.0;

	ENET_Ptp1588SetChannelMode(context->base, kENET_PtpTimerChannel3,
				   kENET_PtpChannelPulseHighonCompare, true);
	ENET_Ptp1588Configure(context->base, &context->enet_handle,
			      &context->ptp_config);
#endif

#if defined(CONFIG_MDNS_RESPONDER) || defined(CONFIG_MDNS_RESOLVER)
	ENET_AddMulticastGroup(context->base, mdns_multicast);
#endif

#if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI)
	ENET_SetSMI(context->base, sys_clock, false);
#endif

	/* handle PHY setup after SMI initialization */
	eth_mcux_phy_setup(context);

#if defined(CONFIG_PTP_CLOCK_MCUX)
	/* Enable reclaim of tx descriptors that will have the tx timestamp */
	ENET_SetTxReclaim(&context->enet_handle, true, 0);
#endif

	eth_mcux_phy_start(context);
}

static int eth_init(const struct device *dev)
{
	struct eth_context *context = dev->data;
#if defined(CONFIG_PINCTRL)
	int err;

	err = pinctrl_apply_state(context->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
#endif /* CONFIG_PINCTRL */

#if defined(CONFIG_NET_POWER_MANAGEMENT)
	const uint32_t inst = ENET_GetInstance(context->base);

	context->clock = enet_clocks[inst];
#endif

#if defined(CONFIG_ETH_MCUX_PHY_RESET)
	eth_phy_reset(dev);
	eth_phy_init(dev);
#endif

#if defined(CONFIG_PTP_CLOCK_MCUX)
	k_mutex_init(&context->ptp_mutex);
	k_sem_init(&context->ptp_ts_sem, 0, 1);
#endif
	k_mutex_init(&context->rx_frame_buf_mutex);
	k_mutex_init(&context->tx_frame_buf_mutex);

	k_sem_init(&context->rx_thread_sem, 0, CONFIG_ETH_MCUX_RX_BUFFERS);
	k_sem_init(&context->tx_buf_sem,
		   CONFIG_ETH_MCUX_TX_BUFFERS, CONFIG_ETH_MCUX_TX_BUFFERS);
	k_work_init(&context->phy_work, eth_mcux_phy_work);
	k_work_init_delayable(&context->delayed_phy_work,
			      eth_mcux_delayed_phy_work);

	/* Start the interrupt-driven RX processing thread */
	k_thread_create(&context->rx_thread, context->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(context->rx_thread_stack),
			eth_rx_thread, (void *) context, NULL, NULL,
			K_PRIO_COOP(2),
			0, K_NO_WAIT);
	k_thread_name_set(&context->rx_thread, "mcux_eth_rx");
	if (context->generate_mac) {
		context->generate_mac(context->mac_addr);
	}

	eth_mcux_init(dev);

	LOG_DBG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x",
		dev->name,
		context->mac_addr[0], context->mac_addr[1],
		context->mac_addr[2], context->mac_addr[3],
		context->mac_addr[4], context->mac_addr[5]);

	return 0;
}

static void eth_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_context *context = dev->data;

	net_if_set_link_addr(iface, context->mac_addr,
			     sizeof(context->mac_addr),
			     NET_LINK_ETHERNET);

	if (context->iface == NULL) {
		context->iface = iface;
	}

#if defined(CONFIG_NET_DSA)
	dsa_register_master_tx(iface, &eth_tx);
#endif
	ethernet_init(iface);
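	/* Start with carrier off; the PHY state machine calls
	 * net_eth_carrier_on() once link up is detected.
	 */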
	net_if_carrier_off(iface);

	context->config_func();
}

static enum ethernet_hw_caps eth_mcux_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return ETHERNET_LINK_10BASE_T |
		ETHERNET_HW_FILTERING |
#if defined(CONFIG_NET_VLAN)
		ETHERNET_HW_VLAN |
#endif
#if defined(CONFIG_PTP_CLOCK_MCUX)
		ETHERNET_PTP |
#endif
#if defined(CONFIG_NET_DSA)
		ETHERNET_DSA_MASTER_PORT |
#endif
#if defined(CONFIG_ETH_MCUX_HW_ACCELERATION)
		ETHERNET_HW_TX_CHKSUM_OFFLOAD |
		ETHERNET_HW_RX_CHKSUM_OFFLOAD |
#endif
		ETHERNET_AUTO_NEGOTIATION_SET |
		ETHERNET_LINK_100BASE_T;
}

static int eth_mcux_set_config(const struct device *dev,
			       enum ethernet_config_type type,
			       const struct ethernet_config *config)
{
	struct eth_context *context = dev->data;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(context->mac_addr,
		       config->mac_address.addr,
		       sizeof(context->mac_addr));
		ENET_SetMacAddr(context->base, context->mac_addr);
		net_if_set_link_addr(context->iface, context->mac_addr,
				     sizeof(context->mac_addr),
				     NET_LINK_ETHERNET);
		LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
			dev->name,
			context->mac_addr[0], context->mac_addr[1],
			context->mac_addr[2], context->mac_addr[3],
			context->mac_addr[4], context->mac_addr[5]);
		return 0;
	case ETHERNET_CONFIG_TYPE_FILTER:
		/* The ENET driver does not modify the address buffer but the API is not const */
		if (config->filter.set) {
			ENET_AddMulticastGroup(context->base,
					       (uint8_t *)config->filter.mac_address.addr);
		} else {
			ENET_LeaveMulticastGroup(context->base,
						 (uint8_t *)config->filter.mac_address.addr);
		}
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

#if defined(CONFIG_PTP_CLOCK_MCUX)
static const struct device *eth_mcux_get_ptp_clock(const struct device *dev)
{
	struct eth_context *context = dev->data;

	return context->ptp_clock;
}
#endif

static const struct ethernet_api api_funcs = {
	.iface_api.init = eth_iface_init,
#if defined(CONFIG_PTP_CLOCK_MCUX)
	.get_ptp_clock = eth_mcux_get_ptp_clock,
#endif
	.get_capabilities = eth_mcux_get_capabilities,
	.set_config = eth_mcux_set_config,
#if defined(CONFIG_NET_DSA)
	.send = dsa_tx,
#else
	.send = eth_tx,
#endif
};

#if defined(CONFIG_PTP_CLOCK_MCUX)
static void eth_mcux_ptp_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
	unsigned int irq_lock_key = irq_lock();
	enet_ptp_timer_channel_t channel;

	/* clear channel */
	for (channel = kENET_PtpTimerChannel1; channel <= kENET_PtpTimerChannel4; channel++) {
		if (ENET_Ptp1588GetChannelStatus(context->base, channel)) {
			ENET_Ptp1588ClearChannelStatus(context->base, channel);
		}
	}
	ENET_TimeStampIRQHandler(context->base, &context->enet_handle);
	irq_unlock(irq_lock_key);
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, common) || DT_INST_IRQ_HAS_NAME(1, common)
static void eth_mcux_common_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
	uint32_t EIR = ENET_GetInterruptStatus(context->base);
	unsigned int irq_lock_key = irq_lock();

	if (EIR & (kENET_RxBufferInterrupt | kENET_RxFrameInterrupt)) {
		/* disable the IRQ for RX */
		context->rx_irq_num++;
#if FSL_FEATURE_ENET_QUEUE > 1
		/* Only use ring 0 in this driver */
		ENET_ReceiveIRQHandler(context->base, &context->enet_handle, 0);
#else
		ENET_ReceiveIRQHandler(context->base, &context->enet_handle);
#endif
		ENET_DisableInterrupts(context->base, kENET_RxFrameInterrupt |
				       kENET_RxBufferInterrupt);
	}

	if (EIR & kENET_TxFrameInterrupt) {
#if FSL_FEATURE_ENET_QUEUE > 1
		ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0);
#else
		ENET_TransmitIRQHandler(context->base, &context->enet_handle);
#endif
	}

	if (EIR & kENET_TxBufferInterrupt) {
		ENET_ClearInterruptStatus(context->base, kENET_TxBufferInterrupt);
		ENET_DisableInterrupts(context->base, kENET_TxBufferInterrupt);
	}

	if (EIR & ENET_EIR_MII_MASK) {
		k_work_submit(&context->phy_work);
		ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt);
	}
#if defined(CONFIG_PTP_CLOCK_MCUX)
	if (EIR & ENET_TS_INTERRUPT) {
		ENET_TimeStampIRQHandler(context->base, &context->enet_handle);
	}
#endif
	irq_unlock(irq_lock_key);
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, rx) || DT_INST_IRQ_HAS_NAME(1, rx)
static void eth_mcux_rx_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;

	ENET_DisableInterrupts(context->base, kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
	ENET_ReceiveIRQHandler(context->base, &context->enet_handle);
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, tx) || DT_INST_IRQ_HAS_NAME(1, tx)
static void eth_mcux_tx_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
#if FSL_FEATURE_ENET_QUEUE > 1
	ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0);
#else
	ENET_TransmitIRQHandler(context->base, &context->enet_handle);
#endif
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, err) || DT_INST_IRQ_HAS_NAME(1, err)
static void eth_mcux_err_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
	uint32_t pending = ENET_GetInterruptStatus(context->base);

	if (pending & ENET_EIR_MII_MASK) {
		k_work_submit(&context->phy_work);
		ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt);
	}
}
#endif

#if defined(CONFIG_SOC_SERIES_IMXRT10XX)
#define ETH_MCUX_UNIQUE_ID (OCOTP->CFG1 ^ OCOTP->CFG2)
#elif defined(CONFIG_SOC_SERIES_IMXRT11XX)
#define ETH_MCUX_UNIQUE_ID (OCOTP->FUSEN[40].FUSE)
#elif defined(CONFIG_SOC_SERIES_KINETIS_K6X)
#define ETH_MCUX_UNIQUE_ID (SIM->UIDH ^ SIM->UIDMH ^ SIM->UIDML ^ SIM->UIDL)
#else
#error "Unsupported SOC"
#endif

#define ETH_MCUX_NONE

#define ETH_MCUX_IRQ_INIT(n, name) \
	do { \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, name, irq), \
			    DT_INST_IRQ_BY_NAME(n, name, priority), \
			    eth_mcux_##name##_isr, \
			    DEVICE_DT_INST_GET(n), \
			    0); \
		irq_enable(DT_INST_IRQ_BY_NAME(n, name, irq)); \
	} while (false)

#define ETH_MCUX_IRQ(n, name) \
	COND_CODE_1(DT_INST_IRQ_HAS_NAME(n, name), \
		    (ETH_MCUX_IRQ_INIT(n, name)), \
		    (ETH_MCUX_NONE))

#if defined(CONFIG_PTP_CLOCK_MCUX)
#define PTP_INST_NODEID(n) DT_INST_CHILD(n, ptp)

#define ETH_MCUX_IRQ_PTP_INIT(n) \
	do { \
		IRQ_CONNECT(DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, irq), \
			    DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, priority), \
			    eth_mcux_ptp_isr, \
			    DEVICE_DT_INST_GET(n), \
			    0); \
		irq_enable(DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, irq)); \
	} while (false)

#define ETH_MCUX_IRQ_PTP(n) \
	COND_CODE_1(DT_NODE_HAS_STATUS(PTP_INST_NODEID(n), okay), \
		    (ETH_MCUX_IRQ_PTP_INIT(n)), \
		    (ETH_MCUX_NONE))

#define ETH_MCUX_PTP_FRAMEINFO_ARRAY(n) \
	static enet_frame_info_t \
		eth##n##_tx_frameinfo_array[CONFIG_ETH_MCUX_TX_BUFFERS];

#define ETH_MCUX_PTP_FRAMEINFO(n) \
	.txFrameInfo = eth##n##_tx_frameinfo_array,
#else
#define ETH_MCUX_IRQ_PTP(n)

#define ETH_MCUX_PTP_FRAMEINFO_ARRAY(n)

#define ETH_MCUX_PTP_FRAMEINFO(n) \
	.txFrameInfo = NULL,
#endif

#define ETH_MCUX_GENERATE_MAC_RANDOM(n) \
	static void generate_eth##n##_mac(uint8_t *mac_addr) \
	{ \
		gen_random_mac(mac_addr, \
			       FREESCALE_OUI_B0, \
			       FREESCALE_OUI_B1, \
			       FREESCALE_OUI_B2); \
	}

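/* Derive a stable MAC address from the Freescale OUI and the SoC unique ID,
 * forcing the locally administered bit; the instance number keeps multiple
 * interfaces distinct.
 */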
#define ETH_MCUX_GENERATE_MAC_UNIQUE(n) \
	static void generate_eth##n##_mac(uint8_t *mac_addr) \
	{ \
		uint32_t id = ETH_MCUX_UNIQUE_ID; \
		\
		mac_addr[0] = FREESCALE_OUI_B0; \
		mac_addr[0] |= 0x02; /* force LAA bit */ \
		mac_addr[1] = FREESCALE_OUI_B1; \
		mac_addr[2] = FREESCALE_OUI_B2; \
		mac_addr[3] = id >> 8; \
		mac_addr[4] = id >> 16; \
		mac_addr[5] = id >> 0; \
		mac_addr[5] += n; \
	}

#define ETH_MCUX_GENERATE_MAC(n) \
	COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), \
		    (ETH_MCUX_GENERATE_MAC_RANDOM(n)), \
		    (ETH_MCUX_GENERATE_MAC_UNIQUE(n)))

#define ETH_MCUX_MAC_ADDR_LOCAL(n) \
	.mac_addr = DT_INST_PROP(n, local_mac_address), \
	.generate_mac = NULL,

#define ETH_MCUX_MAC_ADDR_GENERATE(n) \
	.mac_addr = {0}, \
	.generate_mac = generate_eth##n##_mac,

#define ETH_MCUX_MAC_ADDR(n) \
	COND_CODE_1(ETH_MCUX_MAC_ADDR_TO_BOOL(n), \
		    (ETH_MCUX_MAC_ADDR_LOCAL(n)), \
		    (ETH_MCUX_MAC_ADDR_GENERATE(n)))

#ifdef CONFIG_SOC_FAMILY_KINETIS
#define ETH_MCUX_POWER_INIT(n) \
	.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \

#define ETH_MCUX_POWER(n) \
	COND_CODE_1(CONFIG_NET_POWER_MANAGEMENT, \
		    (ETH_MCUX_POWER_INIT(n)), \
		    (ETH_MCUX_NONE))
#define ETH_MCUX_PM_DEVICE_INIT(n) \
	PM_DEVICE_DT_INST_DEFINE(n, eth_mcux_device_pm_action);
#define ETH_MCUX_PM_DEVICE_GET(n) PM_DEVICE_DT_INST_GET(n)
#else
#define ETH_MCUX_POWER(n)
#define ETH_MCUX_PM_DEVICE_INIT(n)
#define ETH_MCUX_PM_DEVICE_GET(n) NULL
#endif /* CONFIG_SOC_FAMILY_KINETIS */

#define ETH_MCUX_GEN_MAC(n) \
	COND_CODE_0(ETH_MCUX_MAC_ADDR_TO_BOOL(n), \
		    (ETH_MCUX_GENERATE_MAC(n)), \
		    (ETH_MCUX_NONE))

/*
 * In the below code we explicitly define
 * ETH_MCUX_MAC_ADDR_TO_BOOL_0 for the '0' instance of enet driver.
 *
 * For instance N one shall add definition for ETH_MCUX_MAC_ADDR_TO_BOOL_N
 */
#if (NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))) == 0
#define ETH_MCUX_MAC_ADDR_TO_BOOL_0 0
#else
#define ETH_MCUX_MAC_ADDR_TO_BOOL_0 1
#endif
#define ETH_MCUX_MAC_ADDR_TO_BOOL(n) ETH_MCUX_MAC_ADDR_TO_BOOL_##n

#if defined(CONFIG_PINCTRL)
#define ETH_MCUX_PINCTRL_DEFINE(n) PINCTRL_DT_INST_DEFINE(n);
#define ETH_MCUX_PINCTRL_INIT(n) .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),
#else
#define ETH_MCUX_PINCTRL_DEFINE(n)
#define ETH_MCUX_PINCTRL_INIT(n)
#endif

#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay) && \
	CONFIG_ETH_MCUX_USE_DTCM_FOR_DMA_BUFFER
/* Use DTCM for hardware DMA buffers */
#define _mcux_dma_desc __dtcm_bss_section
#define _mcux_dma_buffer __dtcm_noinit_section
#define _mcux_driver_buffer __dtcm_noinit_section
#elif defined(CONFIG_NOCACHE_MEMORY)
#define _mcux_dma_desc __nocache
#define _mcux_dma_buffer __nocache
#define _mcux_driver_buffer
#else
#define _mcux_dma_desc
#define _mcux_dma_buffer
#define _mcux_driver_buffer
#endif

#if defined(CONFIG_ETH_MCUX_PHY_RESET)
#define ETH_MCUX_PHY_GPIOS(n) \
	.int_gpio = GPIO_DT_SPEC_INST_GET(n, int_gpios), \
	.reset_gpio = GPIO_DT_SPEC_INST_GET(n, reset_gpios),
#else
#define ETH_MCUX_PHY_GPIOS(n)
#endif

#define ETH_MCUX_INIT(n) \
	ETH_MCUX_GEN_MAC(n) \
	\
	ETH_MCUX_PINCTRL_DEFINE(n) \
	\
	static void eth##n##_config_func(void); \
	static _mcux_driver_buffer uint8_t \
		tx_enet_frame_##n##_buf[NET_ETH_MAX_FRAME_SIZE]; \
	static _mcux_driver_buffer uint8_t \
		rx_enet_frame_##n##_buf[NET_ETH_MAX_FRAME_SIZE]; \
	static status_t _MDIO_Write(uint8_t phyAddr, uint8_t regAddr, uint16_t data) \
	{ \
		return ENET_MDIOWrite((ENET_Type *)DT_INST_REG_ADDR(n), phyAddr, regAddr, data);\
	}; \
	\
	static status_t _MDIO_Read(uint8_t phyAddr, uint8_t regAddr, uint16_t *pData) \
	{ \
		return ENET_MDIORead((ENET_Type *)DT_INST_REG_ADDR(n), phyAddr, regAddr, pData); \
	}; \
	\
	static struct _phy_resource eth##n##_phy_resource = { \
		.read = _MDIO_Read, \
		.write = _MDIO_Write \
	}; \
	static phy_handle_t eth##n##_phy_handle = { \
		.resource = (void *)&eth##n##_phy_resource \
	}; \
	static struct _phy_resource eth##n##_phy_config; \
	\
	static struct eth_context eth##n##_context = { \
		.base = (ENET_Type *)DT_INST_REG_ADDR(n), \
		.config_func = eth##n##_config_func, \
		.phy_config = &eth##n##_phy_config, \
		.phy_addr = DT_INST_PROP(n, phy_addr), \
		.phy_duplex = kPHY_FullDuplex, \
		.phy_speed = kPHY_Speed100M, \
		.phy_handle = &eth##n##_phy_handle, \
		.tx_frame_buf = tx_enet_frame_##n##_buf, \
		.rx_frame_buf = rx_enet_frame_##n##_buf, \
		ETH_MCUX_PINCTRL_INIT(n) \
		ETH_MCUX_PHY_GPIOS(n) \
		ETH_MCUX_MAC_ADDR(n) \
		ETH_MCUX_POWER(n) \
	}; \
	\
	static __aligned(ENET_BUFF_ALIGNMENT) \
		_mcux_dma_desc \
		enet_rx_bd_struct_t \
		eth##n##_rx_buffer_desc[CONFIG_ETH_MCUX_RX_BUFFERS]; \
	\
	static __aligned(ENET_BUFF_ALIGNMENT) \
		_mcux_dma_desc \
		enet_tx_bd_struct_t \
		eth##n##_tx_buffer_desc[CONFIG_ETH_MCUX_TX_BUFFERS]; \
	\
	static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \
		_mcux_dma_buffer \
		eth##n##_rx_buffer[CONFIG_ETH_MCUX_RX_BUFFERS] \
				  [ETH_MCUX_BUFFER_SIZE]; \
	\
	static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \
		_mcux_dma_buffer \
		eth##n##_tx_buffer[CONFIG_ETH_MCUX_TX_BUFFERS] \
				  [ETH_MCUX_BUFFER_SIZE]; \
	\
	ETH_MCUX_PTP_FRAMEINFO_ARRAY(n) \
	\
	static const enet_buffer_config_t eth##n##_buffer_config = { \
		.rxBdNumber = CONFIG_ETH_MCUX_RX_BUFFERS, \
		.txBdNumber = CONFIG_ETH_MCUX_TX_BUFFERS, \
		.rxBuffSizeAlign = ETH_MCUX_BUFFER_SIZE, \
		.txBuffSizeAlign = ETH_MCUX_BUFFER_SIZE, \
		.rxBdStartAddrAlign = eth##n##_rx_buffer_desc, \
		.txBdStartAddrAlign = eth##n##_tx_buffer_desc, \
		.rxBufferAlign = eth##n##_rx_buffer[0], \
		.txBufferAlign = eth##n##_tx_buffer[0], \
		.rxMaintainEnable = true, \
		.txMaintainEnable = true, \
		ETH_MCUX_PTP_FRAMEINFO(n) \
	}; \
	\
	ETH_MCUX_PM_DEVICE_INIT(n) \
	\
	ETH_NET_DEVICE_DT_INST_DEFINE(n, \
				      eth_init, \
				      ETH_MCUX_PM_DEVICE_GET(n), \
				      &eth##n##_context, \
				      &eth##n##_buffer_config, \
				      CONFIG_ETH_INIT_PRIORITY, \
				      &api_funcs, \
				      NET_ETH_MTU); \
	\
	static void eth##n##_config_func(void) \
	{ \
		ETH_MCUX_IRQ(n, rx); \
		ETH_MCUX_IRQ(n, tx); \
		ETH_MCUX_IRQ(n, err); \
		ETH_MCUX_IRQ(n, common); \
		ETH_MCUX_IRQ_PTP(n); \
	} \

DT_INST_FOREACH_STATUS_OKAY(ETH_MCUX_INIT)

#if defined(CONFIG_PTP_CLOCK_MCUX)
struct ptp_context {
	struct eth_context *eth_context;
#if defined(CONFIG_PINCTRL)
	const struct pinctrl_dev_config *pincfg;
#endif /* CONFIG_PINCTRL */
};

#if defined(CONFIG_PINCTRL)
#define ETH_MCUX_PTP_PINCTRL_DEFINE(n) PINCTRL_DT_DEFINE(n);
#define ETH_MCUX_PTP_PINCTRL_INIT(n) .pincfg = PINCTRL_DT_DEV_CONFIG_GET(n),
#else
#define ETH_MCUX_PTP_PINCTRL_DEFINE(n)
#define ETH_MCUX_PTP_PINCTRL_INIT(n)
#endif /* CONFIG_PINCTRL */

ETH_MCUX_PTP_PINCTRL_DEFINE(DT_NODELABEL(ptp))

static struct ptp_context ptp_mcux_0_context = {
	ETH_MCUX_PTP_PINCTRL_INIT(DT_NODELABEL(ptp))
};

static int ptp_clock_mcux_set(const struct device *dev,
			      struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	enet_ptp_time_t enet_time;

	enet_time.second = tm->second;
	enet_time.nanosecond = tm->nanosecond;

	ENET_Ptp1588SetTimer(context->base, &context->enet_handle, &enet_time);
	return 0;
}

static int ptp_clock_mcux_get(const struct device *dev,
			      struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	enet_ptp_time_t enet_time;

	ENET_Ptp1588GetTimer(context->base, &context->enet_handle, &enet_time);

	tm->second = enet_time.second;
	tm->nanosecond = enet_time.nanosecond;
	return 0;
}

static int ptp_clock_mcux_adjust(const struct device *dev, int increment)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	int key, ret;

	ARG_UNUSED(dev);

	if ((increment <= (int32_t)(-NSEC_PER_SEC)) ||
	    (increment >= (int32_t)NSEC_PER_SEC)) {
		ret = -EINVAL;
	} else {
		key = irq_lock();
		if (context->base->ATPER != NSEC_PER_SEC) {
			ret = -EBUSY;
		} else {
			/* Seconds counter is handled by software. Change the
			 * period of one software second to adjust the clock.
			 */
			context->base->ATPER = NSEC_PER_SEC - increment;
			ret = 0;
		}
		irq_unlock(key);
	}

	return ret;
}

static int ptp_clock_mcux_rate_adjust(const struct device *dev, double ratio)
{
	const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ;
	struct ptp_context *ptp_context = dev->data;
	struct eth_context *context = ptp_context->eth_context;
	int corr;
	int32_t mul;
	double val;

	/* No change needed. */
	if ((ratio > 1.0 && ratio - 1.0 < 0.00000001) ||
	    (ratio < 1.0 && 1.0 - ratio < 0.00000001)) {
		return 0;
	}

	ratio *= context->clk_ratio;

	/* Limit possible ratio. */
	if ((ratio > 1.0 + 1.0/(2 * hw_inc)) ||
	    (ratio < 1.0 - 1.0/(2 * hw_inc))) {
		return -EINVAL;
	}

	/* Save new ratio. */
	context->clk_ratio = ratio;

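	/* Apply the ratio via the ENET timer correction mechanism: use an
	 * adjusted increment (corr = hw_inc +/- 1 ns) and compute the period
	 * (mul, in timer clocks) at which it is applied instead of hw_inc,
	 * so the average rate matches the requested ratio. (This assumes
	 * corr/mul map to the correction increment and correction period
	 * arguments of ENET_Ptp1588AdjustTimer().)
	 */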
	if (ratio < 1.0) {
		corr = hw_inc - 1;
		val = 1.0 / (hw_inc * (1.0 - ratio));
	} else if (ratio > 1.0) {
		corr = hw_inc + 1;
		val = 1.0 / (hw_inc * (ratio - 1.0));
	} else {
		val = 0;
		corr = hw_inc;
	}

	if (val >= INT32_MAX) {
		/* Value is too high.
		 * It is not possible to adjust the rate of the clock.
		 */
		mul = 0;
	} else {
		mul = val;
	}
	k_mutex_lock(&context->ptp_mutex, K_FOREVER);
	ENET_Ptp1588AdjustTimer(context->base, corr, mul);
	k_mutex_unlock(&context->ptp_mutex);

	return 0;
}

static const struct ptp_clock_driver_api api = {
	.set = ptp_clock_mcux_set,
	.get = ptp_clock_mcux_get,
	.adjust = ptp_clock_mcux_adjust,
	.rate_adjust = ptp_clock_mcux_rate_adjust,
};

static int ptp_mcux_init(const struct device *port)
{
	const struct device *const eth_dev = DEVICE_DT_GET(DT_NODELABEL(enet));
	struct eth_context *context = eth_dev->data;
	struct ptp_context *ptp_context = port->data;
#if defined(CONFIG_PINCTRL)
	int err;

	err = pinctrl_apply_state(ptp_context->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
#endif /* CONFIG_PINCTRL */

	context->ptp_clock = port;
	ptp_context->eth_context = context;

	return 0;
}

DEVICE_DEFINE(mcux_ptp_clock_0, PTP_CLOCK_NAME, ptp_mcux_init,
	      NULL, &ptp_mcux_0_context, NULL, POST_KERNEL,
	      CONFIG_ETH_MCUX_PTP_CLOCK_INIT_PRIO, &api);

#endif /* CONFIG_PTP_CLOCK_MCUX */