1 /* NXP ENET MAC Driver
2 *
3 * Copyright 2023 NXP
4 *
5 * Inspiration from eth_mcux.c, which is:
6 * Copyright (c) 2016-2017 ARM Ltd
7 * Copyright (c) 2016 Linaro Ltd
8 * Copyright (c) 2018 Intel Corporation
9 * Copyright 2023 NXP
10 *
11 * SPDX-License-Identifier: Apache-2.0
12 */
13
14 #define DT_DRV_COMPAT nxp_enet_mac
15
16 /* Set up logging module for this driver */
17 #define LOG_MODULE_NAME eth_nxp_enet_mac
18 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
19 #include <zephyr/logging/log.h>
20 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
21
22 /*
23 ************
24 * Includes *
25 ************
26 */
27
28 #include <zephyr/device.h>
29 #include <zephyr/sys/util.h>
30 #include <zephyr/kernel.h>
31 #include <zephyr/sys/__assert.h>
32 #include <zephyr/net/net_pkt.h>
33 #include <zephyr/net/net_if.h>
34 #include <zephyr/net/ethernet.h>
35 #include <ethernet/eth_stats.h>
36 #include <zephyr/drivers/pinctrl.h>
37 #include <zephyr/drivers/clock_control.h>
38 #include <zephyr/drivers/ethernet/eth_nxp_enet.h>
39 #include <zephyr/dt-bindings/ethernet/nxp_enet.h>
40 #include <zephyr/net/phy.h>
41 #include <zephyr/net/mii.h>
42 #include <zephyr/drivers/ptp_clock.h>
43 #if defined(CONFIG_NET_DSA)
44 #include <zephyr/net/dsa.h>
45 #endif
46
47 #include "fsl_enet.h"
48
49 /*
50 ***********
51 * Defines *
52 ***********
53 */
54
/* Only ring/queue 0 of the ENET IP is used by this driver */
#define RING_ID 0
56
57 /*
58 *********************
59 * Driver Structures *
60 *********************
61 */
62
/* Constant (ROM-able) per-instance configuration */
struct nxp_enet_mac_config {
	ENET_Type *base;			/* ENET peripheral base address */
	const struct device *clock_dev;		/* controller of the ENET module clock */
	clock_control_subsys_t clock_subsys;	/* clock subsystem id for this module */
	void (*generate_mac)(uint8_t *mac_addr); /* NULL when a fixed MAC address is used */
	const struct pinctrl_dev_config *pincfg; /* pin control configuration */
	enet_buffer_config_t buffer_config;	/* DMA descriptor/buffer layout for the MCUX HAL */
	uint8_t phy_mode;			/* NXP_ENET_MII_MODE or NXP_ENET_RMII_MODE */
	void (*irq_config_func)(void);		/* connects and enables this instance's IRQs */
	const struct device *phy_dev;		/* attached PHY device */
	const struct device *mdio;		/* MDIO controller sharing this ENET module */
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
	const struct device *ptp_clock;		/* companion PTP clock device */
#endif
};
78
/* Mutable per-instance run-time state */
struct nxp_enet_mac_data {
	struct net_if *iface;		/* main (non-VLAN) network interface */
	uint8_t mac_addr[6];		/* current MAC address of this port */
	enet_handle_t enet_handle;	/* MCUX HAL driver handle */
	struct k_sem tx_buf_sem;	/* counts available TX buffer descriptors */

	K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_NXP_ENET_RX_THREAD_STACK_SIZE);

	struct k_thread rx_thread;	/* deferred RX processing thread */
	struct k_sem rx_thread_sem;	/* signalled from the ISR on RX events */
	struct k_mutex tx_frame_buf_mutex; /* serializes access to tx_frame_buf */
	struct k_mutex rx_frame_buf_mutex; /* serializes access to rx_frame_buf */
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
	struct k_sem ptp_ts_sem;	/* given when a TX timestamp has been captured */
	struct k_mutex *ptp_mutex; /* created in PTP driver */
#endif
	/* TODO: FIXME. This Ethernet frame sized buffer is used for
	 * interfacing with MCUX. How it works is that hardware uses
	 * DMA scatter buffers to receive a frame, and then public
	 * MCUX call gathers them into this buffer (there's no other
	 * public interface). All this happens only for this driver
	 * to scatter this buffer again into Zephyr fragment buffers.
	 * This is not efficient, but proper resolution of this issue
	 * depends on introduction of zero-copy networking support
	 * in Zephyr, and adding needed interface to MCUX (or
	 * bypassing it and writing a more complex driver working
	 * directly with hardware).
	 *
	 * Note that we do not copy FCS into this buffer thus the
	 * size is 1514 bytes.
	 */
	uint8_t *tx_frame_buf; /* Max MTU + ethernet header */
	uint8_t *rx_frame_buf; /* Max MTU + ethernet header */
};
113
114 /*
115 ********************
116 * Helper Functions *
117 ********************
118 */
119
/* Resolve the interface for a VLAN tag, falling back to the main iface. */
static inline struct net_if *get_iface(struct nxp_enet_mac_data *data, uint16_t vlan_tag)
{
	struct net_if *vlan_iface = net_eth_get_vlan_iface(data->iface, vlan_tag);

	if (vlan_iface == NULL) {
		return data->iface;
	}

	return vlan_iface;
}
126
/* Multicast monitor callback: mirror IPv4/IPv6 group joins/leaves into
 * the MAC's hardware multicast hash filter.
 */
static void net_if_mcast_cb(struct net_if *iface,
			    const struct net_addr *addr,
			    bool is_joined)
{
	const struct device *dev = net_if_get_device(iface);
	const struct nxp_enet_mac_config *config = dev->config;
	struct net_eth_addr group_mac;

	switch (addr->family) {
	case AF_INET:
		if (!IS_ENABLED(CONFIG_NET_IPV4)) {
			return;
		}
		net_eth_ipv4_mcast_to_mac_addr(&addr->in_addr, &group_mac);
		break;
	case AF_INET6:
		if (!IS_ENABLED(CONFIG_NET_IPV6)) {
			return;
		}
		net_eth_ipv6_mcast_to_mac_addr(&addr->in6_addr, &group_mac);
		break;
	default:
		return;
	}

	if (is_joined) {
		ENET_AddMulticastGroup(config->base, group_mac.addr);
	} else {
		ENET_LeaveMulticastGroup(config->base, group_mac.addr);
	}
}
149
150 #if defined(CONFIG_PTP_CLOCK_NXP_ENET)
eth_get_ptp_data(struct net_if * iface,struct net_pkt * pkt)151 static bool eth_get_ptp_data(struct net_if *iface, struct net_pkt *pkt)
152 {
153 struct net_eth_vlan_hdr *hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
154 struct ethernet_context *eth_ctx = net_if_l2_data(iface);
155 bool pkt_is_ptp;
156
157 if (net_eth_is_vlan_enabled(eth_ctx, iface)) {
158 pkt_is_ptp = ntohs(hdr_vlan->type) == NET_ETH_PTYPE_PTP;
159 } else {
160 pkt_is_ptp = ntohs(NET_ETH_HDR(pkt)->type) == NET_ETH_PTYPE_PTP;
161 }
162
163 if (pkt_is_ptp) {
164 net_pkt_set_priority(pkt, NET_PRIORITY_CA);
165 }
166
167 return pkt_is_ptp;
168 }
169
170
/* TX-complete hook: if the transmitted frame was a PTP frame with a
 * captured hardware timestamp, copy the timestamp onto the packet,
 * publish it to the stack, and wake the sender blocked in
 * eth_wait_for_ptp_ts(). Always drops the extra packet reference taken
 * before transmission.
 */
static inline void ts_register_tx_event(const struct device *dev,
					 enet_frame_info_t *frameinfo)
{
	struct nxp_enet_mac_data *data = dev->data;
	struct net_pkt *pkt = frameinfo->context;

	/* Only touch the packet while it is still referenced */
	if (pkt && atomic_get(&pkt->atomic_ref) > 0) {
		if (eth_get_ptp_data(net_pkt_iface(pkt), pkt) && frameinfo->isTsAvail) {
			/* ptp_mutex (owned by the PTP clock driver) serializes
			 * access to shared PTP state.
			 */
			k_mutex_lock(data->ptp_mutex, K_FOREVER);

			pkt->timestamp.nanosecond = frameinfo->timeStamp.nanosecond;
			pkt->timestamp.second = frameinfo->timeStamp.second;

			net_if_add_tx_timestamp(pkt);
			/* Unblock eth_wait_for_ptp_ts() */
			k_sem_give(&data->ptp_ts_sem);

			k_mutex_unlock(data->ptp_mutex);
		}
		net_pkt_unref(pkt);
	}
}
192
/* Take an extra reference on the packet (released by
 * ts_register_tx_event() from the TX callback) and block until the TX
 * timestamp for it has been captured.
 */
static inline void eth_wait_for_ptp_ts(const struct device *dev, struct net_pkt *pkt)
{
	struct nxp_enet_mac_data *data = dev->data;

	net_pkt_ref(pkt);
	k_sem_take(&data->ptp_ts_sem, K_FOREVER);
}
#else
/* PTP support disabled: stub out the timestamp helpers */
#define eth_get_ptp_data(...) false
#define ts_register_tx_event(...)
#define eth_wait_for_ptp_ts(...)
#endif /* CONFIG_PTP_CLOCK_NXP_ENET */
205
206 #ifdef CONFIG_PTP_CLOCK
eth_nxp_enet_get_ptp_clock(const struct device * dev)207 static const struct device *eth_nxp_enet_get_ptp_clock(const struct device *dev)
208 {
209 const struct nxp_enet_mac_config *config = dev->config;
210
211 return config->ptp_clock;
212 }
213 #endif /* CONFIG_PTP_CLOCK */
214
215 /*
216 *********************************
217 * Ethernet driver API Functions *
218 *********************************
219 */
220
eth_nxp_enet_tx(const struct device * dev,struct net_pkt * pkt)221 static int eth_nxp_enet_tx(const struct device *dev, struct net_pkt *pkt)
222 {
223 const struct nxp_enet_mac_config *config = dev->config;
224 struct nxp_enet_mac_data *data = dev->data;
225 uint16_t total_len = net_pkt_get_len(pkt);
226 bool frame_is_timestamped;
227 status_t ret;
228
229 /* Wait for a TX buffer descriptor to be available */
230 k_sem_take(&data->tx_buf_sem, K_FOREVER);
231
232 /* Enter critical section for TX frame buffer access */
233 k_mutex_lock(&data->tx_frame_buf_mutex, K_FOREVER);
234
235 /* Read network packet from upper layer into frame buffer */
236 ret = net_pkt_read(pkt, data->tx_frame_buf, total_len);
237 if (ret) {
238 k_sem_give(&data->tx_buf_sem);
239 goto exit;
240 }
241
242 frame_is_timestamped = eth_get_ptp_data(net_pkt_iface(pkt), pkt);
243
244 ret = ENET_SendFrame(config->base, &data->enet_handle, data->tx_frame_buf,
245 total_len, RING_ID, frame_is_timestamped, pkt);
246 if (ret == kStatus_Success) {
247 goto exit;
248 }
249
250 if (frame_is_timestamped) {
251 eth_wait_for_ptp_ts(dev, pkt);
252 } else {
253 LOG_ERR("ENET_SendFrame error: %d", ret);
254 ENET_ReclaimTxDescriptor(config->base, &data->enet_handle, RING_ID);
255 }
256
257 exit:
258 /* Leave critical section for TX frame buffer access */
259 k_mutex_unlock(&data->tx_frame_buf_mutex);
260
261 return ret;
262 }
263
/* Zephyr interface init hook: register the multicast monitor, program
 * the link-layer address, wire up DSA TX when enabled, then initialize
 * the L2 and finally connect/enable the IRQs once the driver is ready
 * to service them.
 */
static void eth_nxp_enet_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;
	static struct net_if_mcast_monitor mon;

	net_if_mcast_mon_register(&mon, iface, net_if_mcast_cb);

	net_if_set_link_addr(iface, data->mac_addr,
			     sizeof(data->mac_addr),
			     NET_LINK_ETHERNET);

	/* For VLAN, this value is only used to get the correct L2 driver.
	 * The iface pointer in context should contain the main interface
	 * if the VLANs are enabled.
	 */
	if (data->iface == NULL) {
		data->iface = iface;
	}

#if defined(CONFIG_NET_DSA)
	dsa_register_master_tx(iface, &eth_nxp_enet_tx);
#endif

	ethernet_init(iface);
	/* Carrier stays off until the PHY reports link up */
	net_eth_carrier_off(data->iface);

	config->irq_config_func();
}
294
eth_nxp_enet_get_capabilities(const struct device * dev)295 static enum ethernet_hw_caps eth_nxp_enet_get_capabilities(const struct device *dev)
296 {
297 ARG_UNUSED(dev);
298
299 return ETHERNET_HW_VLAN | ETHERNET_LINK_10BASE_T |
300 #if defined(CONFIG_PTP_CLOCK_NXP_ENET)
301 ETHERNET_PTP |
302 #endif
303 #if defined(CONFIG_NET_DSA)
304 ETHERNET_DSA_MASTER_PORT |
305 #endif
306 #if defined(CONFIG_ETH_NXP_ENET_HW_ACCELERATION)
307 ETHERNET_HW_TX_CHKSUM_OFFLOAD |
308 ETHERNET_HW_RX_CHKSUM_OFFLOAD |
309 #endif
310 ETHERNET_LINK_100BASE_T;
311 }
312
eth_nxp_enet_set_config(const struct device * dev,enum ethernet_config_type type,const struct ethernet_config * cfg)313 static int eth_nxp_enet_set_config(const struct device *dev,
314 enum ethernet_config_type type,
315 const struct ethernet_config *cfg)
316 {
317 struct nxp_enet_mac_data *data = dev->data;
318 const struct nxp_enet_mac_config *config = dev->config;
319
320 switch (type) {
321 case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
322 memcpy(data->mac_addr,
323 cfg->mac_address.addr,
324 sizeof(data->mac_addr));
325 ENET_SetMacAddr(config->base, data->mac_addr);
326 net_if_set_link_addr(data->iface, data->mac_addr,
327 sizeof(data->mac_addr),
328 NET_LINK_ETHERNET);
329 LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
330 dev->name,
331 data->mac_addr[0], data->mac_addr[1],
332 data->mac_addr[2], data->mac_addr[3],
333 data->mac_addr[4], data->mac_addr[5]);
334 return 0;
335 default:
336 break;
337 }
338
339 return -ENOTSUP;
340 }
341
342 /*
343 *****************************
344 * Ethernet RX Functionality *
345 *****************************
346 */
347
/* Receive a single frame from the hardware and hand it to the stack.
 *
 * Returns 1 when a frame was consumed (caller should poll again),
 * 0 when the RX queue is empty, and -EIO on error. On error the
 * offending frame is flushed from the hardware queue where needed.
 */
static int eth_nxp_enet_rx(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;
	struct nxp_enet_mac_data *data = dev->data;
	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
	uint32_t frame_length = 0U;
	struct net_if *iface;
	struct net_pkt *pkt = NULL;
	status_t status;
	uint32_t ts;

	status = ENET_GetRxFrameSize(&data->enet_handle,
				     (uint32_t *)&frame_length, RING_ID);
	if (status == kStatus_ENET_RxFrameEmpty) {
		return 0;
	} else if (status == kStatus_ENET_RxFrameError) {
		enet_data_error_stats_t error_stats;

		LOG_ERR("ENET_GetRxFrameSize return: %d", (int)status);

		ENET_GetRxErrBeforeReadFrame(&data->enet_handle,
					     &error_stats, RING_ID);
		goto flush;
	}

	if (frame_length > NET_ETH_MAX_FRAME_SIZE) {
		LOG_ERR("Frame too large (%d)", frame_length);
		goto flush;
	}

	/* Using root iface. It will be updated in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(data->iface, frame_length,
					   AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		goto flush;
	}

	/* Gather the DMA scatter buffers into the shared scratch buffer */
	k_mutex_lock(&data->rx_frame_buf_mutex, K_FOREVER);
	status = ENET_ReadFrame(config->base, &data->enet_handle,
				data->rx_frame_buf, frame_length, RING_ID, &ts);
	k_mutex_unlock(&data->rx_frame_buf_mutex);

	if (status) {
		LOG_ERR("ENET_ReadFrame failed: %d", (int)status);
		goto error;
	}

	if (net_pkt_write(pkt, data->rx_frame_buf, frame_length)) {
		LOG_ERR("Unable to write frame into the packet");
		goto error;
	}

	/* Extract VLAN tag (and priority) from a tagged frame */
	if (IS_ENABLED(CONFIG_NET_VLAN) && ntohs(NET_ETH_HDR(pkt)->type) == NET_ETH_PTYPE_VLAN) {
		struct net_eth_vlan_hdr *hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);

		net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci));
		vlan_tag = net_pkt_vlan_tag(pkt);

#if CONFIG_NET_TC_RX_COUNT > 1
		enum net_priority prio = net_vlan2priority(net_pkt_vlan_priority(pkt));

		net_pkt_set_priority(pkt, prio);
#endif /* CONFIG_NET_TC_RX_COUNT > 1 */
	}

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	k_mutex_lock(data->ptp_mutex, K_FOREVER);

	/* Invalid value by default. */
	pkt->timestamp.nanosecond = UINT32_MAX;
	pkt->timestamp.second = UINT64_MAX;

	/* Timestamp the packet using PTP clock */
	if (eth_get_ptp_data(get_iface(data, vlan_tag), pkt)) {
		struct net_ptp_time ptp_time;

		ptp_clock_get(config->ptp_clock, &ptp_time);

		/* If latest timestamp reloads after getting from Rx BD,
		 * then second - 1 to make sure the actual Rx timestamp is accurate
		 */
		if (ptp_time.nanosecond < ts) {
			ptp_time.second--;
		}

		pkt->timestamp.nanosecond = ts;
		pkt->timestamp.second = ptp_time.second;
	}
	k_mutex_unlock(data->ptp_mutex);
#endif /* CONFIG_PTP_CLOCK_NXP_ENET */

	iface = get_iface(data, vlan_tag);
#if defined(CONFIG_NET_DSA)
	iface = dsa_net_recv(iface, &pkt);
#endif
	if (net_recv_data(iface, pkt) < 0) {
		goto error;
	}

	return 1;
flush:
	/* Flush the current read buffer. This operation can
	 * only report failure if there is no frame to flush,
	 * which cannot happen in this context.
	 */
	status = ENET_ReadFrame(config->base, &data->enet_handle, NULL,
				0, RING_ID, NULL);
	__ASSERT_NO_MSG(status == kStatus_Success);
error:
	if (pkt) {
		net_pkt_unref(pkt);
	}
	eth_stats_update_errors_rx(get_iface(data, vlan_tag));
	return -EIO;
}
463
eth_nxp_enet_rx_thread(void * arg1,void * unused1,void * unused2)464 static void eth_nxp_enet_rx_thread(void *arg1, void *unused1, void *unused2)
465 {
466 const struct device *dev = arg1;
467 const struct nxp_enet_mac_config *config = dev->config;
468 struct nxp_enet_mac_data *data = dev->data;
469
470 while (1) {
471 if (k_sem_take(&data->rx_thread_sem, K_FOREVER) == 0) {
472 while (eth_nxp_enet_rx(dev) == 1) {
473 ;
474 }
475 /* enable the IRQ for RX */
476 ENET_EnableInterrupts(config->base,
477 kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
478 }
479 }
480 }
481
482 /*
483 ****************************
484 * PHY management functions *
485 ****************************
486 */
487
nxp_enet_phy_reset_and_configure(const struct device * phy)488 static int nxp_enet_phy_reset_and_configure(const struct device *phy)
489 {
490 int ret;
491
492 /* Reset the PHY */
493 ret = phy_write(phy, MII_BMCR, MII_BMCR_RESET);
494 if (ret) {
495 return ret;
496 }
497
498 /* 802.3u standard says reset takes up to 0.5s */
499 k_busy_wait(500000);
500
501 /* Configure the PHY */
502 return phy_configure_link(phy, LINK_HALF_10BASE_T | LINK_FULL_10BASE_T |
503 LINK_HALF_100BASE_T | LINK_FULL_100BASE_T);
504 }
505
nxp_enet_phy_cb(const struct device * phy,struct phy_link_state * state,void * eth_dev)506 static void nxp_enet_phy_cb(const struct device *phy,
507 struct phy_link_state *state,
508 void *eth_dev)
509 {
510 const struct device *dev = eth_dev;
511 struct nxp_enet_mac_data *data = dev->data;
512
513 if (!data->iface) {
514 return;
515 }
516
517 if (!state->is_up) {
518 net_eth_carrier_off(data->iface);
519 nxp_enet_phy_reset_and_configure(phy);
520 } else {
521 net_eth_carrier_on(data->iface);
522 }
523
524 LOG_INF("Link is %s", state->is_up ? "up" : "down");
525 }
526
527
nxp_enet_phy_init(const struct device * dev)528 static int nxp_enet_phy_init(const struct device *dev)
529 {
530 const struct nxp_enet_mac_config *config = dev->config;
531 int ret = 0;
532
533 ret = nxp_enet_phy_reset_and_configure(config->phy_dev);
534 if (ret) {
535 return ret;
536 }
537
538 ret = phy_link_callback_set(config->phy_dev, nxp_enet_phy_cb, (void *)dev);
539 if (ret) {
540 return ret;
541 }
542
543 return ret;
544 }
545
546 /*
547 ****************************
548 * Callbacks and interrupts *
549 ****************************
550 */
551
nxp_enet_driver_cb(const struct device * dev,enum nxp_enet_driver dev_type,enum nxp_enet_callback_reason event,void * data)552 void nxp_enet_driver_cb(const struct device *dev, enum nxp_enet_driver dev_type,
553 enum nxp_enet_callback_reason event, void *data)
554 {
555 if (dev_type == NXP_ENET_MDIO) {
556 nxp_enet_mdio_callback(dev, event, data);
557 } else if (dev_type == NXP_ENET_PTP_CLOCK) {
558 nxp_enet_ptp_clock_callback(dev, event, data);
559 }
560 }
561
/* MCUX HAL event callback, invoked from the ENET IRQ handlers.
 * The ringId parameter only exists on multi-queue variants of the IP.
 */
static void eth_callback(ENET_Type *base, enet_handle_t *handle,
#if FSL_FEATURE_ENET_QUEUE > 1
			 uint32_t ringId,
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
			 enet_event_t event, enet_frame_info_t *frameinfo, void *param)
{
	const struct device *dev = param;
	const struct nxp_enet_mac_config *config = dev->config;
	struct nxp_enet_mac_data *data = dev->data;

	switch (event) {
	case kENET_RxEvent:
		/* Defer RX processing to the driver's RX thread */
		k_sem_give(&data->rx_thread_sem);
		break;
	case kENET_TxEvent:
		ts_register_tx_event(dev, frameinfo);
		/* Free the TX buffer. */
		k_sem_give(&data->tx_buf_sem);
		break;
	case kENET_ErrEvent:
		/* Error event: BABR/BABT/EBERR/LC/RL/UN/PLR. */
		break;
	case kENET_WakeUpEvent:
		/* Wake up from sleep mode event. */
		break;
	case kENET_TimeStampEvent:
		/* Time stamp event. */
		/* Reset periodic timer to default value. */
		config->base->ATPER = NSEC_PER_SEC;
		break;
	case kENET_TimeStampAvailEvent:
		/* Time stamp available event. */
		break;
	}
}
597
598 #if FSL_FEATURE_ENET_QUEUE > 1
599 #define ENET_IRQ_HANDLER_ARGS(base, handle) base, handle, 0
600 #else
601 #define ENET_IRQ_HANDLER_ARGS(base, handle) base, handle
602 #endif /* FSL_FEATURE_ENET_QUEUE > 1 */
603
/* Common ISR for all interrupt lines of an ENET instance.
 *
 * RX interrupts are acknowledged here and then masked; they are
 * re-enabled by the RX thread once the hardware queue has been drained.
 */
static void eth_nxp_enet_isr(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;
	struct nxp_enet_mac_data *data = dev->data;
	unsigned int irq_lock_key = irq_lock();

	uint32_t eir = ENET_GetInterruptStatus(config->base);

	if (eir & (kENET_RxBufferInterrupt | kENET_RxFrameInterrupt)) {
		/* HAL handler signals the RX thread via eth_callback() */
		ENET_ReceiveIRQHandler(ENET_IRQ_HANDLER_ARGS(config->base, &data->enet_handle));
		ENET_DisableInterrupts(config->base,
				       kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
	}

	if (eir & kENET_TxFrameInterrupt) {
		ENET_TransmitIRQHandler(ENET_IRQ_HANDLER_ARGS(config->base, &data->enet_handle));
	}

	if (eir & kENET_TxBufferInterrupt) {
		ENET_ClearInterruptStatus(config->base, kENET_TxBufferInterrupt);
		ENET_DisableInterrupts(config->base, kENET_TxBufferInterrupt);
	}

	if (eir & ENET_EIR_MII_MASK) {
		/* MII management (MDIO) transaction completed */
		nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_INTERRUPT, NULL);
	}

	irq_unlock(irq_lock_key);
}
633
634 /*
635 ******************
636 * Initialization *
637 ******************
638 */
639
/* Device init: apply pinmux, create kernel objects and the RX thread,
 * configure and start the MAC via the MCUX HAL, notify the sibling
 * MDIO/PTP drivers, and finally bring up the PHY.
 *
 * Returns 0 on success or a negative errno.
 */
static int eth_nxp_enet_init(const struct device *dev)
{
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;
	enet_config_t enet_config;
	uint32_t enet_module_clock_rate;
	int err;

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	/* Initialize kernel objects */
	k_mutex_init(&data->rx_frame_buf_mutex);
	k_mutex_init(&data->tx_frame_buf_mutex);
	k_sem_init(&data->rx_thread_sem, 0, CONFIG_ETH_NXP_ENET_RX_BUFFERS);
	k_sem_init(&data->tx_buf_sem,
		   CONFIG_ETH_NXP_ENET_TX_BUFFERS, CONFIG_ETH_NXP_ENET_TX_BUFFERS);
#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	k_sem_init(&data->ptp_ts_sem, 0, 1);
#endif

	/* generate_mac is NULL when a fixed MAC address is configured */
	if (config->generate_mac) {
		config->generate_mac(data->mac_addr);
	}

	/* Start interruption-poll thread */
	k_thread_create(&data->rx_thread, data->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(data->rx_thread_stack),
			eth_nxp_enet_rx_thread, (void *) dev, NULL, NULL,
			K_PRIO_COOP(2),
			0, K_NO_WAIT);
	k_thread_name_set(&data->rx_thread, "eth_nxp_enet_rx");

	/* Get ENET IP module clock rate */
	err = clock_control_get_rate(config->clock_dev, config->clock_subsys,
				     &enet_module_clock_rate);
	if (err) {
		return err;
	}

	/* Use HAL to set up MAC configuration */
	ENET_GetDefaultConfig(&enet_config);

	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE)) {
		enet_config.macSpecialConfig |= kENET_ControlPromiscuousEnable;
	}

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		enet_config.macSpecialConfig |= kENET_ControlVLANTagEnable;
	}

	if (IS_ENABLED(CONFIG_ETH_NXP_ENET_HW_ACCELERATION)) {
		enet_config.txAccelerConfig |=
			kENET_TxAccelIpCheckEnabled | kENET_TxAccelProtoCheckEnabled;
		enet_config.rxAccelerConfig |=
			kENET_RxAccelIpCheckEnabled | kENET_RxAccelProtoCheckEnabled;
	}

	enet_config.interrupt |= kENET_RxFrameInterrupt;
	enet_config.interrupt |= kENET_TxFrameInterrupt;

	/* Select the MII interface mode from devicetree */
	if (config->phy_mode == NXP_ENET_MII_MODE) {
		enet_config.miiMode = kENET_MiiMode;
	} else if (config->phy_mode == NXP_ENET_RMII_MODE) {
		enet_config.miiMode = kENET_RmiiMode;
	} else {
		return -EINVAL;
	}

	enet_config.callback = eth_callback;
	enet_config.userData = (void *)dev;

	ENET_Up(config->base,
		&data->enet_handle,
		&enet_config,
		&config->buffer_config,
		data->mac_addr,
		enet_module_clock_rate);

	/* Tell the MDIO driver the module (and its MII speed) was reset */
	nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_MODULE_RESET, NULL);

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	/* PTP driver creates ptp_mutex and returns it through the pointer */
	nxp_enet_driver_cb(config->ptp_clock, NXP_ENET_PTP_CLOCK,
			   NXP_ENET_MODULE_RESET, &data->ptp_mutex);
	ENET_SetTxReclaim(&data->enet_handle, true, 0);
#endif

	ENET_ActiveRead(config->base);

	err = nxp_enet_phy_init(dev);
	if (err) {
		return err;
	}

	LOG_DBG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x",
		dev->name,
		data->mac_addr[0], data->mac_addr[1],
		data->mac_addr[2], data->mac_addr[3],
		data->mac_addr[4], data->mac_addr[5]);

	return 0;
}
744
/* With DSA enabled, TX goes through the DSA layer, which in turn calls
 * eth_nxp_enet_tx() on the master port.
 */
#ifdef CONFIG_NET_DSA
#define NXP_ENET_SEND_FUNC dsa_tx
#else
#define NXP_ENET_SEND_FUNC eth_nxp_enet_tx
#endif /* CONFIG_NET_DSA */

/* Ethernet L2 driver API implemented by this driver */
static const struct ethernet_api api_funcs = {
	.iface_api.init	= eth_nxp_enet_iface_init,
	.get_capabilities	= eth_nxp_enet_get_capabilities,
	.set_config	= eth_nxp_enet_set_config,
	.send	= NXP_ENET_SEND_FUNC,
#if defined(CONFIG_PTP_CLOCK)
	.get_ptp_clock	= eth_nxp_enet_get_ptp_clock,
#endif
};
760
/* Connect and enable one ENET interrupt line; expanded once per
 * interrupt via DT_INST_FOREACH_PROP_ELEM.
 */
#define NXP_ENET_CONNECT_IRQ(node_id, irq_names, idx)					\
	do {										\
		IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, idx, irq),				\
				DT_IRQ_BY_IDX(node_id, idx, priority),			\
				eth_nxp_enet_isr,					\
				DEVICE_DT_GET(node_id),					\
				0);							\
		irq_enable(DT_IRQ_BY_IDX(node_id, idx, irq));				\
	} while (false);

/* Freescale/NXP OUI used for generated MAC addresses */
#define FREESCALE_OUI_B0 0x00
#define FREESCALE_OUI_B1 0x04
#define FREESCALE_OUI_B2 0x9f

/* SOC-specific unique id used to derive a stable, board-unique MAC */
#if defined(CONFIG_SOC_SERIES_IMX_RT10XX)
#define ETH_NXP_ENET_UNIQUE_ID	(OCOTP->CFG1 ^ OCOTP->CFG2)
#elif defined(CONFIG_SOC_SERIES_IMX_RT11XX)
#define ETH_NXP_ENET_UNIQUE_ID	(OCOTP->FUSEN[40].FUSE)
#elif defined(CONFIG_SOC_SERIES_KINETIS_K6X)
#define ETH_NXP_ENET_UNIQUE_ID	(SIM->UIDH ^ SIM->UIDMH ^ SIM->UIDML ^ SIM->UIDL)
#else
#error "Unsupported SOC"
#endif
784
/* Generator using a fully random MAC with the Freescale OUI */
#define NXP_ENET_GENERATE_MAC_RANDOM(n)		\
	static void generate_eth_##n##_mac(uint8_t *mac_addr)		\
	{								\
		gen_random_mac(mac_addr,				\
			       FREESCALE_OUI_B0,			\
			       FREESCALE_OUI_B1,			\
			       FREESCALE_OUI_B2);			\
	}

/* Generator deriving a stable MAC from the SOC unique id; the LAA bit
 * is forced and the instance number keeps multiple ports distinct.
 */
#define NXP_ENET_GENERATE_MAC_UNIQUE(n)		\
	static void generate_eth_##n##_mac(uint8_t *mac_addr)		\
	{								\
		uint32_t id = ETH_NXP_ENET_UNIQUE_ID;			\
									\
		mac_addr[0] = FREESCALE_OUI_B0;				\
		mac_addr[0] |= 0x02; /* force LAA bit */		\
		mac_addr[1] = FREESCALE_OUI_B1;				\
		mac_addr[2] = FREESCALE_OUI_B2;				\
		mac_addr[3] = id >> 8;					\
		mac_addr[4] = id >> 16;					\
		mac_addr[5] = id >> 0;					\
		mac_addr[5] += n;					\
	}

/* Pick the MAC generator implementation based on devicetree */
#define NXP_ENET_GENERATE_MAC(n)				\
	COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address),	\
		    (NXP_ENET_GENERATE_MAC_RANDOM(n)),		\
		    (NXP_ENET_GENERATE_MAC_UNIQUE(n)))

/* Use the devicetree MAC when valid, otherwise start from zeros and
 * let generate_mac() fill it in at init time.
 */
#define NXP_ENET_DECIDE_MAC_ADDR(n)				\
	COND_CODE_1(NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)),	\
			(NXP_ENET_MAC_ADDR_LOCAL(n)),		\
			(NXP_ENET_MAC_ADDR_GENERATED(n)))

#define NXP_ENET_DECIDE_MAC_GEN_FUNC(n)				\
	COND_CODE_1(NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)),	\
			(NXP_ENET_GEN_MAC_FUNCTION_NO(n)),	\
			(NXP_ENET_GEN_MAC_FUNCTION_YES(n)))

#define NXP_ENET_MAC_ADDR_LOCAL(n)				\
	.mac_addr = DT_INST_PROP(n, local_mac_address),

#define NXP_ENET_MAC_ADDR_GENERATED(n)				\
	.mac_addr = {0},

#define NXP_ENET_GEN_MAC_FUNCTION_NO(n)				\
	.generate_mac = NULL,

#define NXP_ENET_GEN_MAC_FUNCTION_YES(n)			\
	.generate_mac = generate_eth_##n##_mac,
835
#define NXP_ENET_DT_PHY_DEV(node_id, phy_phandle, idx)					\
	DEVICE_DT_GET(DT_PHANDLE_BY_IDX(node_id, phy_phandle, idx))

/* Section placement for DMA descriptors/buffers: prefer DTCM when
 * requested, otherwise non-cacheable RAM, otherwise default sections.
 */
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay) && \
	CONFIG_ETH_NXP_ENET_USE_DTCM_FOR_DMA_BUFFER
#define _nxp_enet_dma_desc_section __dtcm_bss_section
#define _nxp_enet_dma_buffer_section __dtcm_noinit_section
#define _nxp_enet_driver_buffer_section __dtcm_noinit_section
#elif defined(CONFIG_NOCACHE_MEMORY)
#define _nxp_enet_dma_desc_section __nocache
#define _nxp_enet_dma_buffer_section __nocache
#define _nxp_enet_driver_buffer_section
#else
#define _nxp_enet_dma_desc_section
#define _nxp_enet_dma_buffer_section
#define _nxp_enet_driver_buffer_section
#endif

/* Use ENET_FRAME_MAX_VLANFRAMELEN for VLAN frame size
 * Use ENET_FRAME_MAX_FRAMELEN for Ethernet frame size
 */
#if defined(CONFIG_NET_VLAN)
#if !defined(ENET_FRAME_MAX_VLANFRAMELEN)
#define ENET_FRAME_MAX_VLANFRAMELEN (ENET_FRAME_MAX_FRAMELEN + 4)
#endif
#define ETH_NXP_ENET_BUFFER_SIZE \
		ROUND_UP(ENET_FRAME_MAX_VLANFRAMELEN, ENET_BUFF_ALIGNMENT)
#else
#define ETH_NXP_ENET_BUFFER_SIZE \
		ROUND_UP(ENET_FRAME_MAX_FRAMELEN, ENET_BUFF_ALIGNMENT)
#endif /* CONFIG_NET_VLAN */

/* Map the devicetree phy-connection-type enum to the driver constants */
#define NXP_ENET_PHY_MODE(node_id)							\
	DT_ENUM_HAS_VALUE(node_id, phy_connection_type, mii) ? NXP_ENET_MII_MODE :	\
	(DT_ENUM_HAS_VALUE(node_id, phy_connection_type, rmii) ? NXP_ENET_RMII_MODE :	\
	NXP_ENET_INVALID_MII_MODE)

/* PTP-dependent parts of the per-instance definitions */
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
#define NXP_ENET_PTP_DEV(n) .ptp_clock = DEVICE_DT_GET(DT_INST_PHANDLE(n, nxp_ptp_clock)),
#define NXP_ENET_FRAMEINFO_ARRAY(n)						\
	static enet_frame_info_t						\
		nxp_enet_##n##_tx_frameinfo_array[CONFIG_ETH_NXP_ENET_TX_BUFFERS];
#define NXP_ENET_FRAMEINFO(n)	\
	.txFrameInfo = nxp_enet_##n##_tx_frameinfo_array,
#else
#define NXP_ENET_PTP_DEV(n)
#define NXP_ENET_FRAMEINFO_ARRAY(n)
#define NXP_ENET_FRAMEINFO(n)	\
	.txFrameInfo = NULL
#endif
886
/* Per-instance definition: MAC generator, pinctrl, IRQ config function,
 * DMA descriptors and buffers, config/data structs, and the Zephyr
 * Ethernet device registration.
 */
#define NXP_ENET_MAC_INIT(n)								\
		NXP_ENET_GENERATE_MAC(n)						\
											\
		PINCTRL_DT_INST_DEFINE(n);						\
											\
		NXP_ENET_FRAMEINFO_ARRAY(n)						\
											\
		static void nxp_enet_##n##_irq_config_func(void)			\
		{									\
			DT_INST_FOREACH_PROP_ELEM(n, interrupt_names,			\
						NXP_ENET_CONNECT_IRQ);			\
		}									\
											\
		volatile static __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_desc_section					\
			enet_rx_bd_struct_t						\
			nxp_enet_##n##_rx_buffer_desc[CONFIG_ETH_NXP_ENET_RX_BUFFERS];	\
											\
		volatile static __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_desc_section					\
			enet_tx_bd_struct_t						\
			nxp_enet_##n##_tx_buffer_desc[CONFIG_ETH_NXP_ENET_TX_BUFFERS];	\
											\
		static uint8_t __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_buffer_section					\
			nxp_enet_##n##_rx_buffer[CONFIG_ETH_NXP_ENET_RX_BUFFERS]	\
						[ETH_NXP_ENET_BUFFER_SIZE];		\
											\
		static uint8_t __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_buffer_section					\
			nxp_enet_##n##_tx_buffer[CONFIG_ETH_NXP_ENET_TX_BUFFERS]	\
						[ETH_NXP_ENET_BUFFER_SIZE];		\
											\
		const struct nxp_enet_mac_config nxp_enet_##n##_config = {		\
			.base = (ENET_Type *)DT_REG_ADDR(DT_INST_PARENT(n)),		\
			.irq_config_func = nxp_enet_##n##_irq_config_func,		\
			.clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_INST_PARENT(n))),	\
			.clock_subsys = (void *)DT_CLOCKS_CELL_BY_IDX(			\
							DT_INST_PARENT(n), 0, name),	\
			.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
			.buffer_config = {						\
				.rxBdNumber = CONFIG_ETH_NXP_ENET_RX_BUFFERS,		\
				.txBdNumber = CONFIG_ETH_NXP_ENET_TX_BUFFERS,		\
				.rxBuffSizeAlign = ETH_NXP_ENET_BUFFER_SIZE,		\
				.txBuffSizeAlign = ETH_NXP_ENET_BUFFER_SIZE,		\
				.rxBdStartAddrAlign = nxp_enet_##n##_rx_buffer_desc,	\
				.txBdStartAddrAlign = nxp_enet_##n##_tx_buffer_desc,	\
				.rxBufferAlign = nxp_enet_##n##_rx_buffer[0],		\
				.txBufferAlign = nxp_enet_##n##_tx_buffer[0],		\
				.rxMaintainEnable = true,				\
				.txMaintainEnable = true,				\
				NXP_ENET_FRAMEINFO(n)					\
			},								\
			.phy_mode = NXP_ENET_PHY_MODE(DT_DRV_INST(n)),			\
			.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)),	\
			.mdio = DEVICE_DT_GET(DT_INST_PHANDLE(n, nxp_mdio)),		\
			NXP_ENET_PTP_DEV(n)						\
			NXP_ENET_DECIDE_MAC_GEN_FUNC(n)					\
		};									\
											\
		static _nxp_enet_driver_buffer_section uint8_t				\
			nxp_enet_##n##_tx_frame_buf[NET_ETH_MAX_FRAME_SIZE];		\
		static _nxp_enet_driver_buffer_section uint8_t				\
			nxp_enet_##n##_rx_frame_buf[NET_ETH_MAX_FRAME_SIZE];		\
											\
		struct nxp_enet_mac_data nxp_enet_##n##_data = {			\
			NXP_ENET_DECIDE_MAC_ADDR(n)					\
			.tx_frame_buf = nxp_enet_##n##_tx_frame_buf,			\
			.rx_frame_buf = nxp_enet_##n##_rx_frame_buf,			\
		};									\
											\
		ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_init, NULL,		\
					&nxp_enet_##n##_data, &nxp_enet_##n##_config,	\
					CONFIG_ETH_INIT_PRIORITY,			\
					&api_funcs, NET_ETH_MTU);

/* Instantiate the driver for every enabled nxp,enet-mac node */
DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_MAC_INIT)
964
965 /*
966 * ENET module-level management
967 */
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT nxp_enet

/* Module-level init: gate the clock and reset the ENET IP before any of
 * the MAC, MDIO, or PTP clock child drivers initialize.
 */
#define NXP_ENET_INIT(n)							\
										\
int nxp_enet_##n##_init(void)							\
{										\
	clock_control_on(DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),			\
				(void *)DT_INST_CLOCKS_CELL_BY_IDX(n, 0, name));\
										\
	ENET_Reset((ENET_Type *)DT_INST_REG_ADDR(n));				\
										\
	return 0;								\
}										\
										\
	/* Init the module before any of the MAC, MDIO, or PTP clock */		\
	SYS_INIT(nxp_enet_##n##_init, POST_KERNEL, 0);

DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_INIT)
987