/* NXP ENET MAC Driver
 *
 * Copyright 2023-2024 NXP
 *
 * Inspiration from eth_mcux.c, which was:
 *  Copyright (c) 2016-2017 ARM Ltd
 *  Copyright (c) 2016 Linaro Ltd
 *  Copyright (c) 2018 Intel Corporation
 *  Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT nxp_enet_mac

/* Set up logging module for this driver */
#define LOG_MODULE_NAME eth_nxp_enet_mac
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <zephyr/device.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/kernel/thread_stack.h>

#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/phy.h>
#include <zephyr/net/mii.h>
#include <ethernet/eth_stats.h>

#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>

#ifdef CONFIG_PTP_CLOCK
#include <zephyr/drivers/ptp_clock.h>
#endif

#ifdef CONFIG_NET_DSA
#include <zephyr/net/dsa.h>
#endif

#if defined(CONFIG_NET_POWER_MANAGEMENT) && defined(CONFIG_PM_DEVICE)
#include <zephyr/pm/device.h>
#endif

#include "../eth.h"
#include <zephyr/drivers/ethernet/eth_nxp_enet.h>
#include <zephyr/dt-bindings/ethernet/nxp_enet.h>
#include <fsl_enet.h>

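/* Freescale/NXP OUI (00:04:9f), used as the vendor prefix for generated MAC addresses */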
#define FREESCALE_OUI_B0 0x00
#define FREESCALE_OUI_B1 0x04
#define FREESCALE_OUI_B2 0x9f

#if defined(CONFIG_SOC_SERIES_IMXRT10XX)
#define ETH_NXP_ENET_UNIQUE_ID	(OCOTP->CFG1 ^ OCOTP->CFG2)
#elif defined(CONFIG_SOC_SERIES_IMXRT11XX)
#define ETH_NXP_ENET_UNIQUE_ID	(OCOTP->FUSEN[40].FUSE)
#elif defined(CONFIG_SOC_SERIES_KINETIS_K6X)
#define ETH_NXP_ENET_UNIQUE_ID	(SIM->UIDH ^ SIM->UIDMH ^ SIM->UIDML ^ SIM->UIDL)
#elif defined(CONFIG_SOC_SERIES_RW6XX)
#define ETH_NXP_ENET_UNIQUE_ID	(OCOTP->OTP_SHADOW[46])
#else
#define ETH_NXP_ENET_UNIQUE_ID 0xFFFFFF
#endif

#define RING_ID 0

enum mac_address_source {
	MAC_ADDR_SOURCE_LOCAL,
	MAC_ADDR_SOURCE_RANDOM,
	MAC_ADDR_SOURCE_UNIQUE,
	MAC_ADDR_SOURCE_FUSED,
	MAC_ADDR_SOURCE_INVALID,
};

struct nxp_enet_mac_config {
	const struct device *module_dev;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	enum mac_address_source mac_addr_source;
	const struct pinctrl_dev_config *pincfg;
	enet_buffer_config_t buffer_config[1];
	uint8_t phy_mode;
	void (*irq_config_func)(void);
	const struct device *phy_dev;
	const struct device *mdio;
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
	const struct device *ptp_clock;
#endif
};

struct nxp_enet_mac_data {
	ENET_Type *base;
	struct net_if *iface;
	uint8_t mac_addr[6];
	enet_handle_t enet_handle;
	struct k_sem tx_buf_sem;
	struct k_work rx_work;
	const struct device *dev;
	struct k_sem rx_thread_sem;
	struct k_mutex tx_frame_buf_mutex;
	struct k_mutex rx_frame_buf_mutex;
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
	struct k_sem ptp_ts_sem;
	struct k_mutex *ptp_mutex; /* created in PTP driver */
#endif
	uint8_t *tx_frame_buf;
	uint8_t *rx_frame_buf;
};

static K_THREAD_STACK_DEFINE(enet_rx_stack, CONFIG_ETH_NXP_ENET_RX_THREAD_STACK_SIZE);
static struct k_work_q rx_work_queue;

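/* Single work queue shared by all ENET MAC instances for deferred RX processing */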
static int rx_queue_init(void)
{
	struct k_work_queue_config cfg = {.name = "ENET_RX"};

	k_work_queue_init(&rx_work_queue);
	k_work_queue_start(&rx_work_queue, enet_rx_stack,
			   K_THREAD_STACK_SIZEOF(enet_rx_stack),
			   K_PRIO_COOP(CONFIG_ETH_NXP_ENET_RX_THREAD_PRIORITY),
			   &cfg);

	return 0;
}

SYS_INIT(rx_queue_init, POST_KERNEL, 0);

static inline struct net_if *get_iface(struct nxp_enet_mac_data *data)
{
	return data->iface;
}

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
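/* Identify PTP frames (plain or VLAN-tagged) and raise their priority */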
static bool eth_get_ptp_data(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_eth_vlan_hdr *hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
	struct ethernet_context *eth_ctx = net_if_l2_data(iface);
	bool pkt_is_ptp;

	if (net_eth_is_vlan_enabled(eth_ctx, iface)) {
		pkt_is_ptp = ntohs(hdr_vlan->type) == NET_ETH_PTYPE_PTP;
	} else {
		pkt_is_ptp = ntohs(NET_ETH_HDR(pkt)->type) == NET_ETH_PTYPE_PTP;
	}

	if (pkt_is_ptp) {
		net_pkt_set_priority(pkt, NET_PRIORITY_CA);
	}

	return pkt_is_ptp;
}


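/* TX callback helper: attach the hardware TX timestamp to a PTP packet and wake the sender */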
static inline void ts_register_tx_event(const struct device *dev,
					 enet_frame_info_t *frameinfo)
{
	struct nxp_enet_mac_data *data = dev->data;
	struct net_pkt *pkt = frameinfo->context;

	if (pkt && atomic_get(&pkt->atomic_ref) > 0) {
		if (eth_get_ptp_data(net_pkt_iface(pkt), pkt) && frameinfo->isTsAvail) {
			k_mutex_lock(data->ptp_mutex, K_FOREVER);

			pkt->timestamp.nanosecond = frameinfo->timeStamp.nanosecond;
			pkt->timestamp.second = frameinfo->timeStamp.second;

			net_if_add_tx_timestamp(pkt);
			k_sem_give(&data->ptp_ts_sem);

			k_mutex_unlock(data->ptp_mutex);
		}
		net_pkt_unref(pkt);
	}
}

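/* Block the TX path until the timestamp for this PTP frame has been registered */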
static inline void eth_wait_for_ptp_ts(const struct device *dev, struct net_pkt *pkt)
{
	struct nxp_enet_mac_data *data = dev->data;

	net_pkt_ref(pkt);
	k_sem_take(&data->ptp_ts_sem, K_FOREVER);
}
#else
#define eth_get_ptp_data(...) false
#define ts_register_tx_event(...)
#define eth_wait_for_ptp_ts(...)
#endif /* CONFIG_PTP_CLOCK_NXP_ENET */

#ifdef CONFIG_PTP_CLOCK
static const struct device *eth_nxp_enet_get_ptp_clock(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;

	return config->ptp_clock;
}
#endif /* CONFIG_PTP_CLOCK */

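/* Copy the packet into the driver TX frame buffer and queue it for DMA transmission */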
static int eth_nxp_enet_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct nxp_enet_mac_data *data = dev->data;
	uint16_t total_len = net_pkt_get_len(pkt);
	bool frame_is_timestamped;
	status_t ret;

	/* Wait for a TX buffer descriptor to be available */
	k_sem_take(&data->tx_buf_sem, K_FOREVER);

	/* Enter critical section for TX frame buffer access */
	k_mutex_lock(&data->tx_frame_buf_mutex, K_FOREVER);

	ret = net_pkt_read(pkt, data->tx_frame_buf, total_len);
	if (ret) {
		k_sem_give(&data->tx_buf_sem);
		goto exit;
	}

	frame_is_timestamped = eth_get_ptp_data(net_pkt_iface(pkt), pkt);

	ret = ENET_SendFrame(data->base, &data->enet_handle, data->tx_frame_buf,
			     total_len, RING_ID, frame_is_timestamped, pkt);

	if (ret != kStatus_Success) {
		LOG_ERR("ENET_SendFrame error: %d", ret);
		ENET_ReclaimTxDescriptor(data->base, &data->enet_handle, RING_ID);
		ret = -EIO;
		goto exit;
	}

	if (frame_is_timestamped) {
		eth_wait_for_ptp_ts(dev, pkt);
	}

exit:
	/* Leave critical section for TX frame buffer access */
	k_mutex_unlock(&data->tx_frame_buf_mutex);

	return ret;
}

static enum ethernet_hw_caps eth_nxp_enet_get_capabilities(const struct device *dev)
{
#if defined(CONFIG_ETH_NXP_ENET_1G)
	const struct nxp_enet_mac_config *config = dev->config;
#else
	ARG_UNUSED(dev);
#endif
	enum ethernet_hw_caps caps;

	caps = ETHERNET_LINK_10BASE_T |
		ETHERNET_HW_FILTERING |
#if defined(CONFIG_NET_VLAN)
		ETHERNET_HW_VLAN |
#endif
#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
		ETHERNET_PTP |
#endif
#if defined(CONFIG_NET_DSA)
		ETHERNET_DSA_MASTER_PORT |
#endif
#if defined(CONFIG_ETH_NXP_ENET_HW_ACCELERATION)
		ETHERNET_HW_TX_CHKSUM_OFFLOAD |
		ETHERNET_HW_RX_CHKSUM_OFFLOAD |
#endif
		ETHERNET_LINK_100BASE_T;

	if (COND_CODE_1(IS_ENABLED(CONFIG_ETH_NXP_ENET_1G),
	   (config->phy_mode == NXP_ENET_RGMII_MODE), (0))) {
		caps |= ETHERNET_LINK_1000BASE_T;
	}

	return caps;
}

static int eth_nxp_enet_set_config(const struct device *dev,
			       enum ethernet_config_type type,
			       const struct ethernet_config *cfg)
{
	struct nxp_enet_mac_data *data = dev->data;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(data->mac_addr,
		       cfg->mac_address.addr,
		       sizeof(data->mac_addr));
		ENET_SetMacAddr(data->base, data->mac_addr);
		net_if_set_link_addr(data->iface, data->mac_addr,
				     sizeof(data->mac_addr),
				     NET_LINK_ETHERNET);
		LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x",
			dev->name,
			data->mac_addr[0], data->mac_addr[1],
			data->mac_addr[2], data->mac_addr[3],
			data->mac_addr[4], data->mac_addr[5]);
		return 0;
	case ETHERNET_CONFIG_TYPE_FILTER:
		/* The ENET driver does not modify the address buffer but the API is not const */
		if (cfg->filter.set) {
			ENET_AddMulticastGroup(data->base,
					       (uint8_t *)cfg->filter.mac_address.addr);
		} else {
			ENET_LeaveMulticastGroup(data->base,
						 (uint8_t *)cfg->filter.mac_address.addr);
		}
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

static int eth_nxp_enet_get_config(const struct device *dev,
			       enum ethernet_config_type type,
			       struct ethernet_config *cfg)
{
	switch (type) {
	case ETHERNET_CONFIG_TYPE_RX_CHECKSUM_SUPPORT:
	case ETHERNET_CONFIG_TYPE_TX_CHECKSUM_SUPPORT:
		cfg->chksum_support = ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER	|
				      ETHERNET_CHECKSUM_SUPPORT_IPV4_ICMP	|
				      ETHERNET_CHECKSUM_SUPPORT_IPV6_HEADER	|
				      ETHERNET_CHECKSUM_SUPPORT_TCP		|
				      ETHERNET_CHECKSUM_SUPPORT_UDP;
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

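/* Read one frame from the RX ring into a net_pkt and pass it up the stack.
 * Returns 1 if a frame was consumed, 0 if the ring is empty, negative errno on error.
 */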
static int eth_nxp_enet_rx(const struct device *dev)
{
#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	const struct nxp_enet_mac_config *config = dev->config;
#endif
	struct nxp_enet_mac_data *data = dev->data;
	uint32_t frame_length = 0U;
	struct net_if *iface;
	struct net_pkt *pkt = NULL;
	status_t status;
	uint32_t ts;

	status = ENET_GetRxFrameSize(&data->enet_handle,
				     (uint32_t *)&frame_length, RING_ID);
	if (status == kStatus_ENET_RxFrameEmpty) {
		return 0;
	} else if (status == kStatus_ENET_RxFrameError) {
		enet_data_error_stats_t error_stats;

		LOG_ERR("ENET_GetRxFrameSize return: %d", (int)status);

		ENET_GetRxErrBeforeReadFrame(&data->enet_handle,
					     &error_stats, RING_ID);
		goto flush;
	}

	if (frame_length > NET_ETH_MAX_FRAME_SIZE) {
		LOG_ERR("Frame too large (%d)", frame_length);
		goto flush;
	}

	/* Using root iface. It will be updated in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(data->iface, frame_length,
					   AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		goto flush;
	}

	k_mutex_lock(&data->rx_frame_buf_mutex, K_FOREVER);
	status = ENET_ReadFrame(data->base, &data->enet_handle,
				data->rx_frame_buf, frame_length, RING_ID, &ts);
	k_mutex_unlock(&data->rx_frame_buf_mutex);

	if (status) {
		LOG_ERR("ENET_ReadFrame failed: %d", (int)status);
		goto error;
	}

	if (net_pkt_write(pkt, data->rx_frame_buf, frame_length)) {
		LOG_ERR("Unable to write frame into the packet");
		goto error;
	}

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	k_mutex_lock(data->ptp_mutex, K_FOREVER);

	/* Invalid value by default. */
	pkt->timestamp.nanosecond = UINT32_MAX;
	pkt->timestamp.second = UINT64_MAX;

	/* Timestamp the packet using PTP clock */
	if (eth_get_ptp_data(get_iface(data), pkt)) {
		struct net_ptp_time ptp_time;

		ptp_clock_get(config->ptp_clock, &ptp_time);

		/* If the nanosecond counter rolled over into the next second after
		 * the Rx BD timestamp was captured, subtract one second so the
		 * reconstructed Rx timestamp is accurate.
		 */
		if (ptp_time.nanosecond < ts) {
			ptp_time.second--;
		}

		pkt->timestamp.nanosecond = ts;
		pkt->timestamp.second = ptp_time.second;
	}
	k_mutex_unlock(data->ptp_mutex);
#endif /* CONFIG_PTP_CLOCK_NXP_ENET */

	iface = get_iface(data);
#if defined(CONFIG_NET_DSA)
	iface = dsa_net_recv(iface, &pkt);
#endif
	if (net_recv_data(iface, pkt) < 0) {
		goto error;
	}

	return 1;
flush:
	/* Flush the current read buffer.  This operation can
	 * only report failure if there is no frame to flush,
	 * which cannot happen in this context.
	 */
	status = ENET_ReadFrame(data->base, &data->enet_handle, NULL,
					0, RING_ID, NULL);
	__ASSERT_NO_MSG(status == kStatus_Success);
error:
	if (pkt) {
		net_pkt_unref(pkt);
	}
	eth_stats_update_errors_rx(get_iface(data));
	return -EIO;
}

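/* Work queue handler: drain all pending RX frames, then re-enable the RX interrupt */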
static void eth_nxp_enet_rx_thread(struct k_work *work)
{
	struct nxp_enet_mac_data *data =
		CONTAINER_OF(work, struct nxp_enet_mac_data, rx_work);
	const struct device *dev = data->dev;
	int ret;

	if (k_sem_take(&data->rx_thread_sem, K_FOREVER)) {
		return;
	}

	do {
		ret = eth_nxp_enet_rx(dev);
	} while (ret == 1);

	ENET_EnableInterrupts(data->base, kENET_RxFrameInterrupt);
}

static int nxp_enet_phy_configure(const struct device *phy, uint8_t phy_mode)
{
	enum phy_link_speed speeds = LINK_HALF_10BASE_T | LINK_FULL_10BASE_T |
				       LINK_HALF_100BASE_T | LINK_FULL_100BASE_T;

	if (COND_CODE_1(IS_ENABLED(CONFIG_ETH_NXP_ENET_1G),
	   (phy_mode == NXP_ENET_RGMII_MODE), (0))) {
		speeds |= (LINK_HALF_1000BASE_T | LINK_FULL_1000BASE_T);
	}

	/* Configure the PHY */
	return phy_configure_link(phy, speeds);
}

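/* PHY link state callback: program MAC speed/duplex to match the PHY and update carrier state */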
static void nxp_enet_phy_cb(const struct device *phy,
				struct phy_link_state *state,
				void *eth_dev)
{
	const struct device *dev = eth_dev;
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;
	enet_mii_speed_t speed;
	enet_mii_duplex_t duplex;

	if (state->is_up) {
#if defined(CONFIG_ETH_NXP_ENET_1G)
		if (PHY_LINK_IS_SPEED_1000M(state->speed)) {
			speed = kENET_MiiSpeed1000M;
		} else if (PHY_LINK_IS_SPEED_100M(state->speed)) {
#else
		if (PHY_LINK_IS_SPEED_100M(state->speed)) {
#endif
			speed = kENET_MiiSpeed100M;
		} else {
			speed = kENET_MiiSpeed10M;
		}

		if (PHY_LINK_IS_FULL_DUPLEX(state->speed)) {
			duplex = kENET_MiiFullDuplex;
		} else {
			duplex = kENET_MiiHalfDuplex;
		}

		ENET_SetMII(data->base, speed, duplex);
	}

	if (!data->iface) {
		return;
	}

	LOG_INF("Link is %s", state->is_up ? "up" : "down");

	if (!state->is_up) {
		net_eth_carrier_off(data->iface);
		nxp_enet_phy_configure(phy, config->phy_mode);
	} else {
		net_eth_carrier_on(data->iface);
	}
}

static void eth_nxp_enet_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;
	const struct device *phy_dev = config->phy_dev;
	struct phy_link_state state;

	net_if_set_link_addr(iface, data->mac_addr,
			     sizeof(data->mac_addr),
			     NET_LINK_ETHERNET);

	if (data->iface == NULL) {
		data->iface = iface;
	}

#if defined(CONFIG_NET_DSA)
	dsa_register_master_tx(iface, &eth_nxp_enet_tx);
#endif

	ethernet_init(iface);
	net_if_carrier_off(iface);

	/* In case the PHY driver doesn't report a state change because the link was
	 * already up before phy_configure was called, check the state ourselves and
	 * invoke the callback manually.
	 */
	phy_get_link_state(phy_dev, &state);

	nxp_enet_phy_cb(phy_dev, &state, (void *)dev);

	config->irq_config_func();

	nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_INTERRUPT_ENABLED, NULL);
}

static int nxp_enet_phy_init(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;
	int ret = 0;

	ret = nxp_enet_phy_configure(config->phy_dev, config->phy_mode);
	if (ret) {
		return ret;
	}

	ret = phy_link_callback_set(config->phy_dev, nxp_enet_phy_cb, (void *)dev);
	if (ret) {
		return ret;
	}

	return ret;
}

void nxp_enet_driver_cb(const struct device *dev, enum nxp_enet_driver dev_type,
				enum nxp_enet_callback_reason event, void *data)
{
	if (dev_type == NXP_ENET_MDIO) {
		nxp_enet_mdio_callback(dev, event, data);
	} else if (dev_type == NXP_ENET_PTP_CLOCK) {
		nxp_enet_ptp_clock_callback(dev, event, data);
	}
}

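/* MCUX SDK callback, invoked from the ENET IRQ handlers for RX, TX, and timestamp events */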
static void eth_callback(ENET_Type *base, enet_handle_t *handle,
#if FSL_FEATURE_ENET_QUEUE > 1
			 uint32_t ringId,
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
			 enet_event_t event, enet_frame_info_t *frameinfo, void *param)
{
	const struct device *dev = param;
	struct nxp_enet_mac_data *data = dev->data;

	switch (event) {
	case kENET_RxEvent:
		k_sem_give(&data->rx_thread_sem);
		break;
	case kENET_TxEvent:
		ts_register_tx_event(dev, frameinfo);
		k_sem_give(&data->tx_buf_sem);
		break;
	case kENET_TimeStampEvent:
		/* Reset periodic timer to default value. */
		data->base->ATPER = NSEC_PER_SEC;
		break;
	default:
		break;
	}
}

#if FSL_FEATURE_ENET_QUEUE > 1
#define ENET_IRQ_HANDLER_ARGS(base, handle) base, handle, 0
#else
#define ENET_IRQ_HANDLER_ARGS(base, handle) base, handle
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */

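/* Top-level ENET ISR: dispatch RX, TX, and MII interrupts; RX processing is deferred to the work queue */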
static void eth_nxp_enet_isr(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;
	struct nxp_enet_mac_data *data = dev->data;
	unsigned int irq_lock_key = irq_lock();

	uint32_t eir = ENET_GetInterruptStatus(data->base);

	if (eir & (kENET_RxFrameInterrupt)) {
		ENET_ReceiveIRQHandler(ENET_IRQ_HANDLER_ARGS(data->base, &data->enet_handle));
		ENET_DisableInterrupts(data->base, kENET_RxFrameInterrupt);
		k_work_submit_to_queue(&rx_work_queue, &data->rx_work);
	}

	if (eir & kENET_TxFrameInterrupt) {
		ENET_TransmitIRQHandler(ENET_IRQ_HANDLER_ARGS(data->base, &data->enet_handle));
	}

	if (eir & ENET_EIR_MII_MASK) {
		nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_INTERRUPT, NULL);
	}

	irq_unlock(irq_lock_key);
}

static const struct device *eth_nxp_enet_get_phy(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;

	return config->phy_dev;
}

/* Note: this address is not universally unique, it is just probably unique on a network */
static inline void nxp_enet_unique_mac(uint8_t *mac_addr)
{
	uint32_t id = ETH_NXP_ENET_UNIQUE_ID;

	if (id == 0xFFFFFF) {
		LOG_ERR("No unique MAC can be provided on this platform");
	}

	/* Setting LAA bit because it is not guaranteed universally unique */
	mac_addr[0] = FREESCALE_OUI_B0 | 0x02;
	mac_addr[1] = FREESCALE_OUI_B1;
	mac_addr[2] = FREESCALE_OUI_B2;
	mac_addr[3] = FIELD_GET(0xFF0000, id);
	mac_addr[4] = FIELD_GET(0x00FF00, id);
	mac_addr[5] = FIELD_GET(0x0000FF, id);
}

#ifdef CONFIG_SOC_FAMILY_NXP_IMXRT
#include <fsl_ocotp.h>
#endif

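/* Derive the MAC address from the factory-programmed OCOTP fuses (i.MX RT only) */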
static inline void nxp_enet_fused_mac(uint8_t *mac_addr)
{
#ifdef CONFIG_SOC_FAMILY_NXP_IMXRT
	uint32_t mac_addr_fuse[2] = {0};

#if defined(CONFIG_SOC_SERIES_IMXRT10XX)
	OCOTP_Init((OCOTP_Type *)OCOTP_BASE, CLOCK_GetIpgFreq());
	/* OTP bank 4, word 2: MAC0 */
	OCOTP_ReadFuseShadowRegisterExt((OCOTP_Type *)OCOTP_BASE,
		0x22, &mac_addr_fuse[0], 1);
	/* OTP bank 4, word 3: MAC1 */
	OCOTP_ReadFuseShadowRegisterExt((OCOTP_Type *)OCOTP_BASE,
		0x23, &mac_addr_fuse[1], 1);
#elif defined(CONFIG_SOC_SERIES_IMXRT11XX)
	OCOTP_Init((OCOTP_Type *)OCOTP_BASE, 0);
	OCOTP_ReadFuseShadowRegisterExt((OCOTP_Type *)OCOTP_BASE,
		0x28, &mac_addr_fuse[0], 2);
#endif
	mac_addr[0] = mac_addr_fuse[0] & 0x000000FF;
	mac_addr[1] = (mac_addr_fuse[0] & 0x0000FF00) >> 8;
	mac_addr[2] = (mac_addr_fuse[0] & 0x00FF0000) >> 16;
	mac_addr[3] = (mac_addr_fuse[0] & 0xFF000000) >> 24;
	mac_addr[4] = (mac_addr_fuse[1] & 0x00FF);
	mac_addr[5] = (mac_addr_fuse[1] & 0xFF00) >> 8;
#else
	ARG_UNUSED(mac_addr);
#endif
}

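/* One-time MAC initialization: pinmux, synchronization primitives, MAC address selection,
 * ENET peripheral bring-up, and PHY configuration
 */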
static int eth_nxp_enet_init(const struct device *dev)
{
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;
	enet_config_t enet_config;
	uint32_t enet_module_clock_rate;
	int err;

	data->base = (ENET_Type *)DEVICE_MMIO_GET(config->module_dev);

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	k_mutex_init(&data->rx_frame_buf_mutex);
	k_mutex_init(&data->tx_frame_buf_mutex);
	k_sem_init(&data->rx_thread_sem, 0, CONFIG_ETH_NXP_ENET_RX_BUFFERS);
	k_sem_init(&data->tx_buf_sem,
		   CONFIG_ETH_NXP_ENET_TX_BUFFERS, CONFIG_ETH_NXP_ENET_TX_BUFFERS);
#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	k_sem_init(&data->ptp_ts_sem, 0, 1);
#endif
	k_work_init(&data->rx_work, eth_nxp_enet_rx_thread);

	switch (config->mac_addr_source) {
	case MAC_ADDR_SOURCE_LOCAL:
		break;
	case MAC_ADDR_SOURCE_RANDOM:
		gen_random_mac(data->mac_addr,
			FREESCALE_OUI_B0, FREESCALE_OUI_B1, FREESCALE_OUI_B2);
		break;
	case MAC_ADDR_SOURCE_UNIQUE:
		nxp_enet_unique_mac(data->mac_addr);
		break;
	case MAC_ADDR_SOURCE_FUSED:
		nxp_enet_fused_mac(data->mac_addr);
		break;
	default:
		return -ENOTSUP;
	}

	err = clock_control_get_rate(config->clock_dev, config->clock_subsys,
			&enet_module_clock_rate);
	if (err) {
		return err;
	}

	ENET_GetDefaultConfig(&enet_config);

	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE)) {
		enet_config.macSpecialConfig |= kENET_ControlPromiscuousEnable;
	}

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		enet_config.macSpecialConfig |= kENET_ControlVLANTagEnable;
	}

	if (IS_ENABLED(CONFIG_ETH_NXP_ENET_HW_ACCELERATION)) {
		enet_config.txAccelerConfig |=
			kENET_TxAccelIpCheckEnabled | kENET_TxAccelProtoCheckEnabled;
		enet_config.rxAccelerConfig |=
			kENET_RxAccelIpCheckEnabled | kENET_RxAccelProtoCheckEnabled;
	}

	enet_config.interrupt |= kENET_RxFrameInterrupt;
	enet_config.interrupt |= kENET_TxFrameInterrupt;

	if (config->phy_mode == NXP_ENET_MII_MODE) {
		enet_config.miiMode = kENET_MiiMode;
	} else if (config->phy_mode == NXP_ENET_RMII_MODE) {
		enet_config.miiMode = kENET_RmiiMode;
#if defined(CONFIG_ETH_NXP_ENET_1G)
	} else if (config->phy_mode == NXP_ENET_RGMII_MODE) {
		enet_config.miiMode = kENET_RgmiiMode;
#endif
	} else {
		return -EINVAL;
	}

	enet_config.callback = eth_callback;
	enet_config.userData = (void *)dev;

	ENET_Up(data->base,
		  &data->enet_handle,
		  &enet_config,
		  config->buffer_config,
		  data->mac_addr,
		  enet_module_clock_rate);

	nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_MODULE_RESET, NULL);

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	nxp_enet_driver_cb(config->ptp_clock, NXP_ENET_PTP_CLOCK,
				NXP_ENET_MODULE_RESET, &data->ptp_mutex);
	ENET_SetTxReclaim(&data->enet_handle, true, 0);
#endif

	ENET_ActiveRead(data->base);

	err = nxp_enet_phy_init(dev);
	if (err) {
		return err;
	}

	LOG_DBG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x",
		dev->name,
		data->mac_addr[0], data->mac_addr[1],
		data->mac_addr[2], data->mac_addr[3],
		data->mac_addr[4], data->mac_addr[5]);

	return 0;
}

#if defined(CONFIG_NET_POWER_MANAGEMENT)
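/* Device power management: on suspend, quiesce the interface and gate the module clock;
 * on resume, restore the clock and re-run the MAC initialization
 */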
static int eth_nxp_enet_device_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct nxp_enet_mac_config *config = dev->config;
	struct nxp_enet_mac_data *data = dev->data;
	int ret;

	if (!device_is_ready(config->clock_dev)) {
		return -ENODEV;
	}

	if (action == PM_DEVICE_ACTION_SUSPEND) {
		LOG_DBG("Suspending");

		ret = net_if_suspend(data->iface);
		if (ret) {
			return ret;
		}

		ENET_Reset(data->base);
		ENET_Down(data->base);
		clock_control_off(config->clock_dev, (clock_control_subsys_t)config->clock_subsys);
	} else if (action == PM_DEVICE_ACTION_RESUME) {
		LOG_DBG("Resuming");

		clock_control_on(config->clock_dev, (clock_control_subsys_t)config->clock_subsys);
		eth_nxp_enet_init(dev);
		net_if_resume(data->iface);
	} else {
		return -ENOTSUP;
	}

	return 0;
}

#define ETH_NXP_ENET_PM_DEVICE_INIT(n)	\
	PM_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_device_pm_action);
#define ETH_NXP_ENET_PM_DEVICE_GET(n) PM_DEVICE_DT_INST_GET(n)

#else
#define ETH_NXP_ENET_PM_DEVICE_INIT(n)
#define ETH_NXP_ENET_PM_DEVICE_GET(n) NULL
#endif /* CONFIG_NET_POWER_MANAGEMENT */

#ifdef CONFIG_NET_DSA
#define NXP_ENET_SEND_FUNC dsa_tx
#else
#define NXP_ENET_SEND_FUNC eth_nxp_enet_tx
#endif /* CONFIG_NET_DSA */

static const struct ethernet_api api_funcs = {
	.iface_api.init		= eth_nxp_enet_iface_init,
	.get_capabilities	= eth_nxp_enet_get_capabilities,
	.get_phy		= eth_nxp_enet_get_phy,
	.set_config		= eth_nxp_enet_set_config,
	.get_config		= eth_nxp_enet_get_config,
	.send			= NXP_ENET_SEND_FUNC,
#if defined(CONFIG_PTP_CLOCK)
	.get_ptp_clock		= eth_nxp_enet_get_ptp_clock,
#endif
};

#define NXP_ENET_CONNECT_IRQ(node_id, irq_names, idx)				\
	do {									\
		IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, idx, irq),			\
				DT_IRQ_BY_IDX(node_id, idx, priority),		\
				eth_nxp_enet_isr,				\
				DEVICE_DT_GET(node_id),				\
				0);						\
		irq_enable(DT_IRQ_BY_IDX(node_id, idx, irq));			\
	} while (false);

#define NXP_ENET_DT_PHY_DEV(node_id, phy_phandle, idx)						\
	DEVICE_DT_GET(DT_PHANDLE_BY_IDX(node_id, phy_phandle, idx))

#if DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_dtcm)) && \
	CONFIG_ETH_NXP_ENET_USE_DTCM_FOR_DMA_BUFFER
#define _nxp_enet_dma_desc_section __dtcm_bss_section
#define _nxp_enet_dma_buffer_section __dtcm_noinit_section
#define _nxp_enet_driver_buffer_section __dtcm_noinit_section
#define driver_cache_maintain	false
#elif defined(CONFIG_NOCACHE_MEMORY)
#define _nxp_enet_dma_desc_section __nocache
#define _nxp_enet_dma_buffer_section
#define _nxp_enet_driver_buffer_section
#define driver_cache_maintain	true
#else
#define _nxp_enet_dma_desc_section
#define _nxp_enet_dma_buffer_section
#define _nxp_enet_driver_buffer_section
#define driver_cache_maintain	true
#endif

/* Use ENET_FRAME_MAX_VLANFRAMELEN for VLAN frame size
 * Use ENET_FRAME_MAX_FRAMELEN for Ethernet frame size
 */
#if defined(CONFIG_NET_VLAN)
#if !defined(ENET_FRAME_MAX_VLANFRAMELEN)
#define ENET_FRAME_MAX_VLANFRAMELEN (ENET_FRAME_MAX_FRAMELEN + 4)
#endif
#define ETH_NXP_ENET_BUFFER_SIZE \
		ROUND_UP(ENET_FRAME_MAX_VLANFRAMELEN, ENET_BUFF_ALIGNMENT)
#else
#define ETH_NXP_ENET_BUFFER_SIZE \
		ROUND_UP(ENET_FRAME_MAX_FRAMELEN, ENET_BUFF_ALIGNMENT)
#endif /* CONFIG_NET_VLAN */

#define NXP_ENET_PHY_MODE(node_id)							\
	DT_ENUM_HAS_VALUE(node_id, phy_connection_type, mii) ? NXP_ENET_MII_MODE :	\
	(DT_ENUM_HAS_VALUE(node_id, phy_connection_type, rmii) ? NXP_ENET_RMII_MODE :	\
	(DT_ENUM_HAS_VALUE(node_id, phy_connection_type, rgmii) ? NXP_ENET_RGMII_MODE :	\
	NXP_ENET_INVALID_MII_MODE))

#ifdef CONFIG_PTP_CLOCK_NXP_ENET
#define NXP_ENET_PTP_DEV(n) .ptp_clock = DEVICE_DT_GET(DT_INST_PHANDLE(n, nxp_ptp_clock)),
#define NXP_ENET_FRAMEINFO_ARRAY(n)							\
	static enet_frame_info_t							\
		nxp_enet_##n##_tx_frameinfo_array[CONFIG_ETH_NXP_ENET_TX_BUFFERS];
#define NXP_ENET_FRAMEINFO(n)	\
	.txFrameInfo = nxp_enet_##n##_tx_frameinfo_array,
#else
#define NXP_ENET_PTP_DEV(n)
#define NXP_ENET_FRAMEINFO_ARRAY(n)
#define NXP_ENET_FRAMEINFO(n)	\
	.txFrameInfo = NULL
#endif

#define NXP_ENET_NODE_HAS_MAC_ADDR_CHECK(n)						\
	BUILD_ASSERT(NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)) ||				\
			DT_INST_PROP(n, zephyr_random_mac_address) ||			\
			DT_INST_PROP(n, nxp_unique_mac) ||				\
			DT_INST_PROP(n, nxp_fused_mac),					\
			"MAC address not specified on ENET DT node");

#define NXP_ENET_NODE_PHY_MODE_CHECK(n)							\
BUILD_ASSERT(NXP_ENET_PHY_MODE(DT_DRV_INST(n)) != NXP_ENET_RGMII_MODE ||		\
			(IS_ENABLED(CONFIG_ETH_NXP_ENET_1G) &&				\
			DT_NODE_HAS_COMPAT(DT_INST_PARENT(n), nxp_enet1g)),		\
			"RGMII mode requires nxp,enet1g compatible on ENET DT node"	\
			" and CONFIG_ETH_NXP_ENET_1G enabled");

#define NXP_ENET_MAC_ADDR_SOURCE(n)							\
	COND_CODE_1(DT_NODE_HAS_PROP(DT_DRV_INST(n), local_mac_address),		\
			(MAC_ADDR_SOURCE_LOCAL),					\
	(COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address),			\
			(MAC_ADDR_SOURCE_RANDOM),					\
	(COND_CODE_1(DT_INST_PROP(n, nxp_unique_mac), (MAC_ADDR_SOURCE_UNIQUE),		\
	(COND_CODE_1(DT_INST_PROP(n, nxp_fused_mac), (MAC_ADDR_SOURCE_FUSED),		\
	(MAC_ADDR_SOURCE_INVALID))))))))

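/* Minimal devicetree sketch of how a MAC node feeds the macros below; node labels and
 * pinctrl names are illustrative assumptions, only the properties referenced by the
 * instantiation macros are meaningful:
 *
 *	&enet_mac {
 *		status = "okay";
 *		pinctrl-0 = <&pinmux_enet>;
 *		pinctrl-names = "default";
 *		phy-handle = <&phy>;
 *		phy-connection-type = "rmii";
 *		nxp,mdio = <&enet_mdio>;
 *		zephyr,random-mac-address;
 *	};
 */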
#define NXP_ENET_MAC_INIT(n)								\
		NXP_ENET_NODE_HAS_MAC_ADDR_CHECK(n)					\
											\
		NXP_ENET_NODE_PHY_MODE_CHECK(n)						\
											\
		PINCTRL_DT_INST_DEFINE(n);						\
											\
		NXP_ENET_FRAMEINFO_ARRAY(n)						\
											\
		static void nxp_enet_##n##_irq_config_func(void)			\
		{									\
			DT_INST_FOREACH_PROP_ELEM(n, interrupt_names,			\
						NXP_ENET_CONNECT_IRQ);			\
		}									\
											\
		volatile static __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_desc_section					\
			enet_rx_bd_struct_t						\
			nxp_enet_##n##_rx_buffer_desc[CONFIG_ETH_NXP_ENET_RX_BUFFERS];	\
											\
		volatile static __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_desc_section					\
			enet_tx_bd_struct_t						\
			nxp_enet_##n##_tx_buffer_desc[CONFIG_ETH_NXP_ENET_TX_BUFFERS];	\
											\
		static uint8_t __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_buffer_section					\
			nxp_enet_##n##_rx_buffer[CONFIG_ETH_NXP_ENET_RX_BUFFERS]	\
						[ETH_NXP_ENET_BUFFER_SIZE];		\
											\
		static uint8_t __aligned(ENET_BUFF_ALIGNMENT)				\
			_nxp_enet_dma_buffer_section					\
			nxp_enet_##n##_tx_buffer[CONFIG_ETH_NXP_ENET_TX_BUFFERS]	\
						[ETH_NXP_ENET_BUFFER_SIZE];		\
											\
		const struct nxp_enet_mac_config nxp_enet_##n##_config = {		\
			.irq_config_func = nxp_enet_##n##_irq_config_func,		\
			.module_dev = DEVICE_DT_GET(DT_INST_PARENT(n)),			\
			.clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_INST_PARENT(n))),	\
			.clock_subsys = (void *)DT_CLOCKS_CELL_BY_IDX(			\
						DT_INST_PARENT(n), 0, name),		\
			.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
			.buffer_config = {{						\
				.rxBdNumber = CONFIG_ETH_NXP_ENET_RX_BUFFERS,		\
				.txBdNumber = CONFIG_ETH_NXP_ENET_TX_BUFFERS,		\
				.rxBuffSizeAlign = ETH_NXP_ENET_BUFFER_SIZE,		\
				.txBuffSizeAlign = ETH_NXP_ENET_BUFFER_SIZE,		\
				.rxBdStartAddrAlign = nxp_enet_##n##_rx_buffer_desc,	\
				.txBdStartAddrAlign = nxp_enet_##n##_tx_buffer_desc,	\
				.rxBufferAlign = nxp_enet_##n##_rx_buffer[0],		\
				.txBufferAlign = nxp_enet_##n##_tx_buffer[0],		\
				.rxMaintainEnable = driver_cache_maintain,		\
				.txMaintainEnable = driver_cache_maintain,		\
				NXP_ENET_FRAMEINFO(n)					\
			}},								\
			.phy_mode = NXP_ENET_PHY_MODE(DT_DRV_INST(n)),			\
			.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)),	\
			.mdio = DEVICE_DT_GET(DT_INST_PHANDLE(n, nxp_mdio)),		\
			NXP_ENET_PTP_DEV(n)						\
			.mac_addr_source = NXP_ENET_MAC_ADDR_SOURCE(n),			\
		};									\
											\
		static _nxp_enet_driver_buffer_section uint8_t				\
			nxp_enet_##n##_tx_frame_buf[NET_ETH_MAX_FRAME_SIZE];		\
		static _nxp_enet_driver_buffer_section uint8_t				\
			nxp_enet_##n##_rx_frame_buf[NET_ETH_MAX_FRAME_SIZE];		\
											\
		struct nxp_enet_mac_data nxp_enet_##n##_data = {			\
			.tx_frame_buf = nxp_enet_##n##_tx_frame_buf,			\
			.rx_frame_buf = nxp_enet_##n##_rx_frame_buf,			\
			.dev = DEVICE_DT_INST_GET(n),					\
			.mac_addr = DT_INST_PROP_OR(n, local_mac_address, {0}),		\
		};									\
											\
		ETH_NXP_ENET_PM_DEVICE_INIT(n)						\
											\
		ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_init,			\
					ETH_NXP_ENET_PM_DEVICE_GET(n),			\
					&nxp_enet_##n##_data, &nxp_enet_##n##_config,	\
					CONFIG_ETH_INIT_PRIORITY,			\
					&api_funcs, NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_MAC_INIT)

struct nxp_enet_mod_config {
	DEVICE_MMIO_ROM;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
};

struct nxp_enet_mod_data {
	DEVICE_MMIO_RAM;
};

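/* ENET module (parent) init: enable the module clock, map the registers, and reset the
 * peripheral before the MAC, MDIO, and PTP clock drivers initialize
 */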
static int nxp_enet_mod_init(const struct device *dev)
{
	const struct nxp_enet_mod_config *config = dev->config;
	int ret;

	ret = clock_control_on(config->clock_dev, config->clock_subsys);
	if (ret) {
		LOG_ERR("ENET module clock error");
		return ret;
	}

	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP);

	ENET_Reset((ENET_Type *)DEVICE_MMIO_GET(dev));

	return 0;
}

#define NXP_ENET_INIT(n, compat)							\
											\
static const struct nxp_enet_mod_config nxp_enet_mod_cfg_##n = {			\
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),					\
		.clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_DRV_INST(n))),		\
		.clock_subsys = (void *) DT_CLOCKS_CELL_BY_IDX(				\
							DT_DRV_INST(n), 0, name),	\
};											\
											\
static struct nxp_enet_mod_data nxp_enet_mod_data_##n;					\
											\
/* Init the module before any of the MAC, MDIO, or PTP clock */				\
DEVICE_DT_INST_DEFINE(n, nxp_enet_mod_init, NULL,					\
		&nxp_enet_mod_data_##n, &nxp_enet_mod_cfg_##n,				\
		POST_KERNEL, 0, NULL);

#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT nxp_enet

DT_INST_FOREACH_STATUS_OKAY_VARGS(NXP_ENET_INIT, DT_DRV_COMPAT)

#define NXP_ENET1G_INIT(n, compat)							\
											\
static const struct nxp_enet_mod_config nxp_enet1g_mod_cfg_##n = {			\
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),					\
		.clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_DRV_INST(n))),		\
		.clock_subsys = (void *) DT_CLOCKS_CELL_BY_IDX(				\
							DT_DRV_INST(n), 0, name),	\
};											\
											\
static struct nxp_enet_mod_data nxp_enet1g_mod_data_##n;				\
											\
/* Init the module before any of the MAC, MDIO, or PTP clock */				\
DEVICE_DT_INST_DEFINE(n, nxp_enet_mod_init, NULL,					\
		&nxp_enet1g_mod_data_##n, &nxp_enet1g_mod_cfg_##n,			\
		POST_KERNEL, 0, NULL);

#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT nxp_enet1g

DT_INST_FOREACH_STATUS_OKAY_VARGS(NXP_ENET1G_INIT, DT_DRV_COMPAT)