1 /*
2  * Copyright 2024-2025 NXP
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
8 #include <zephyr/logging/log.h>
9 LOG_MODULE_REGISTER(nxp_imx_eth);
10 
11 #include <zephyr/kernel.h>
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/mbox.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/net/ethernet.h>
16 #include <zephyr/net/net_if.h>
17 #include <zephyr/net/net_pkt.h>
18 #include <zephyr/net/phy.h>
19 #include <ethernet/eth_stats.h>
20 
21 #include "../eth.h"
22 #include "eth_nxp_imx_netc_priv.h"
23 
/* Registry of active NETC instances, filled front-to-back by netc_eth_init_common()
 * and scanned by msgintr_isr() to demultiplex the shared message interrupt.
 */
const struct device *netc_dev_list[NETC_DRV_MAX_INST_SUPPORT];
25 
netc_eth_rx(const struct device * dev)26 static int netc_eth_rx(const struct device *dev)
27 {
28 	struct netc_eth_data *data = dev->data;
29 	struct net_pkt *pkt;
30 	int key;
31 	int ret = 0;
32 	status_t result;
33 	uint32_t length;
34 
35 	key = irq_lock();
36 
37 	/* Check rx frame */
38 	result = EP_GetRxFrameSize(&data->handle, 0, &length);
39 	if (result == kStatus_NETC_RxFrameEmpty) {
40 		ret = -ENOBUFS;
41 		goto out;
42 	}
43 
44 	if (result != kStatus_Success) {
45 		LOG_ERR("Error on received frame");
46 		ret = -EIO;
47 		goto out;
48 	}
49 
50 	/* Receive frame */
51 	result = EP_ReceiveFrameCopy(&data->handle, 0, data->rx_frame, length, NULL);
52 	if (result != kStatus_Success) {
53 		LOG_ERR("Error on received frame");
54 		ret = -EIO;
55 		goto out;
56 	}
57 
58 	/* Copy to pkt */
59 	pkt = net_pkt_rx_alloc_with_buffer(data->iface, length, AF_UNSPEC, 0, NETC_TIMEOUT);
60 	if (!pkt) {
61 		eth_stats_update_errors_rx(data->iface);
62 		ret = -ENOBUFS;
63 		goto out;
64 	}
65 
66 	ret = net_pkt_write(pkt, data->rx_frame, length);
67 	if (ret) {
68 		eth_stats_update_errors_rx(data->iface);
69 		net_pkt_unref(pkt);
70 		goto out;
71 	}
72 
73 	/* Send to upper layer */
74 	ret = net_recv_data(data->iface, pkt);
75 	if (ret < 0) {
76 		eth_stats_update_errors_rx(data->iface);
77 		net_pkt_unref(pkt);
78 		LOG_ERR("Failed to enqueue frame into rx queue: %d", ret);
79 	}
80 out:
81 	irq_unlock(key);
82 	return ret;
83 }
84 
netc_eth_rx_thread(void * arg1,void * unused1,void * unused2)85 static void netc_eth_rx_thread(void *arg1, void *unused1, void *unused2)
86 {
87 	const struct device *dev = (const struct device *)arg1;
88 	struct netc_eth_data *data = dev->data;
89 	int ret;
90 	int work;
91 
92 	ARG_UNUSED(unused1);
93 	ARG_UNUSED(unused2);
94 
95 	while (1) {
96 		ret = k_sem_take(&data->rx_sem, K_FOREVER);
97 		if (ret != 0) {
98 			LOG_ERR("Take rx_sem error: %d", ret);
99 			continue;
100 		}
101 
102 		work = 0;
103 		while (netc_eth_rx(dev) != -ENOBUFS) {
104 			if (++work == CONFIG_ETH_NXP_IMX_RX_BUDGET) {
105 				/* more work to do, reschedule */
106 				work = 0;
107 				k_yield();
108 			}
109 		}
110 	}
111 }
112 
msgintr_isr(void)113 static void msgintr_isr(void)
114 {
115 	uint32_t irqs = NETC_MSGINTR->MSI[NETC_MSGINTR_CHANNEL].MSIR;
116 
117 	for (int i = 0; i < NETC_DRV_MAX_INST_SUPPORT; i++) {
118 		const struct device *dev = netc_dev_list[i];
119 		const struct netc_eth_config *config;
120 		struct netc_eth_data *data;
121 
122 		if (!dev) {
123 			return;
124 		}
125 
126 		config = dev->config;
127 		data = dev->data;
128 		/* Transmit interrupt */
129 		if (irqs & (1 << config->tx_intr_msg_data)) {
130 			EP_CleanTxIntrFlags(&data->handle, 1, 0);
131 			data->tx_done = true;
132 		}
133 		/* Receive interrupt */
134 		if (irqs & (1 << config->rx_intr_msg_data)) {
135 			EP_CleanRxIntrFlags(&data->handle, 1);
136 			k_sem_give(&data->rx_sem);
137 		}
138 	}
139 
140 	SDK_ISR_EXIT_BARRIER;
141 }
142 
netc_eth_reclaim_callback(ep_handle_t * handle,uint8_t ring,netc_tx_frame_info_t * frameInfo,void * userData)143 static status_t netc_eth_reclaim_callback(ep_handle_t *handle, uint8_t ring,
144 					  netc_tx_frame_info_t *frameInfo, void *userData)
145 {
146 	struct netc_eth_data *data = userData;
147 
148 	data->tx_info = *frameInfo;
149 	return kStatus_Success;
150 }
151 
/*
 * Common initialization for a NETC endpoint instance: configures buffer
 * descriptor rings, MSIX entries, the shared message interrupt, the EP
 * (station interface), registers the device in netc_dev_list[], and starts
 * the per-instance RX thread.
 *
 * Returns 0 on success, -ENOBUFS if EP_Init() fails.
 */
int netc_eth_init_common(const struct device *dev)
{
	const struct netc_eth_config *config = dev->config;
	struct netc_eth_data *data = dev->data;
	/* NOTE(review): msix_entry is stack-local but referenced via
	 * ep_config.msixEntry — presumably EP_Init() copies it into HW/handle
	 * state before returning; confirm against the SDK.
	 */
	netc_msix_entry_t msix_entry[NETC_MSIX_ENTRY_NUM];
	netc_rx_bdr_config_t rx_bdr_config = {0};
	netc_tx_bdr_config_t tx_bdr_config = {0};
	netc_bdr_config_t bdr_config = {0};
	ep_config_t ep_config;
	uint32_t msg_addr;
	status_t result;

	/* Instance-specific BD ring setup supplied by the config */
	config->bdr_init(&bdr_config, &rx_bdr_config, &tx_bdr_config);

	/* MSIX entry configuration: both entries target the same message
	 * interrupt channel, distinguished only by their message data.
	 * Entries start masked; unmasked after EP_Init() below.
	 */
	msg_addr = MSGINTR_GetIntrSelectAddr(NETC_MSGINTR, NETC_MSGINTR_CHANNEL);
	msix_entry[NETC_TX_MSIX_ENTRY_IDX].control = kNETC_MsixIntrMaskBit;
	msix_entry[NETC_TX_MSIX_ENTRY_IDX].msgAddr = msg_addr;
	msix_entry[NETC_TX_MSIX_ENTRY_IDX].msgData = config->tx_intr_msg_data;

	msix_entry[NETC_RX_MSIX_ENTRY_IDX].control = kNETC_MsixIntrMaskBit;
	msix_entry[NETC_RX_MSIX_ENTRY_IDX].msgAddr = msg_addr;
	msix_entry[NETC_RX_MSIX_ENTRY_IDX].msgData = config->rx_intr_msg_data;

	/* The message IRQ is shared across instances: connect it only once */
	if (!irq_is_enabled(NETC_MSGINTR_IRQ)) {
		IRQ_CONNECT(NETC_MSGINTR_IRQ, 0, msgintr_isr, 0, 0);
		irq_enable(NETC_MSGINTR_IRQ);
	}

	/* Endpoint configuration. */
	EP_GetDefaultConfig(&ep_config);
	ep_config.si = config->si_idx;
	ep_config.siConfig.txRingUse = 1;
	ep_config.siConfig.rxRingUse = 1;
	/* userData is handed back to netc_eth_reclaim_callback() */
	ep_config.userData = data;
	ep_config.reclaimCallback = netc_eth_reclaim_callback;
	ep_config.msixEntry = &msix_entry[0];
	ep_config.entryNum = NETC_MSIX_ENTRY_NUM;
	ep_config.port.ethMac.miiMode = config->phy_mode;
	ep_config.port.ethMac.miiSpeed = kNETC_MiiSpeed100M;
	ep_config.port.ethMac.miiDuplex = kNETC_MiiFullDuplex;
	ep_config.rxCacheMaintain = true;
	ep_config.txCacheMaintain = true;

	/* Instance-specific MAC address generation */
	config->generate_mac(&data->mac_addr[0]);

	result = EP_Init(&data->handle, &data->mac_addr[0], &ep_config, &bdr_config);
	if (result != kStatus_Success) {
		return -ENOBUFS;
	}

	/* Register this instance in the first free slot for the shared ISR */
	for (int i = 0; i < NETC_DRV_MAX_INST_SUPPORT; i++) {
		if (!netc_dev_list[i]) {
			netc_dev_list[i] = dev;
			break;
		}
	}

	/* Unmask MSIX message interrupt. */
	EP_MsixSetEntryMask(&data->handle, NETC_TX_MSIX_ENTRY_IDX, false);
	EP_MsixSetEntryMask(&data->handle, NETC_RX_MSIX_ENTRY_IDX, false);

	k_mutex_init(&data->tx_mutex);

	/* Binary-semaphore-like (max count 1) wakeup from the RX ISR */
	k_sem_init(&data->rx_sem, 0, 1);
	k_thread_create(&data->rx_thread, data->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(data->rx_thread_stack), netc_eth_rx_thread,
			(void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_NXP_IMX_RX_THREAD_PRIO), 0,
			K_NO_WAIT);
	k_thread_name_set(&data->rx_thread, "netc_eth_rx");

	return 0;
}
225 
netc_eth_tx(const struct device * dev,struct net_pkt * pkt)226 int netc_eth_tx(const struct device *dev, struct net_pkt *pkt)
227 {
228 	const struct netc_eth_config *cfg = dev->config;
229 	struct netc_eth_data *data = dev->data;
230 	netc_buffer_struct_t buff = {.buffer = data->tx_buff, .length = sizeof(data->tx_buff)};
231 	netc_frame_struct_t frame = {.buffArray = &buff, .length = 1};
232 	size_t pkt_len = net_pkt_get_len(pkt);
233 	status_t result;
234 	int ret;
235 
236 	__ASSERT(pkt, "Packet pointer is NULL");
237 
238 	/* TODO: support DSA master */
239 	if (cfg->pseudo_mac) {
240 		return -ENOSYS;
241 	}
242 
243 	k_mutex_lock(&data->tx_mutex, K_FOREVER);
244 
245 	/* Copy packet to tx buffer */
246 	buff.length = (uint16_t)pkt_len;
247 	ret = net_pkt_read(pkt, buff.buffer, pkt_len);
248 	if (ret) {
249 		LOG_ERR("Failed to copy packet to tx buffer: %d", ret);
250 		ret = -ENOBUFS;
251 		goto error;
252 	}
253 
254 	/* Send */
255 	data->tx_done = false;
256 	result = EP_SendFrame(&data->handle, 0, &frame, NULL, NULL);
257 	if (result != kStatus_Success) {
258 		LOG_ERR("Failed to tx frame");
259 		ret = -EIO;
260 		goto error;
261 	}
262 
263 	while (!data->tx_done) {
264 	}
265 
266 	EP_ReclaimTxDescriptor(&data->handle, 0);
267 	if (data->tx_info.status != kNETC_EPTxSuccess) {
268 		LOG_ERR("Failed to tx frame");
269 		ret = -EIO;
270 		goto error;
271 	}
272 	ret = 0;
273 error:
274 	k_mutex_unlock(&data->tx_mutex);
275 
276 	if (ret != 0) {
277 		eth_stats_update_errors_tx(data->iface);
278 	}
279 	return ret;
280 }
281 
netc_eth_get_capabilities(const struct device * dev)282 enum ethernet_hw_caps netc_eth_get_capabilities(const struct device *dev)
283 {
284 	const struct netc_eth_config *cfg = dev->config;
285 	uint32_t caps;
286 
287 	caps = (ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T |
288 		ETHERNET_HW_RX_CHKSUM_OFFLOAD | ETHERNET_HW_FILTERING
289 #if defined(CONFIG_NET_VLAN)
290 		| ETHERNET_HW_VLAN
291 #endif
292 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
293 		| ETHERNET_PROMISC_MODE
294 #endif
295 	);
296 
297 	if (cfg->pseudo_mac) {
298 		caps |= ETHERNET_DSA_MASTER_PORT;
299 	}
300 
301 	return caps;
302 }
303 
netc_eth_set_config(const struct device * dev,enum ethernet_config_type type,const struct ethernet_config * config)304 int netc_eth_set_config(const struct device *dev, enum ethernet_config_type type,
305 			const struct ethernet_config *config)
306 {
307 	struct netc_eth_data *data = dev->data;
308 	const struct netc_eth_config *cfg = dev->config;
309 	status_t result;
310 	int ret = 0;
311 
312 	switch (type) {
313 	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
314 		/* Set new Ethernet MAC address and register it with the upper layer */
315 		memcpy(data->mac_addr, config->mac_address.addr, sizeof(data->mac_addr));
316 		result = EP_SetPrimaryMacAddr(&data->handle, (uint8_t *)data->mac_addr);
317 		if (result != kStatus_Success) {
318 			LOG_ERR("PHY device (%p) is not ready, cannot init iface", cfg->phy_dev);
319 			ret = -ENOTSUP;
320 			break;
321 		}
322 		net_if_set_link_addr(data->iface, data->mac_addr, sizeof(data->mac_addr),
323 				     NET_LINK_ETHERNET);
324 		LOG_INF("SI%d MAC set to: %02x:%02x:%02x:%02x:%02x:%02x", getSiIdx(cfg->si_idx),
325 			data->mac_addr[0], data->mac_addr[1], data->mac_addr[2], data->mac_addr[3],
326 			data->mac_addr[4], data->mac_addr[5]);
327 		break;
328 	default:
329 		ret = -ENOTSUP;
330 		break;
331 	}
332 
333 	return ret;
334 }
335