/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(nxp_imx_eth);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/phy.h>
#include <ethernet/eth_stats.h>

#include "eth.h"
#include "eth_nxp_imx_netc_priv.h"

const struct device *netc_dev_list[NETC_DRV_MAX_INST_SUPPORT];

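/*
 * Receive one frame from rx BD ring 0 and hand it to the network stack.
 * Interrupts are locked for the duration of the poll. Returns -ENOBUFS
 * when the ring is empty or no net_pkt is available (the rx thread uses
 * this to stop draining), -EIO on SDK errors, 0 on success.
 */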
static int netc_eth_rx(const struct device *dev)
{
	struct netc_eth_data *data = dev->data;
	struct net_pkt *pkt;
	unsigned int key;
	int ret = 0;
	status_t result;
	uint32_t length;

	key = irq_lock();

	/* Check rx frame */
	result = EP_GetRxFrameSize(&data->handle, 0, &length);
	if (result == kStatus_NETC_RxFrameEmpty) {
		ret = -ENOBUFS;
		goto out;
	}

	if (result != kStatus_Success) {
		LOG_ERR("Failed to get rx frame size");
		ret = -EIO;
		goto out;
	}

	/* Receive frame */
	result = EP_ReceiveFrameCopy(&data->handle, 0, data->rx_frame, length, NULL);
	if (result != kStatus_Success) {
		LOG_ERR("Failed to receive frame");
		ret = -EIO;
		goto out;
	}

	/* Copy to pkt */
	pkt = net_pkt_rx_alloc_with_buffer(data->iface, length, AF_UNSPEC, 0, NETC_TIMEOUT);
	if (!pkt) {
		eth_stats_update_errors_rx(data->iface);
		ret = -ENOBUFS;
		goto out;
	}

	ret = net_pkt_write(pkt, data->rx_frame, length);
	if (ret) {
		eth_stats_update_errors_rx(data->iface);
		net_pkt_unref(pkt);
		goto out;
	}

	/* Send to upper layer */
	ret = net_recv_data(data->iface, pkt);
	if (ret < 0) {
		eth_stats_update_errors_rx(data->iface);
		net_pkt_unref(pkt);
		LOG_ERR("Failed to enqueue frame into rx queue: %d", ret);
	}
out:
	irq_unlock(key);
	return ret;
}

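/*
 * Rx thread: blocks on rx_sem (given by the message interrupt handler) and
 * drains the rx ring. After CONFIG_ETH_NXP_IMX_RX_BUDGET frames it yields
 * so other threads of the same priority get a chance to run.
 */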
static void netc_eth_rx_thread(void *arg1, void *unused1, void *unused2)
{
	const struct device *dev = (const struct device *)arg1;
	struct netc_eth_data *data = dev->data;
	int ret;
	int work;

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	while (1) {
		ret = k_sem_take(&data->rx_sem, K_FOREVER);
		if (ret != 0) {
			LOG_ERR("Take rx_sem error: %d", ret);
			continue;
		}

		work = 0;
		while (netc_eth_rx(dev) != -ENOBUFS) {
			if (++work == CONFIG_ETH_NXP_IMX_RX_BUDGET) {
				/* more work to do, reschedule */
				work = 0;
				k_yield();
			}
		}
	}
}

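/*
 * Shared message-interrupt handler for all NETC instances. Reads the MSIR
 * status once and dispatches tx/rx events to each registered device based
 * on its MSI message data.
 */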
static void msgintr_isr(void)
{
	uint32_t irqs = NETC_MSGINTR->MSI[NETC_MSGINTR_CHANNEL].MSIR;

	for (int i = 0; i < NETC_DRV_MAX_INST_SUPPORT; i++) {
		const struct device *dev = netc_dev_list[i];
		const struct netc_eth_config *config;
		struct netc_eth_data *data;

		if (!dev) {
			/* List is filled in order, so the first hole ends it.
			 * Break instead of returning so the exit barrier below
			 * still executes.
			 */
			break;
		}

		config = dev->config;
		data = dev->data;
		/* Transmit interrupt */
		if (irqs & (1 << config->tx_intr_msg_data)) {
			EP_CleanTxIntrFlags(&data->handle, 1, 0);
			data->tx_done = true;
		}
		/* Receive interrupt */
		if (irqs & (1 << config->rx_intr_msg_data)) {
			EP_CleanRxIntrFlags(&data->handle, 1);
			k_sem_give(&data->rx_sem);
		}
	}

	SDK_ISR_EXIT_BARRIER;
}

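/*
 * Tx reclaim callback: invoked by EP_ReclaimTxDescriptor() with the
 * completion status of a sent frame. The status is saved so that
 * netc_eth_tx() can report transmit errors.
 */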
static status_t netc_eth_reclaim_callback(ep_handle_t *handle, uint8_t ring,
					  netc_tx_frame_info_t *frameInfo, void *userData)
{
	struct netc_eth_data *data = userData;

	data->tx_info = *frameInfo;
	return kStatus_Success;
}

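/*
 * Common one-time initialization for a station interface: configures the
 * two MSIX entries (tx/rx), connects and enables the shared message
 * interrupt if needed, initializes the endpoint with a single tx/rx ring
 * pair, registers the device for ISR dispatch and starts the rx thread.
 */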
int netc_eth_init_common(const struct device *dev)
{
	const struct netc_eth_config *config = dev->config;
	struct netc_eth_data *data = dev->data;
	netc_msix_entry_t msix_entry[NETC_MSIX_ENTRY_NUM];
	netc_rx_bdr_config_t rx_bdr_config = {0};
	netc_tx_bdr_config_t tx_bdr_config = {0};
	netc_bdr_config_t bdr_config = {0};
	ep_config_t ep_config;
	uint32_t msg_addr;
	status_t result;

	config->bdr_init(&bdr_config, &rx_bdr_config, &tx_bdr_config);

	/* MSIX entry configuration */
	msg_addr = MSGINTR_GetIntrSelectAddr(NETC_MSGINTR, NETC_MSGINTR_CHANNEL);
	msix_entry[NETC_TX_MSIX_ENTRY_IDX].control = kNETC_MsixIntrMaskBit;
	msix_entry[NETC_TX_MSIX_ENTRY_IDX].msgAddr = msg_addr;
	msix_entry[NETC_TX_MSIX_ENTRY_IDX].msgData = config->tx_intr_msg_data;

	msix_entry[NETC_RX_MSIX_ENTRY_IDX].control = kNETC_MsixIntrMaskBit;
	msix_entry[NETC_RX_MSIX_ENTRY_IDX].msgAddr = msg_addr;
	msix_entry[NETC_RX_MSIX_ENTRY_IDX].msgData = config->rx_intr_msg_data;

	if (!irq_is_enabled(NETC_MSGINTR_IRQ)) {
		IRQ_CONNECT(NETC_MSGINTR_IRQ, 0, msgintr_isr, 0, 0);
		irq_enable(NETC_MSGINTR_IRQ);
	}

	/* Endpoint configuration */
	EP_GetDefaultConfig(&ep_config);
	ep_config.si = config->si_idx;
	ep_config.siConfig.txRingUse = 1;
	ep_config.siConfig.rxRingUse = 1;
	ep_config.userData = data;
	ep_config.reclaimCallback = netc_eth_reclaim_callback;
	ep_config.msixEntry = &msix_entry[0];
	ep_config.entryNum = NETC_MSIX_ENTRY_NUM;
	ep_config.port.ethMac.miiMode = kNETC_RmiiMode;
	ep_config.port.ethMac.miiSpeed = kNETC_MiiSpeed100M;
	ep_config.port.ethMac.miiDuplex = kNETC_MiiFullDuplex;
	ep_config.rxCacheMaintain = true;
	ep_config.txCacheMaintain = true;

	config->generate_mac(&data->mac_addr[0]);

	result = EP_Init(&data->handle, &data->mac_addr[0], &ep_config, &bdr_config);
	if (result != kStatus_Success) {
		return -ENOBUFS;
	}

	/* Register the device for dispatch in the shared ISR */
	for (int i = 0; i < NETC_DRV_MAX_INST_SUPPORT; i++) {
		if (!netc_dev_list[i]) {
			netc_dev_list[i] = dev;
			break;
		}
	}

	/* Unmask MSIX message interrupt */
	EP_MsixSetEntryMask(&data->handle, NETC_TX_MSIX_ENTRY_IDX, false);
	EP_MsixSetEntryMask(&data->handle, NETC_RX_MSIX_ENTRY_IDX, false);

	k_mutex_init(&data->tx_mutex);

	k_sem_init(&data->rx_sem, 0, 1);
	k_thread_create(&data->rx_thread, data->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(data->rx_thread_stack), netc_eth_rx_thread,
			(void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_NXP_IMX_RX_THREAD_PRIO), 0,
			K_NO_WAIT);
	k_thread_name_set(&data->rx_thread, "netc_eth_rx");

	return 0;
}

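/*
 * Blocking transmit: the packet is linearized into the driver's tx buffer,
 * sent on ring 0, and the function busy-waits until the tx interrupt
 * reports completion before reclaiming the descriptor. Serialized by
 * tx_mutex.
 */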
int netc_eth_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct netc_eth_data *data = dev->data;
	netc_buffer_struct_t buff = {.buffer = data->tx_buff, .length = sizeof(data->tx_buff)};
	netc_frame_struct_t frame = {.buffArray = &buff, .length = 1};
	size_t pkt_len;
	status_t result;
	int ret;

	__ASSERT(pkt, "Packet pointer is NULL");

	pkt_len = net_pkt_get_len(pkt);

	k_mutex_lock(&data->tx_mutex, K_FOREVER);

	/* Copy packet to tx buffer */
	buff.length = (uint16_t)pkt_len;
	ret = net_pkt_read(pkt, buff.buffer, pkt_len);
	if (ret) {
		LOG_ERR("Failed to copy packet to tx buffer: %d", ret);
		ret = -ENOBUFS;
		goto error;
	}

	/* Send */
	data->tx_done = false;
	result = EP_SendFrame(&data->handle, 0, &frame, NULL, NULL);
	if (result != kStatus_Success) {
		LOG_ERR("Failed to send frame");
		ret = -EIO;
		goto error;
	}

	/* Busy-wait until the tx interrupt signals completion */
	while (!data->tx_done) {
	}

	EP_ReclaimTxDescriptor(&data->handle, 0);
	if (data->tx_info.status != kNETC_EPTxSuccess) {
		LOG_ERR("Frame transmission failed");
		ret = -EIO;
		goto error;
	}
	ret = 0;
error:
	k_mutex_unlock(&data->tx_mutex);

	if (ret != 0) {
		eth_stats_update_errors_tx(data->iface);
	}
	return ret;
}

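/* Capabilities advertised to the Ethernet L2 layer. */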
enum ethernet_hw_caps netc_eth_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return (ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T |
		ETHERNET_HW_RX_CHKSUM_OFFLOAD | ETHERNET_HW_FILTERING
#if defined(CONFIG_NET_VLAN)
		| ETHERNET_HW_VLAN
#endif
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
		| ETHERNET_PROMISC_MODE
#endif
	);
}

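/*
 * Runtime configuration handler. Only ETHERNET_CONFIG_TYPE_MAC_ADDRESS is
 * supported: the new address is programmed as the primary MAC of the
 * station interface and registered with the upper layer.
 */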
int netc_eth_set_config(const struct device *dev, enum ethernet_config_type type,
			const struct ethernet_config *config)
{
	struct netc_eth_data *data = dev->data;
	const struct netc_eth_config *cfg = dev->config;
	status_t result;
	int ret = 0;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		/* Set new Ethernet MAC address and register it with the upper layer */
		memcpy(data->mac_addr, config->mac_address.addr, sizeof(data->mac_addr));
		result = EP_SetPrimaryMacAddr(&data->handle, (uint8_t *)data->mac_addr);
		if (result != kStatus_Success) {
			LOG_ERR("Failed to set primary MAC address");
			ret = -ENOTSUP;
			break;
		}
		net_if_set_link_addr(data->iface, data->mac_addr, sizeof(data->mac_addr),
				     NET_LINK_ETHERNET);
		LOG_INF("SI%d MAC set to: %02x:%02x:%02x:%02x:%02x:%02x", getSiIdx(cfg->si_idx),
			data->mac_addr[0], data->mac_addr[1], data->mac_addr[2], data->mac_addr[3],
			data->mac_addr[4], data->mac_addr[5]);
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}