1 /*
2 * Copyright 2022 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
8 #include <zephyr/logging/log.h>
9 LOG_MODULE_REGISTER(nxp_s32_eth);
10
11 #include <zephyr/kernel.h>
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/mbox.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/net/ethernet.h>
16 #include <zephyr/net/net_if.h>
17 #include <zephyr/net/net_pkt.h>
18 #include <zephyr/net/phy.h>
19 #include <ethernet/eth_stats.h>
20
21 #include <soc.h>
22 #include <Netc_Eth_Ip.h>
23 #include <Netc_Eth_Ip_Irq.h>
24 #include <Netc_EthSwt_Ip.h>
25
26 #include "eth.h"
27 #include "eth_nxp_s32_netc_priv.h"
28
/* Global MAC filter hash table required for the baremetal driver.
 * One slot per NETC controller/SI; each slot is populated in
 * nxp_s32_eth_initialize_common() with the table supplied by that
 * instance's driver config (cfg->mac_filter_hash_table).
 */
Netc_Eth_Ip_MACFilterHashTableEntryType * MACFilterHashTableAddrs[FEATURE_NETC_ETH_NUMBER_OF_CTRLS];
31
32 static void nxp_s32_eth_rx_thread(void *arg1, void *unused1, void *unused2);
33
nxp_s32_eth_msix_wrapper(const struct device * dev,uint32_t channel,void * user_data,struct mbox_msg * msg)34 static void nxp_s32_eth_msix_wrapper(const struct device *dev, uint32_t channel,
35 void *user_data, struct mbox_msg *msg)
36 {
37 const struct nxp_s32_eth_msix *msix = (const struct nxp_s32_eth_msix *)user_data;
38
39 ARG_UNUSED(dev);
40 ARG_UNUSED(msg);
41
42 /* Handler doesn't require any data to be passed, used only for signalling */
43 msix->handler(channel, NULL, 0);
44 }
45
get_iface(struct nxp_s32_eth_data * ctx)46 static inline struct net_if *get_iface(struct nxp_s32_eth_data *ctx)
47 {
48 return ctx->iface;
49 }
50
/**
 * @brief Common initialization path for a NETC station interface (SI).
 *
 * Sequence: register the SI's MAC filter hash table, initialize the SI via
 * the SDK, hook up the MBOX (MRU) channels used for MSIX signalling, start
 * the RX thread, enable the controller and finally (optionally) generate a
 * MAC address. The order of the SDK calls is significant.
 *
 * @param dev device instance
 * @return 0 on success, -EIO on SDK failure, or the MBOX registration error
 */
int nxp_s32_eth_initialize_common(const struct device *dev)
{
	const struct nxp_s32_eth_config *cfg = dev->config;
	struct nxp_s32_eth_data *ctx = dev->data;
	Netc_Eth_Ip_StatusType status;
	const struct nxp_s32_eth_msix *msix;
	int err;

	/* Populate the MAC filter hash table addresses for this SI */
	__ASSERT_NO_MSG(cfg->si_idx < FEATURE_NETC_ETH_NUMBER_OF_CTRLS);
	MACFilterHashTableAddrs[cfg->si_idx] = cfg->mac_filter_hash_table;

	status = Netc_Eth_Ip_Init(cfg->si_idx, &cfg->netc_cfg);
	if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Failed to initialize SI%d (%d)", cfg->si_idx, status);
		return -EIO;
	}

	/* Route each MSIX event through the MBOX driver to its SDK handler.
	 * Channels whose MBOX is not ready are silently skipped — NOTE(review):
	 * presumably intentional for events not wired up in devicetree; confirm.
	 */
	for (int i = 0; i < NETC_MSIX_EVENTS_COUNT; i++) {
		msix = &cfg->msix[i];
		if (mbox_is_ready_dt(&msix->mbox_spec)) {
			err = mbox_register_callback_dt(&msix->mbox_spec,
							nxp_s32_eth_msix_wrapper,
							(void *)msix);
			if (err != 0) {
				LOG_ERR("Failed to register MRU callback on channel %u",
					msix->mbox_spec.channel_id);
				return err;
			}
		}
	}

	k_mutex_init(&ctx->tx_mutex);
	/* Binary semaphore: RX ISR gives, RX thread takes */
	k_sem_init(&ctx->rx_sem, 0, 1);

	/* Cooperative RX thread; starts immediately and blocks on rx_sem */
	k_thread_create(&ctx->rx_thread, ctx->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(ctx->rx_thread_stack),
			nxp_s32_eth_rx_thread, (void *)dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_ETH_NXP_S32_RX_THREAD_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&ctx->rx_thread, "nxp_s32_eth_rx");

	status = Netc_Eth_Ip_EnableController(cfg->si_idx);
	if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Failed to enable ENETC SI%d (%d)", cfg->si_idx, status);
		return -EIO;
	}

	/* Optional per-instance MAC address generator (e.g. random/unique) */
	if (cfg->generate_mac) {
		cfg->generate_mac(&ctx->mac_addr[0]);
	}

	return 0;
}
105
nxp_s32_eth_mcast_filter(const struct device * dev,const struct ethernet_filter * filter)106 void nxp_s32_eth_mcast_filter(const struct device *dev, const struct ethernet_filter *filter)
107 {
108 const struct nxp_s32_eth_config *cfg = dev->config;
109 Netc_Eth_Ip_StatusType status;
110
111 if (filter->set) {
112 status = Netc_Eth_Ip_AddMulticastDstAddrToHashFilter(cfg->si_idx,
113 filter->mac_address.addr);
114 } else {
115 status = Netc_Eth_Ip_RemoveMulticastDstAddrFromHashFilter(cfg->si_idx,
116 filter->mac_address.addr);
117 }
118 if (status != NETC_ETH_IP_STATUS_SUCCESS) {
119 LOG_ERR("Failed to update multicast hash table: %d", status);
120 }
121 }
122
nxp_s32_eth_tx(const struct device * dev,struct net_pkt * pkt)123 int nxp_s32_eth_tx(const struct device *dev, struct net_pkt *pkt)
124 {
125 struct nxp_s32_eth_data *ctx = dev->data;
126 const struct nxp_s32_eth_config *cfg = dev->config;
127 size_t pkt_len = net_pkt_get_len(pkt);
128 int res = 0;
129 Netc_Eth_Ip_StatusType status;
130 Netc_Eth_Ip_BufferType buf;
131
132 __ASSERT(pkt, "Packet pointer is NULL");
133
134 k_mutex_lock(&ctx->tx_mutex, K_FOREVER);
135
136 buf.length = (uint16_t)pkt_len;
137 buf.data = NULL;
138 status = Netc_Eth_Ip_GetTxBuff(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL);
139 if (status == NETC_ETH_IP_STATUS_TX_BUFF_BUSY) {
140 /* Reclaim the buffers already transmitted and try again */
141 Netc_Eth_Ip_ReleaseTxBuffers(cfg->si_idx, cfg->tx_ring_idx);
142 status = Netc_Eth_Ip_GetTxBuff(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL);
143 }
144 if (status != NETC_ETH_IP_STATUS_SUCCESS) {
145 LOG_ERR("Failed to get tx buffer: %d", status);
146 res = -ENOBUFS;
147 goto error;
148 }
149 buf.length = (uint16_t)pkt_len;
150
151 res = net_pkt_read(pkt, buf.data, pkt_len);
152 if (res) {
153 LOG_ERR("Failed to copy packet to tx buffer: %d", res);
154 res = -ENOBUFS;
155 goto error;
156 }
157
158 status = Netc_Eth_Ip_SendFrame(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL);
159 if (status != NETC_ETH_IP_STATUS_SUCCESS) {
160 LOG_ERR("Failed to tx frame: %d", status);
161 res = -EIO;
162 goto error;
163 }
164
165 error:
166 k_mutex_unlock(&ctx->tx_mutex);
167
168 if (res != 0) {
169 eth_stats_update_errors_tx(ctx->iface);
170 }
171 return res;
172 }
173
/* Allocate a net_pkt and copy a received SDK buffer into it.
 * Returns NULL (and bumps the RX error stats) on allocation or copy failure.
 */
static struct net_pkt *nxp_s32_eth_get_pkt(const struct device *dev,
					   Netc_Eth_Ip_BufferType *buf)
{
	struct nxp_s32_eth_data *ctx = dev->data;
	struct net_pkt *pkt;

	/* Use root iface, it will be updated later in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, buf->length,
					   AF_UNSPEC, 0, NETC_TIMEOUT);
	if (pkt != NULL && net_pkt_write(pkt, buf->data, buf->length) != 0) {
		/* Copy failed: release the packet and fall through to stats */
		net_pkt_unref(pkt);
		pkt = NULL;
	}

	if (pkt == NULL) {
		eth_stats_update_errors_rx(get_iface(ctx));
	}

	return pkt;
}
202
/**
 * @brief Pull one frame from the RX ring and hand it to the network stack.
 *
 * The SDK ReadFrame/ProvideRxBuff pair runs with interrupts locked so the
 * ring state cannot be touched by the RX interrupt in between — NOTE(review):
 * the lock also covers the packet allocation/copy in nxp_s32_eth_get_pkt(),
 * which lengthens the critical section; confirm this is required by the SDK.
 *
 * @param dev device instance
 * @return 0 on success, -ENOBUFS when the RX queue is empty (drained),
 *         -EIO on SDK read error, or net_recv_data()'s error code
 */
static int nxp_s32_eth_rx(const struct device *dev)
{
	struct nxp_s32_eth_data *ctx = dev->data;
	const struct nxp_s32_eth_config *cfg = dev->config;
	Netc_Eth_Ip_BufferType buf;
	Netc_Eth_Ip_RxInfoType info;
	Netc_Eth_Ip_StatusType status;
	struct net_pkt *pkt;
	int key;
	int res = 0;

	key = irq_lock();
	status = Netc_Eth_Ip_ReadFrame(cfg->si_idx, cfg->rx_ring_idx, &buf, &info);
	if (status == NETC_ETH_IP_STATUS_RX_QUEUE_EMPTY) {
		/* Queue drained: caller's poll loop uses -ENOBUFS to stop */
		res = -ENOBUFS;
	} else if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Error on received frame: %d (0x%X)", status, info.rxStatus);
		res = -EIO;
	} else {
		pkt = nxp_s32_eth_get_pkt(dev, &buf);
		/* Return the SDK buffer to the ring regardless of copy outcome */
		Netc_Eth_Ip_ProvideRxBuff(cfg->si_idx, cfg->rx_ring_idx, &buf);

		if (pkt != NULL) {
			res = net_recv_data(get_iface(ctx), pkt);
			if (res < 0) {
				/* Stack rejected the packet: drop and count it */
				eth_stats_update_errors_rx(get_iface(ctx));
				net_pkt_unref(pkt);
				LOG_ERR("Failed to enqueue frame into rx queue: %d", res);
			}
		}
	}
	irq_unlock(key);

	return res;
}
238
nxp_s32_eth_rx_thread(void * arg1,void * unused1,void * unused2)239 static void nxp_s32_eth_rx_thread(void *arg1, void *unused1, void *unused2)
240 {
241 const struct device *dev = (const struct device *)arg1;
242 struct nxp_s32_eth_data *ctx = dev->data;
243 int res;
244 int work;
245
246 ARG_UNUSED(unused1);
247 ARG_UNUSED(unused2);
248 __ASSERT_NO_MSG(arg1 != NULL);
249 __ASSERT_NO_MSG(ctx != NULL);
250
251 while (1) {
252 res = k_sem_take(&ctx->rx_sem, K_FOREVER);
253 __ASSERT_NO_MSG(res == 0);
254
255 work = 0;
256 while (nxp_s32_eth_rx(dev) != -ENOBUFS) {
257 if (++work == CONFIG_ETH_NXP_S32_RX_BUDGET) {
258 /* more work to do, reschedule */
259 work = 0;
260 k_yield();
261 }
262 }
263 }
264 }
265
nxp_s32_eth_get_capabilities(const struct device * dev)266 enum ethernet_hw_caps nxp_s32_eth_get_capabilities(const struct device *dev)
267 {
268 ARG_UNUSED(dev);
269
270 return (ETHERNET_LINK_10BASE_T
271 | ETHERNET_LINK_100BASE_T
272 | ETHERNET_LINK_1000BASE_T
273 | ETHERNET_HW_RX_CHKSUM_OFFLOAD
274 | ETHERNET_HW_FILTERING
275 #if defined(CONFIG_NET_VLAN)
276 | ETHERNET_HW_VLAN
277 #endif
278 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
279 | ETHERNET_PROMISC_MODE
280 #endif
281 );
282 }
283
nxp_s32_eth_set_config(const struct device * dev,enum ethernet_config_type type,const struct ethernet_config * config)284 int nxp_s32_eth_set_config(const struct device *dev, enum ethernet_config_type type,
285 const struct ethernet_config *config)
286 {
287 struct nxp_s32_eth_data *ctx = dev->data;
288 const struct nxp_s32_eth_config *cfg = dev->config;
289 int res = 0;
290
291 switch (type) {
292 case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
293 /* Set new Ethernet MAC address and register it with the upper layer */
294 memcpy(ctx->mac_addr, config->mac_address.addr, sizeof(ctx->mac_addr));
295 Netc_Eth_Ip_SetMacAddr(cfg->si_idx, (const uint8_t *)ctx->mac_addr);
296 net_if_set_link_addr(ctx->iface, ctx->mac_addr, sizeof(ctx->mac_addr),
297 NET_LINK_ETHERNET);
298 LOG_INF("SI%d MAC set to: %02x:%02x:%02x:%02x:%02x:%02x", cfg->si_idx,
299 ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2],
300 ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]);
301 break;
302 case ETHERNET_CONFIG_TYPE_FILTER:
303 nxp_s32_eth_mcast_filter(dev, &config->filter);
304 break;
305 default:
306 res = -ENOTSUP;
307 break;
308 }
309
310 return res;
311 }
312
/* Compile-time sanity checks: ring lengths and per-buffer sizes must be
 * multiples of 8 (per the assertion messages below) — NOTE(review):
 * presumably a NETC descriptor-ring alignment requirement; confirm in the
 * reference manual.
 */
BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_LEN % 8) == 0,
	     "Rx ring length must be multiple of 8");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_LEN % 8) == 0,
	     "Tx ring length must be multiple of 8");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE % 8) == 0,
	     "Rx ring data buffer size must be multiple of 8");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE % 8) == 0,
	     "Tx ring data buffer size must be multiple of 8");
321