/*
 * Copyright 2022 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(nxp_s32_eth);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/phy.h>
#include <ethernet/eth_stats.h>

#include <soc.h>
#include <Netc_Eth_Ip.h>
#include <Netc_Eth_Ip_Irq.h>
#include <Netc_EthSwt_Ip.h>

#include "eth.h"
#include "eth_nxp_s32_netc_priv.h"

/* Global MAC filter hash table required for the baremetal driver */
Netc_Eth_Ip_MACFilterHashTableEntryType *MACFilterHashTableAddrs[FEATURE_NETC_ETH_NUMBER_OF_CTRLS];

static void nxp_s32_eth_rx_thread(void *arg1, void *unused1, void *unused2);

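/*
 * MBOX callback wrapper: forwards a received MSI-X/MRU mailbox notification
 * to the SDK event handler attached to this MSIX entry.
 */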
static void nxp_s32_eth_msix_wrapper(const struct device *dev, uint32_t channel,
				     void *user_data, struct mbox_msg *msg)
{
	const struct nxp_s32_eth_msix *msix = (const struct nxp_s32_eth_msix *)user_data;

	ARG_UNUSED(dev);
	ARG_UNUSED(msg);

	/* Handler doesn't require any data to be passed, used only for signalling */
	msix->handler(channel, NULL, 0);
}

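/*
 * Resolve the network interface for a frame: when CONFIG_NET_VLAN is enabled
 * and a VLAN interface matches vlan_tag, return it; otherwise fall back to
 * the main interface.
 */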
static inline struct net_if *get_iface(struct nxp_s32_eth_data *ctx, uint16_t vlan_tag)
{
#if defined(CONFIG_NET_VLAN)
	struct net_if *iface;

	iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);
	if (!iface) {
		return ctx->iface;
	}

	return iface;
#else
	ARG_UNUSED(vlan_tag);

	return ctx->iface;
#endif
}

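/*
 * Common initialization for a NETC station interface (SI): initialize the
 * SDK driver, register the MRU mailbox callbacks used for MSI-X events, set
 * up the TX mutex, RX semaphore and RX thread, enable the controller, and
 * optionally generate the MAC address.
 */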
int nxp_s32_eth_initialize_common(const struct device *dev)
{
	const struct nxp_s32_eth_config *cfg = dev->config;
	struct nxp_s32_eth_data *ctx = dev->data;
	Netc_Eth_Ip_StatusType status;
	const struct nxp_s32_eth_msix *msix;
	int err;

	/* Populate the MAC filter hash table addresses for this SI */
	__ASSERT_NO_MSG(cfg->si_idx < FEATURE_NETC_ETH_NUMBER_OF_CTRLS);
	MACFilterHashTableAddrs[cfg->si_idx] = cfg->mac_filter_hash_table;

	status = Netc_Eth_Ip_Init(cfg->si_idx, &cfg->netc_cfg);
	if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Failed to initialize SI%d (%d)", cfg->si_idx, status);
		return -EIO;
	}

	for (int i = 0; i < NETC_MSIX_EVENTS_COUNT; i++) {
		msix = &cfg->msix[i];
		if (msix->mbox_channel.dev != NULL) {
			err = mbox_register_callback(&msix->mbox_channel,
						     nxp_s32_eth_msix_wrapper,
						     (void *)msix);
			if (err != 0) {
				LOG_ERR("Failed to register MRU callback on channel %u",
					msix->mbox_channel.id);
				return err;
			}
		}
	}

	k_mutex_init(&ctx->tx_mutex);
	k_sem_init(&ctx->rx_sem, 0, 1);

	k_thread_create(&ctx->rx_thread, ctx->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(ctx->rx_thread_stack),
			nxp_s32_eth_rx_thread, (void *)dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_ETH_NXP_S32_RX_THREAD_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&ctx->rx_thread, "nxp_s32_eth_rx");

	status = Netc_Eth_Ip_EnableController(cfg->si_idx);
	if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Failed to enable ENETC SI%d (%d)", cfg->si_idx, status);
		return -EIO;
	}

	if (cfg->generate_mac) {
		cfg->generate_mac(&ctx->mac_addr[0]);
	}

	return 0;
}

#if defined(CONFIG_NET_IPV6)
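/*
 * IPv6 multicast monitor callback: add or remove the MAC address derived
 * from the multicast group address to/from the SI destination-address hash
 * filter when the group is joined or left.
 */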
void nxp_s32_eth_mcast_cb(struct net_if *iface, const struct net_addr *addr, bool is_joined)
{
	const struct device *dev = net_if_get_device(iface);
	const struct nxp_s32_eth_config *cfg = dev->config;
	struct net_eth_addr mac_addr;
	Netc_Eth_Ip_StatusType status;

	if (addr->family != AF_INET6) {
		return;
	}

	net_eth_ipv6_mcast_to_mac_addr(&addr->in6_addr, &mac_addr);

	if (is_joined) {
		status = Netc_Eth_Ip_AddMulticastDstAddrToHashFilter(cfg->si_idx,
								     mac_addr.addr);
	} else {
		status = Netc_Eth_Ip_RemoveMulticastDstAddrFromHashFilter(cfg->si_idx,
									  mac_addr.addr);
	}
	if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Failed to update multicast hash table: %d", status);
	}
}
#endif /* CONFIG_NET_IPV6 */

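/*
 * Transmit a packet: copy the frame into a free TX ring buffer (reclaiming
 * already-transmitted buffers once if none is immediately available) and
 * hand it to the SDK for transmission. TX path is serialized with a mutex.
 */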
int nxp_s32_eth_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct nxp_s32_eth_data *ctx = dev->data;
	const struct nxp_s32_eth_config *cfg = dev->config;
	size_t pkt_len = net_pkt_get_len(pkt);
	int res = 0;
	Netc_Eth_Ip_StatusType status;
	Netc_Eth_Ip_BufferType buf;

	__ASSERT(pkt, "Packet pointer is NULL");

	k_mutex_lock(&ctx->tx_mutex, K_FOREVER);

	buf.length = (uint16_t)pkt_len;
	buf.data = NULL;
	status = Netc_Eth_Ip_GetTxBuff(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL);
	if (status == NETC_ETH_IP_STATUS_TX_BUFF_BUSY) {
		/* Reclaim the buffers already transmitted and try again */
		Netc_Eth_Ip_ReleaseTxBuffers(cfg->si_idx, cfg->tx_ring_idx);
		status = Netc_Eth_Ip_GetTxBuff(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL);
	}
	if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Failed to get tx buffer: %d", status);
		res = -ENOBUFS;
		goto error;
	}
	buf.length = (uint16_t)pkt_len;

	res = net_pkt_read(pkt, buf.data, pkt_len);
	if (res) {
		LOG_ERR("Failed to copy packet to tx buffer: %d", res);
		res = -ENOBUFS;
		goto error;
	}

	status = Netc_Eth_Ip_SendFrame(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL);
	if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Failed to tx frame: %d", status);
		res = -EIO;
		goto error;
	}

error:
	k_mutex_unlock(&ctx->tx_mutex);

	if (res != 0) {
		eth_stats_update_errors_tx(ctx->iface);
	}
	return res;
}

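/*
 * Allocate a net_pkt for a received frame and copy the frame data into it.
 * With VLAN support enabled, the VLAN TCI (and, for multiple RX traffic
 * classes, the priority) is extracted from the Ethernet header. Returns NULL
 * on failure and updates the RX error statistics.
 */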
static struct net_pkt *nxp_s32_eth_get_pkt(const struct device *dev,
					   Netc_Eth_Ip_BufferType *buf,
					   uint16_t *vlan_tag)
{
	struct nxp_s32_eth_data *ctx = dev->data;
	struct net_pkt *pkt = NULL;
	int res = 0;
#if defined(CONFIG_NET_VLAN)
	struct net_eth_hdr *hdr;
	struct net_eth_vlan_hdr *hdr_vlan;
#if CONFIG_NET_TC_RX_COUNT > 1
	enum net_priority prio;
#endif
#endif /* CONFIG_NET_VLAN */

	/* Use root iface, it will be updated later in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, buf->length,
					   AF_UNSPEC, 0, NETC_TIMEOUT);
	if (!pkt) {
		goto exit;
	}

	res = net_pkt_write(pkt, buf->data, buf->length);
	if (res) {
		net_pkt_unref(pkt);
		pkt = NULL;
		goto exit;
	}

#if defined(CONFIG_NET_VLAN)
	hdr = NET_ETH_HDR(pkt);
	if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
		hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
		net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci));
		*vlan_tag = net_pkt_vlan_tag(pkt);

#if CONFIG_NET_TC_RX_COUNT > 1
		prio = net_vlan2priority(net_pkt_vlan_priority(pkt));
		net_pkt_set_priority(pkt, prio);
#endif
	}
#endif /* CONFIG_NET_VLAN */

exit:
	if (!pkt) {
		eth_stats_update_errors_rx(get_iface(ctx, *vlan_tag));
	}

	return pkt;
}

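/*
 * Read a single frame from the RX ring, wrap it in a net_pkt, return the
 * ring buffer to hardware and queue the packet to the network stack.
 * Returns -ENOBUFS when the RX queue is empty, which the RX thread uses as
 * its loop-exit condition.
 */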
static int nxp_s32_eth_rx(const struct device *dev)
{
	struct nxp_s32_eth_data *ctx = dev->data;
	const struct nxp_s32_eth_config *cfg = dev->config;
	Netc_Eth_Ip_BufferType buf;
	Netc_Eth_Ip_RxInfoType info;
	Netc_Eth_Ip_StatusType status;
	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
	struct net_pkt *pkt;
	int key;
	int res = 0;

	key = irq_lock();
	status = Netc_Eth_Ip_ReadFrame(cfg->si_idx, cfg->rx_ring_idx, &buf, &info);
	if (status == NETC_ETH_IP_STATUS_RX_QUEUE_EMPTY) {
		res = -ENOBUFS;
	} else if (status != NETC_ETH_IP_STATUS_SUCCESS) {
		LOG_ERR("Error on received frame: %d (0x%X)", status, info.rxStatus);
		res = -EIO;
	} else {
		pkt = nxp_s32_eth_get_pkt(dev, &buf, &vlan_tag);
		Netc_Eth_Ip_ProvideRxBuff(cfg->si_idx, cfg->rx_ring_idx, &buf);

		if (pkt != NULL) {
			res = net_recv_data(get_iface(ctx, vlan_tag), pkt);
			if (res < 0) {
				eth_stats_update_errors_rx(get_iface(ctx, vlan_tag));
				net_pkt_unref(pkt);
				LOG_ERR("Failed to enqueue frame into rx queue: %d", res);
			}
		}
	}
	irq_unlock(key);

	return res;
}

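/*
 * RX thread: waits for RX work to be signalled on the RX semaphore and then
 * drains the RX ring, yielding after CONFIG_ETH_NXP_S32_RX_BUDGET frames so
 * other threads of the same priority get a chance to run.
 */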
static void nxp_s32_eth_rx_thread(void *arg1, void *unused1, void *unused2)
{
	const struct device *dev = (const struct device *)arg1;
	struct nxp_s32_eth_data *ctx = dev->data;
	int res;
	int work;

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	__ASSERT_NO_MSG(arg1 != NULL);
	__ASSERT_NO_MSG(ctx != NULL);

	while (1) {
		res = k_sem_take(&ctx->rx_sem, K_FOREVER);
		__ASSERT_NO_MSG(res == 0);

		work = 0;
		while (nxp_s32_eth_rx(dev) != -ENOBUFS) {
			if (++work == CONFIG_ETH_NXP_S32_RX_BUDGET) {
				/* more work to do, reschedule */
				work = 0;
				k_yield();
			}
		}
	}
}

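/* Report the hardware capabilities supported by this driver. */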
enum ethernet_hw_caps nxp_s32_eth_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return (ETHERNET_LINK_10BASE_T
		| ETHERNET_LINK_100BASE_T
		| ETHERNET_LINK_1000BASE_T
		| ETHERNET_HW_RX_CHKSUM_OFFLOAD
#if defined(CONFIG_NET_VLAN)
		| ETHERNET_HW_VLAN
#endif
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
		| ETHERNET_PROMISC_MODE
#endif
		);
}

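/*
 * Runtime configuration handler: currently only supports updating the MAC
 * address, which is programmed into the SI and registered with the network
 * interface.
 */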
int nxp_s32_eth_set_config(const struct device *dev, enum ethernet_config_type type,
			   const struct ethernet_config *config)
{
	struct nxp_s32_eth_data *ctx = dev->data;
	const struct nxp_s32_eth_config *cfg = dev->config;
	int res = 0;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		/* Set new Ethernet MAC address and register it with the upper layer */
		memcpy(ctx->mac_addr, config->mac_address.addr, sizeof(ctx->mac_addr));
		Netc_Eth_Ip_SetMacAddr(cfg->si_idx, (const uint8_t *)ctx->mac_addr);
		net_if_set_link_addr(ctx->iface, ctx->mac_addr, sizeof(ctx->mac_addr),
				     NET_LINK_ETHERNET);
		LOG_INF("SI%d MAC set to: %02x:%02x:%02x:%02x:%02x:%02x", cfg->si_idx,
			ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2],
			ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]);
		break;
	default:
		res = -ENOTSUP;
		break;
	}

	return res;
}

BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_LEN % 8) == 0,
	     "Rx ring length must be multiple of 8");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_LEN % 8) == 0,
	     "Tx ring length must be multiple of 8");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE % 8) == 0,
	     "Rx ring data buffer size must be multiple of 8");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE % 8) == 0,
	     "Tx ring data buffer size must be multiple of 8");