1 /*
2 * Copyright 2023 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT nxp_s32_gmac
8
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(nxp_s32_eth, CONFIG_ETHERNET_LOG_LEVEL);
11
12 #include <zephyr/kernel.h>
13 #include <zephyr/device.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/net/ethernet.h>
16 #include <zephyr/net/net_if.h>
17 #include <zephyr/net/net_pkt.h>
18 #include <zephyr/net/phy.h>
19 #include <ethernet/eth_stats.h>
20 #include <soc.h>
21
22 #include <Gmac_Ip.h>
23 #include <Gmac_Ip_Hw_Access.h>
24 #include <Gmac_Ip_Irq.h>
25 #include <Clock_Ip.h>
26
27 #include "eth.h"
28
29 #define ETH_NXP_S32_BUF_TIMEOUT K_MSEC(20)
30 #define ETH_NXP_S32_DMA_TX_TIMEOUT K_MSEC(20)
31
32 #define ETH_NXP_S32_MAC_ADDR_LEN 6U
33
34 #define FREESCALE_OUI_B0 0x00
35 #define FREESCALE_OUI_B1 0x04
36 #define FREESCALE_OUI_B2 0x9f
37
38 struct eth_nxp_s32_config {
39 uint8_t instance;
40 uint8_t tx_ring_idx;
41 uint8_t rx_ring_idx;
42 uint32_t rx_irq;
43 uint32_t tx_irq;
44 void (*do_config)(void);
45 const struct pinctrl_dev_config *pincfg;
46 const struct device *phy_dev;
47
48 const Gmac_CtrlConfigType ctrl_cfg;
49 GMAC_Type *base;
50 };
51
52 struct eth_nxp_s32_data {
53 struct net_if *iface;
54 uint8_t mac_addr[ETH_NXP_S32_MAC_ADDR_LEN];
55 uint8_t if_suspended;
56 struct k_mutex tx_mutex;
57 struct k_sem rx_sem;
58 struct k_sem tx_sem;
59 struct k_thread rx_thread;
60
61 K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_NXP_S32_RX_THREAD_STACK_SIZE);
62 };
63
64 static void eth_nxp_s32_rx_thread(void *arg1, void *unused1, void *unused2);
65
/* Resolve the interface a frame belongs to: the VLAN interface matching
 * vlan_tag when VLAN support is enabled and a match exists, otherwise the
 * main interface stored in the driver context.
 */
static inline struct net_if *get_iface(struct eth_nxp_s32_data *ctx, uint16_t vlan_tag)
{
#if defined(CONFIG_NET_VLAN)
	struct net_if *vlan_iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);

	return (vlan_iface != NULL) ? vlan_iface : ctx->iface;
#else
	ARG_UNUSED(vlan_tag);

	return ctx->iface;
#endif
}
83
/* Translate a PHY link speed/duplex enum into the MAC's speed and duplex
 * settings. Unknown speeds default to 1G full duplex, matching the
 * original switch's default arm.
 */
static void convert_phy_to_mac_config(Gmac_Ip_ConfigType *gmac_cfg, enum phy_link_speed phy_speed)
{
	/* Speed component */
	switch (phy_speed) {
	case LINK_HALF_10BASE_T:
	case LINK_FULL_10BASE_T:
		gmac_cfg->Speed = GMAC_SPEED_10M;
		break;
	case LINK_HALF_100BASE_T:
	case LINK_FULL_100BASE_T:
		gmac_cfg->Speed = GMAC_SPEED_100M;
		break;
	case LINK_HALF_1000BASE_T:
	case LINK_FULL_1000BASE_T:
	default:
		gmac_cfg->Speed = GMAC_SPEED_1G;
		break;
	}

	/* Duplex component */
	switch (phy_speed) {
	case LINK_HALF_10BASE_T:
	case LINK_HALF_100BASE_T:
	case LINK_HALF_1000BASE_T:
		gmac_cfg->Duplex = GMAC_HALF_DUPLEX;
		break;
	default:
		gmac_cfg->Duplex = GMAC_FULL_DUPLEX;
		break;
	}
}
115
phy_link_state_changed(const struct device * pdev,struct phy_link_state * state,void * user_data)116 static void phy_link_state_changed(const struct device *pdev,
117 struct phy_link_state *state,
118 void *user_data)
119 {
120 const struct device *dev = (struct device *)user_data;
121 const struct eth_nxp_s32_config *cfg = dev->config;
122 struct eth_nxp_s32_data *ctx = dev->data;
123 Gmac_Ip_ConfigType gmac_cfg;
124
125 ARG_UNUSED(pdev);
126
127 if (state->is_up) {
128 /* Porting phy link config to mac */
129 convert_phy_to_mac_config(&gmac_cfg, state->speed);
130 /* Set MAC configuration */
131 Gmac_Ip_SetSpeed(cfg->instance, gmac_cfg.Speed);
132
133 cfg->base->MAC_CONFIGURATION |= GMAC_MAC_CONFIGURATION_DM(gmac_cfg.Duplex);
134
135 /* net iface should be down even if PHY link state is up
136 * till the upper network layers have suspended the iface.
137 */
138 if (ctx->if_suspended) {
139 return;
140 }
141
142 LOG_DBG("Link up");
143 net_eth_carrier_on(ctx->iface);
144 } else {
145 LOG_DBG("Link down");
146 net_eth_carrier_off(ctx->iface);
147 }
148 }
149
150 #if defined(CONFIG_SOC_SERIES_S32K3XX)
/* Program the SoC-level DCM_GPR mux that selects the EMAC PHY interface
 * (MII/RMII/RGMII). Returns -EINVAL for an unsupported mode.
 */
static int select_phy_interface(Gmac_Ip_MiiModeType mode)
{
	uint32_t conf_sel;

	switch (mode) {
	case GMAC_MII_MODE:
		conf_sel = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(0U);
		break;
#if (FEATURE_GMAC_RGMII_EN == 1U)
	case GMAC_RGMII_MODE:
		conf_sel = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(1U);
		break;
#endif
	case GMAC_RMII_MODE:
		conf_sel = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(2U);
		break;
	default:
		return -EINVAL;
	}

	/* Read-modify-write only the EMAC_CONF_SEL field */
	IP_DCM_GPR->DCMRWF1 =
		(IP_DCM_GPR->DCMRWF1 & ~DCM_GPR_DCMRWF1_EMAC_CONF_SEL_MASK) | conf_sel;

	return 0;
}
175 #else
176 #error "SoC not supported"
177 #endif /* CONFIG_SOC_SERIES_S32K3XX */
178
/**
 * Driver init hook.
 *
 * Order matters: pins first, then clocks, then the PHY-interface mux
 * (latched at controller reset), then the GMAC HAL init. Finally the RX
 * thread and the per-instance config hook (IRQ connect + MAC address) run.
 *
 * @return 0 on success, negative errno otherwise
 */
static int eth_nxp_s32_init(const struct device *dev)
{
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct eth_nxp_s32_data *ctx = dev->data;
	Gmac_Ip_StatusType mac_status;
	Clock_Ip_StatusType clk_status;
	int err;

	err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT);
	if (err != 0) {
		return err;
	}

	/*
	 * Currently, clock control shim driver does not support configuring clock
	 * muxes individually, so use the HAL directly.
	 */
	clk_status = Clock_Ip_Init(&Clock_Ip_aClockConfig[CONFIG_ETH_NXP_S32_CLOCK_CONFIG_IDX]);
	if (clk_status != CLOCK_IP_SUCCESS) {
		LOG_ERR("Failed to configure clocks (%d)", clk_status);
		return -EIO;
	}

	/*
	 * PHY mode selection must be done before the controller is reset,
	 * because the interface type is latched at controller's reset
	 */
	err = select_phy_interface(cfg->ctrl_cfg.Gmac_pCtrlConfig->MiiMode);
	if (err != 0) {
		LOG_ERR("Failed to select PHY interface (%d)", err);
		return -EIO;
	}

	mac_status = Gmac_Ip_Init(cfg->instance, &cfg->ctrl_cfg);
	if (mac_status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to initialize GMAC%d (%d)", cfg->instance, mac_status);
		return -EIO;
	}

	k_mutex_init(&ctx->tx_mutex);
	/* Binary semaphores: rx_sem wakes the RX thread, tx_sem signals TX done */
	k_sem_init(&ctx->rx_sem, 0, 1);
	k_sem_init(&ctx->tx_sem, 0, 1);

	k_thread_create(&ctx->rx_thread, ctx->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(ctx->rx_thread_stack),
			eth_nxp_s32_rx_thread, (void *)dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_ETH_NXP_S32_RX_THREAD_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&ctx->rx_thread, "eth_nxp_s32_rx");

	/* Per-instance hook: connects IRQs and programs the MAC address */
	if (cfg->do_config != NULL) {
		cfg->do_config();
	}

	return 0;
}
235
/**
 * Ethernet API .start: enable the controller, unmask IRQs, and bring the
 * carrier up if the PHY link (or fixed link) is up.
 *
 * @return always 0
 */
static int eth_nxp_s32_start(const struct device *dev)
{
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct eth_nxp_s32_data *ctx = dev->data;
	struct phy_link_state state;

	Gmac_Ip_EnableController(cfg->instance);

	irq_enable(cfg->rx_irq);
	irq_enable(cfg->tx_irq);

	/* If upper layers enable the net iface then mark it as
	 * not suspended so that PHY Link changes can have the impact
	 */
	ctx->if_suspended = false;

	if (cfg->phy_dev) {
		phy_get_link_state(cfg->phy_dev, &state);

		/* Enable net_iface only when Ethernet PHY link is up or else
		 * if net_iface is enabled when link is down and tx happens
		 * in this state then the used tx buffers will never be recovered back.
		 */
		if (state.is_up == true) {
			net_eth_carrier_on(ctx->iface);
		}
	} else {
		/* Fixed-link configuration: no PHY, carrier is always on */
		net_eth_carrier_on(ctx->iface);
	}

	LOG_DBG("GMAC%d started", cfg->instance);

	return 0;
}
270
/**
 * Ethernet API .stop: mask IRQs, mark the iface suspended (so PHY link-up
 * callbacks don't re-enable the carrier), drop the carrier, and disable
 * the controller.
 *
 * @return 0 on success, -EIO if the HAL fails to disable the controller
 */
static int eth_nxp_s32_stop(const struct device *dev)
{
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct eth_nxp_s32_data *ctx = dev->data;
	Gmac_Ip_StatusType status;
	int err = 0;

	irq_disable(cfg->rx_irq);
	irq_disable(cfg->tx_irq);

	/* If upper layers disable the net iface then mark it as suspended
	 * in order to save it from the PHY link state changes
	 */
	ctx->if_suspended = true;

	net_eth_carrier_off(ctx->iface);

	status = Gmac_Ip_DisableController(cfg->instance);
	if (status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to disable controller GMAC%d (%d)", cfg->instance, status);
		err = -EIO;
	}

	LOG_DBG("GMAC%d stopped", cfg->instance);

	return err;
}
298
299 #if defined(ETH_NXP_S32_MULTICAST_FILTER)
/**
 * Multicast group join/leave monitor callback.
 *
 * Maps the joined/left IP multicast address to its multicast MAC address
 * and adds/removes it from the GMAC destination-address hash filter.
 *
 * @param iface network interface the group was joined on / left from
 * @param addr multicast IP address (IPv4 or IPv6)
 * @param is_joined true on join, false on leave
 */
static void eth_nxp_s32_mcast_cb(struct net_if *iface, const struct net_addr *addr, bool is_joined)
{
	const struct device *dev = net_if_get_device(iface);
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct net_eth_addr mac_addr;

	switch (addr->family) {
#if defined(CONFIG_NET_IPV4)
	case AF_INET:
		net_eth_ipv4_mcast_to_mac_addr(&addr->in_addr, &mac_addr);
		break;
#endif /* CONFIG_NET_IPV4 */
#if defined(CONFIG_NET_IPV6)
	case AF_INET6:
		net_eth_ipv6_mcast_to_mac_addr(&addr->in6_addr, &mac_addr);
		break;
#endif /* CONFIG_NET_IPV6 */
	default:
		/* Fix: this function is void; `return -EINVAL;` was a
		 * constraint violation. Unsupported families are ignored.
		 */
		return;
	}

	if (is_joined) {
		Gmac_Ip_AddDstAddrToHashFilter(cfg->instance, mac_addr.addr);
	} else {
		Gmac_Ip_RemoveDstAddrFromHashFilter(cfg->instance, mac_addr.addr);
	}
}
327 #endif /* ETH_NXP_S32_MULTICAST_FILTER */
328
/**
 * Ethernet iface init hook.
 *
 * Registers the multicast monitor (when the hash filter is enabled),
 * records the main iface, publishes the MAC address to the L2 layer, and
 * either forces the carrier on (fixed link) or registers the PHY link
 * state callback.
 */
static void eth_nxp_s32_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct eth_nxp_s32_data *ctx = dev->data;
#if defined(ETH_NXP_S32_MULTICAST_FILTER)
	static struct net_if_mcast_monitor mon;

	net_if_mcast_mon_register(&mon, iface, eth_nxp_s32_mcast_cb);
#endif /* ETH_NXP_S32_MULTICAST_FILTER */

	/* For VLAN, this value is only used to get the correct L2 driver.
	 * The iface pointer in context should contain the main interface
	 * if the VLANs are enabled.
	 */
	if (ctx->iface == NULL) {
		ctx->iface = iface;
	}

	ethernet_init(iface);

	net_if_set_link_addr(iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET);

	LOG_INF("GMAC%d MAC address %02x:%02x:%02x:%02x:%02x:%02x", cfg->instance,
		ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2],
		ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]);

	/* Make sure that the net iface state is not suspended unless
	 * upper layers explicitly stop the iface
	 */
	ctx->if_suspended = false;

	/* No PHY available, link is always up and MAC speed/duplex settings are fixed */
	if (cfg->phy_dev == NULL) {
		net_if_carrier_on(iface);
		return;
	}

	/*
	 * GMAC controls the PHY. If PHY is configured either as fixed
	 * link or autoneg, the callback is executed at least once
	 * immediately after setting it.
	 */
	if (!device_is_ready(cfg->phy_dev)) {
		LOG_ERR("PHY device (%p) is not ready, cannot init iface",
			cfg->phy_dev);
		return;
	}

	phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed, (void *)dev);
}
380
/**
 * Ethernet API .send: transmit one packet.
 *
 * Serialized by tx_mutex (single TX ring). The flow is: claim a DMA
 * buffer from the HAL, copy the packet into it, queue it with CRC/pad and
 * checksum insertion enabled, then block on tx_sem (given from the TX
 * completion callback) and read back the transmit status to recycle the
 * descriptor.
 *
 * @return 0 on success; -ENOBUFS or -EIO on failure (TX error stats bumped)
 */
static int eth_nxp_s32_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	size_t pkt_len = net_pkt_get_len(pkt);
	int res = 0;
	Gmac_Ip_BufferType buf;
	Gmac_Ip_TxInfoType tx_info;
	Gmac_Ip_StatusType status;
	Gmac_Ip_TxOptionsType tx_options = {
		.NoInt = FALSE,
		.CrcPadIns = GMAC_CRC_AND_PAD_INSERTION,
		.ChecksumIns = GMAC_CHECKSUM_INSERTION_PROTO_PSEUDOH
	};

	__ASSERT(pkt, "Packet pointer is NULL");

	k_mutex_lock(&ctx->tx_mutex, K_FOREVER);
	/* Drop any stale completion signal from a previous (timed-out) send */
	k_sem_reset(&ctx->tx_sem);

	/* Ask the HAL for a ring buffer large enough for pkt_len;
	 * buf.Data is filled in by the HAL.
	 */
	buf.Length = (uint16_t)pkt_len;
	buf.Data = NULL;
	status = Gmac_Ip_GetTxBuff(cfg->instance, cfg->tx_ring_idx, &buf, NULL);
	if (status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to get tx buffer (%d)", status);
		res = -ENOBUFS;
		goto error;
	}

	res = net_pkt_read(pkt, buf.Data, pkt_len);
	if (res) {
		LOG_ERR("Failed to copy packet to tx buffer (%d)", res);
		res = -ENOBUFS;
		goto error;
	}

	buf.Length = (uint16_t)pkt_len;
	status = Gmac_Ip_SendFrame(cfg->instance, cfg->tx_ring_idx, &buf, &tx_options);
	if (status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to tx frame (%d)", status);
		res = -EIO;
		goto error;
	}

	/* Wait for the transmission to complete */
	if (k_sem_take(&ctx->tx_sem, ETH_NXP_S32_DMA_TX_TIMEOUT) != 0) {
		LOG_ERR("Timeout transmitting frame");
		res = -EIO;
		goto error;
	}

	/* Restore the buffer address pointer and clear the descriptor after the status is read */
	status = Gmac_Ip_GetTransmitStatus(cfg->instance, cfg->tx_ring_idx, &buf, &tx_info);
	if (status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to restore tx buffer: %s (%d) ",
			(status == GMAC_STATUS_BUSY ? "busy" : "buf not found"), status);
		res = -EIO;
	} else if (tx_info.ErrMask != 0U) {
		LOG_ERR("Tx frame has errors (error mask 0x%X)", tx_info.ErrMask);
		res = -EIO;
	}

error:
	k_mutex_unlock(&ctx->tx_mutex);

	if (res != 0) {
		eth_stats_update_errors_tx(ctx->iface);
	}
	return res;
}
451
/**
 * Allocate a net_pkt and copy a received frame into it.
 *
 * With VLAN enabled, also parses a VLAN header from the copied frame,
 * stores the TCI in the packet and reports the tag back through
 * @p vlan_tag (and maps the VLAN priority when multiple RX traffic
 * classes are configured).
 *
 * @param dev GMAC device
 * @param buf HAL RX buffer holding the received frame
 * @param rx_info HAL RX metadata (frame length, error mask)
 * @param vlan_tag in/out: updated with the frame's VLAN tag if present
 * @return the allocated packet, or NULL on failure (RX error stats bumped)
 */
static struct net_pkt *eth_nxp_s32_get_pkt(const struct device *dev,
					   Gmac_Ip_BufferType *buf,
					   Gmac_Ip_RxInfoType *rx_info,
					   uint16_t *vlan_tag)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	struct net_pkt *pkt = NULL;
	int res = 0;
#if defined(CONFIG_NET_VLAN)
	struct net_eth_hdr *hdr;
	struct net_eth_vlan_hdr *hdr_vlan;
#if CONFIG_NET_TC_RX_COUNT > 1
	enum net_priority prio;
#endif /* CONFIG_NET_TC_RX_COUNT > 1 */
#endif /* CONFIG_NET_VLAN */

	/* Using root iface, it will be updated in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, rx_info->PktLen,
					   AF_UNSPEC, 0, ETH_NXP_S32_BUF_TIMEOUT);
	if (!pkt) {
		LOG_ERR("Failed to allocate rx buffer of length %u", rx_info->PktLen);
		goto exit;
	}

	res = net_pkt_write(pkt, buf->Data, rx_info->PktLen);
	if (res) {
		LOG_ERR("Failed to write rx frame into pkt buffer (%d)", res);
		net_pkt_unref(pkt);
		pkt = NULL;
		goto exit;
	}

#if defined(CONFIG_NET_VLAN)
	hdr = NET_ETH_HDR(pkt);
	if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
		hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
		net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci));
		*vlan_tag = net_pkt_vlan_tag(pkt);

#if CONFIG_NET_TC_RX_COUNT > 1
		prio = net_vlan2priority(net_pkt_vlan_priority(pkt));
		net_pkt_set_priority(pkt, prio);
#endif /* CONFIG_NET_TC_RX_COUNT > 1 */
	}
#endif /* CONFIG_NET_VLAN */

exit:
	if (!pkt) {
		eth_stats_update_errors_rx(get_iface(ctx, *vlan_tag));
	}

	return pkt;
}
505
/**
 * Receive one frame from the RX ring and hand it to the network stack.
 *
 * Erroneous frames are dropped (buffer is returned to the ring). The HAL
 * RX buffer is always given back via Gmac_Ip_ProvideRxBuff() once the
 * frame has been copied (or discarded).
 */
static void eth_nxp_s32_rx(const struct device *dev)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
	struct net_pkt *pkt;
	int res = 0;
	Gmac_Ip_RxInfoType rx_info = {0};
	Gmac_Ip_BufferType buf;
	Gmac_Ip_StatusType status;

	status = Gmac_Ip_ReadFrame(cfg->instance, cfg->rx_ring_idx, &buf, &rx_info);
	if (rx_info.ErrMask != 0U) {
		/* Return the buffer to the ring and drop the bad frame */
		Gmac_Ip_ProvideRxBuff(cfg->instance, cfg->rx_ring_idx, &buf);
		LOG_ERR("Rx frame has errors (error mask 0x%X)", rx_info.ErrMask);
	} else if (status == GMAC_STATUS_SUCCESS) {
		pkt = eth_nxp_s32_get_pkt(dev, &buf, &rx_info, &vlan_tag);
		/* Frame already copied into pkt (or dropped): recycle the buffer */
		Gmac_Ip_ProvideRxBuff(cfg->instance, cfg->rx_ring_idx, &buf);
		if (pkt != NULL) {
			res = net_recv_data(get_iface(ctx, vlan_tag), pkt);
			if (res < 0) {
				eth_stats_update_errors_rx(get_iface(ctx, vlan_tag));
				net_pkt_unref(pkt);
				LOG_ERR("Failed to enqueue frame into rx queue (%d)", res);
			}
		}
	}
}
534
/**
 * RX thread entry point.
 *
 * Blocks on rx_sem, which is given by the RX IRQ callback (the callback
 * also masks the RX interrupt). Then polls frames off the RX ring,
 * yielding the CPU every CONFIG_ETH_NXP_S32_RX_BUDGET frames, and finally
 * re-enables the RX interrupt, rechecking the ring once to close the race
 * between the last poll and irq_enable().
 *
 * @param arg1 the GMAC device pointer
 */
static void eth_nxp_s32_rx_thread(void *arg1, void *unused1, void *unused2)
{
	const struct device *dev = (const struct device *)arg1;
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	int res;
	int work;

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	__ASSERT_NO_MSG(arg1 != NULL);
	__ASSERT_NO_MSG(ctx != NULL);

	while (1) {
		res = k_sem_take(&ctx->rx_sem, K_FOREVER);
		__ASSERT_NO_MSG(res == 0);

		work = 0;
		while (Gmac_Ip_IsFrameAvailable(cfg->instance, cfg->rx_ring_idx)) {
			eth_nxp_s32_rx(dev);
			if (++work == CONFIG_ETH_NXP_S32_RX_BUDGET) {
				/* More work to do, reschedule */
				work = 0;
				k_yield();
			}
		}

		/* All work done, re-enable rx interrupt and exit polling */
		irq_enable(cfg->rx_irq);

		/* In case a frame arrived after last eth_nxp_s32_rx() and before irq_enable() */
		if (Gmac_Ip_IsFrameAvailable(cfg->instance, cfg->rx_ring_idx)) {
			eth_nxp_s32_rx(dev);
		}
	}
}
571
/**
 * Ethernet API .set_config: runtime configuration changes.
 *
 * Supports changing the MAC address (programmed into the controller and
 * re-registered with the L2 layer) and, when enabled, toggling
 * promiscuous mode via the MAC packet filter register.
 *
 * @return 0 on success, -EALREADY if promisc mode already matches,
 *         -ENOTSUP for unsupported config types
 */
static int eth_nxp_s32_set_config(const struct device *dev,
				  enum ethernet_config_type type,
				  const struct ethernet_config *config)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	int res = 0;
	uint32_t regval;

	/* Both may be unused depending on the enabled config cases below */
	ARG_UNUSED(cfg);
	ARG_UNUSED(regval);

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		/* Set new Ethernet MAC address and register it with the upper layer */
		memcpy(ctx->mac_addr, config->mac_address.addr, sizeof(ctx->mac_addr));
		Gmac_Ip_SetMacAddr(cfg->instance, (const uint8_t *)ctx->mac_addr);
		net_if_set_link_addr(ctx->iface, ctx->mac_addr, sizeof(ctx->mac_addr),
				     NET_LINK_ETHERNET);
		LOG_INF("MAC set to: %02x:%02x:%02x:%02x:%02x:%02x",
			ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2],
			ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]);
		break;
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
	case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
		/* Only touch the PR bit when the requested state differs */
		regval = cfg->base->MAC_PACKET_FILTER;
		if (config->promisc_mode && !(regval & GMAC_MAC_PACKET_FILTER_PR_MASK)) {
			cfg->base->MAC_PACKET_FILTER |= GMAC_MAC_PACKET_FILTER_PR_MASK;
		} else if (!config->promisc_mode && (regval & GMAC_MAC_PACKET_FILTER_PR_MASK)) {
			cfg->base->MAC_PACKET_FILTER &= ~GMAC_MAC_PACKET_FILTER_PR_MASK;
		} else {
			res = -EALREADY;
		}
		break;
#endif
	default:
		res = -ENOTSUP;
		break;
	}

	return res;
}
614
eth_nxp_s32_get_capabilities(const struct device * dev)615 static enum ethernet_hw_caps eth_nxp_s32_get_capabilities(const struct device *dev)
616 {
617 ARG_UNUSED(dev);
618
619 return (ETHERNET_LINK_10BASE_T
620 | ETHERNET_LINK_100BASE_T
621 #if (FEATURE_GMAC_RGMII_EN == 1U)
622 | ETHERNET_LINK_1000BASE_T
623 #endif
624 | ETHERNET_DUPLEX_SET
625 | ETHERNET_HW_TX_CHKSUM_OFFLOAD
626 | ETHERNET_HW_RX_CHKSUM_OFFLOAD
627 #if defined(CONFIG_NET_VLAN)
628 | ETHERNET_HW_VLAN
629 #endif
630 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
631 | ETHERNET_PROMISC_MODE
632 #endif
633 );
634 }
635
eth_nxp_s32_tx_irq(const struct device * dev)636 static void eth_nxp_s32_tx_irq(const struct device *dev)
637 {
638 const struct eth_nxp_s32_config *cfg = dev->config;
639
640 GMAC_TxIRQHandler(cfg->instance, cfg->tx_ring_idx);
641 }
642
eth_nxp_s32_rx_irq(const struct device * dev)643 static void eth_nxp_s32_rx_irq(const struct device *dev)
644 {
645 const struct eth_nxp_s32_config *cfg = dev->config;
646
647 GMAC_RxIRQHandler(cfg->instance, cfg->rx_ring_idx);
648 }
649
/* Ethernet L2 driver API, bound to each iface by ETH_NET_DEVICE_DT_INST_DEFINE() */
static const struct ethernet_api eth_api = {
	.iface_api.init = eth_nxp_s32_iface_init,
	.get_capabilities = eth_nxp_s32_get_capabilities,
	.start = eth_nxp_s32_start,
	.stop = eth_nxp_s32_stop,
	.send = eth_nxp_s32_tx,
	.set_config = eth_nxp_s32_set_config,
};
658
659
/* GMAC DMA constraints: each ring's total buffer memory must be a multiple
 * of the MTL FIFO block size, and each individual buffer must be a multiple
 * of the DMA data bus width.
 */
BUILD_ASSERT(((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_RX_RING_LEN)
	     % FEATURE_GMAC_MTL_RX_FIFO_BLOCK_SIZE) == 0,
	     "CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_RX_RING_LEN "
	     "must be multiple of RX FIFO block size");
BUILD_ASSERT(((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_TX_RING_LEN)
	     % FEATURE_GMAC_MTL_TX_FIFO_BLOCK_SIZE) == 0,
	     "CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_TX_RING_LEN "
	     "must be multiple of TX FIFO block size");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE % FEATURE_GMAC_DATA_BUS_WIDTH_BYTES) == 0,
	     "CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE must be multiple of the data bus width");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE % FEATURE_GMAC_DATA_BUS_WIDTH_BYTES) == 0,
	     "CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE must be multiple of the data bus width");
672
/* Devicetree helpers for an optional `fixed-link` child node */
#define ETH_NXP_S32_FIXED_LINK_NODE(n)					\
	DT_INST_CHILD(n, fixed_link)

#define ETH_NXP_S32_IS_FIXED_LINK(n)					\
	DT_NODE_EXISTS(ETH_NXP_S32_FIXED_LINK_NODE(n))

#define ETH_NXP_S32_FIXED_LINK_SPEED(n)					\
	DT_PROP(ETH_NXP_S32_FIXED_LINK_NODE(n), speed)

#define ETH_NXP_S32_FIXED_LINK_FULL_DUPLEX(n)				\
	DT_PROP(ETH_NXP_S32_FIXED_LINK_NODE(n), full_duplex)

/* MAC speed: the fixed-link speed property (e.g. 100 -> GMAC_SPEED_100M),
 * or 100M by default when a PHY negotiates the actual speed.
 */
#define ETH_NXP_S32_MAC_SPEED(n)					\
	COND_CODE_1(ETH_NXP_S32_IS_FIXED_LINK(n),			\
		(_CONCAT(_CONCAT(GMAC_SPEED_, ETH_NXP_S32_FIXED_LINK_SPEED(n)), M)),	\
		(GMAC_SPEED_100M))

/* MAC duplex: from the fixed-link node, or full duplex by default */
#define ETH_NXP_S32_MAC_DUPLEX(n)					\
	COND_CODE_1(ETH_NXP_S32_IS_FIXED_LINK(n),			\
		(COND_CODE_1(ETH_NXP_S32_FIXED_LINK_FULL_DUPLEX(n),	\
			(GMAC_FULL_DUPLEX), (GMAC_HALF_DUPLEX))),	\
		(GMAC_FULL_DUPLEX))

/* Map the `phy-connection-type` DT string (mii/rmii/rgmii) to a HAL enum */
#define ETH_NXP_S32_MAC_MII(n)						\
	_CONCAT(_CONCAT(GMAC_, DT_INST_STRING_UPPER_TOKEN(n, phy_connection_type)), _MODE)

/* Connect the named (tx/rx) interrupt to its eth_nxp_s32_<name>_irq ISR */
#define ETH_NXP_S32_IRQ_INIT(n, name)					\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, name, irq),			\
		DT_INST_IRQ_BY_NAME(n, name, priority),			\
		eth_nxp_s32_##name##_irq,				\
		DEVICE_DT_INST_GET(n),					\
		COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, flags),		\
			(DT_INST_IRQ_BY_NAME(n, name, flags)), (0)));
706
/* Defines eth_nxp_s32_init_config_<n>(), run once from eth_nxp_s32_init():
 * connects the TX/RX IRQs and programs the MAC address (random with the
 * Freescale OUI when zephyr,random-mac-address is set, otherwise the
 * address currently held by the controller is read back).
 */
#define ETH_NXP_S32_INIT_CONFIG(n)						\
	static void eth_nxp_s32_init_config_##n(void)				\
	{									\
		const struct device *dev = DEVICE_DT_INST_GET(n);		\
		struct eth_nxp_s32_data *ctx = dev->data;			\
		const struct eth_nxp_s32_config *cfg = dev->config;		\
										\
		ETH_NXP_S32_IRQ_INIT(n, tx);					\
		ETH_NXP_S32_IRQ_INIT(n, rx);					\
										\
		COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), (	\
			gen_random_mac(ctx->mac_addr, FREESCALE_OUI_B0,		\
				FREESCALE_OUI_B1, FREESCALE_OUI_B2);		\
			Gmac_Ip_SetMacAddr(cfg->instance, ctx->mac_addr);	\
		), (								\
			Gmac_Ip_GetMacAddr(cfg->instance, ctx->mac_addr);	\
		))								\
	}
725
/* HAL RX-complete callback: masks the RX IRQ and wakes the RX thread,
 * which re-enables the IRQ once the ring has been drained.
 */
#define ETH_NXP_S32_RX_CALLBACK(n)						\
	static void eth_nxp_s32_rx_callback_##n(uint8_t inst, uint8_t chan)	\
	{									\
		const struct device *dev = DEVICE_DT_INST_GET(n);		\
		struct eth_nxp_s32_data *ctx = dev->data;			\
		const struct eth_nxp_s32_config *cfg = dev->config;		\
										\
		ARG_UNUSED(inst);						\
		ARG_UNUSED(chan);						\
										\
		/* Rx irq will be re-enabled from Rx thread */			\
		irq_disable(cfg->rx_irq);					\
		k_sem_give(&ctx->rx_sem);					\
	}

/* HAL TX-complete callback: unblocks the sender waiting in eth_nxp_s32_tx() */
#define ETH_NXP_S32_TX_CALLBACK(n)						\
	static void eth_nxp_s32_tx_callback_##n(uint8_t inst, uint8_t chan)	\
	{									\
		const struct device *dev = DEVICE_DT_INST_GET(n);		\
		struct eth_nxp_s32_data *ctx = dev->data;			\
										\
		ARG_UNUSED(inst);						\
		ARG_UNUSED(chan);						\
										\
		k_sem_give(&ctx->tx_sem);					\
	}
752
/* Statically allocate one ring's DMA descriptors and packet buffers in
 * non-cacheable memory, aligned as the GMAC DMA requires.
 */
#define _ETH_NXP_S32_RING(n, name, len, buf_size)				\
	static Gmac_Ip_BufferDescriptorType eth_nxp_s32_##name##ring_desc_##n[len]	\
		__nocache __aligned(FEATURE_GMAC_BUFFDESCR_ALIGNMENT_BYTES);	\
	static uint8_t eth_nxp_s32_##name##ring_buf_##n[len * buf_size]		\
		__nocache __aligned(FEATURE_GMAC_BUFF_ALIGNMENT_BYTES)

#define ETH_NXP_S32_RX_RING(n)							\
	_ETH_NXP_S32_RING(n, rx,						\
			  CONFIG_ETH_NXP_S32_RX_RING_LEN,			\
			  CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE)

#define ETH_NXP_S32_TX_RING(n)							\
	_ETH_NXP_S32_RING(n, tx,						\
			  CONFIG_ETH_NXP_S32_TX_RING_LEN,			\
			  CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE)

/* Time-aware shaper disabled: no gate control list configured */
#define ETH_NXP_S32_MAC_TXTIMESHAPER_CONFIG(n)					\
	static const Gmac_Ip_TxTimeAwareShaper eth_nxp_s32_mac_txtimeshaper_config_##n = {\
		.GateControlList = NULL,					\
	}

/* RX ring config: RI (receive) interrupt enabled, callback wakes RX thread */
#define ETH_NXP_S32_MAC_RXRING_CONFIG(n)					\
	static const Gmac_Ip_RxRingConfigType eth_nxp_s32_mac_rxring_config_##n = {	\
		.RingDesc = eth_nxp_s32_rxring_desc_##n,			\
		.Callback = eth_nxp_s32_rx_callback_##n,			\
		.Buffer = eth_nxp_s32_rxring_buf_##n,				\
		.Interrupts = (uint32_t)GMAC_CH_INTERRUPT_RI,			\
		.BufferLen = CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE,		\
		.RingSize = CONFIG_ETH_NXP_S32_RX_RING_LEN,			\
		.PriorityMask = 0U,						\
		.DmaBurstLength = 32U,						\
	}

/* TX ring config: TI (transmit) interrupt enabled, callback signals tx_sem */
#define ETH_NXP_S32_MAC_TXRING_CONFIG(n)					\
	static const Gmac_Ip_TxRingConfigType eth_nxp_s32_mac_txring_config_##n = {	\
		.Weight = 0U,							\
		.IdleSlopeCredit = 0U,						\
		.SendSlopeCredit = 0U,						\
		.HiCredit = 0U,							\
		.LoCredit = 0,							\
		.RingDesc = eth_nxp_s32_txring_desc_##n,			\
		.Callback = eth_nxp_s32_tx_callback_##n,			\
		.Buffer = eth_nxp_s32_txring_buf_##n,				\
		.Interrupts = (uint32_t)GMAC_CH_INTERRUPT_TI,			\
		.BufferLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE,		\
		.RingSize = CONFIG_ETH_NXP_S32_TX_RING_LEN,			\
		.PriorityMask = 0U,						\
		.DmaBurstLength = 32U,						\
		.QueueOpMode = GMAC_OP_MODE_DCB_GEN,				\
	}
803
/* Packet filter: hash-filtered multicast when the multicast filter option
 * is enabled, otherwise accept all multicast frames.
 */
#define ETH_NXP_S32_MAC_PKT_FILTER(n)						\
	((uint32_t)(0U								\
		COND_CODE_1(CONFIG_ETH_NXP_S32_MULTICAST_FILTER,		\
			(|GMAC_PKT_FILTER_HASH_MULTICAST),			\
			(|GMAC_PKT_FILTER_PASS_ALL_MULTICAST))			\
	))

/* Base MAC features: CRC stripping, auto padding, checksum offload, plus
 * MAC-level loopback when the loopback test option is enabled.
 */
#define ETH_NXP_S32_MAC_CONF(n)							\
	((uint32_t)(GMAC_MAC_CONFIG_CRC_STRIPPING				\
		| GMAC_MAC_CONFIG_AUTO_PAD					\
		| GMAC_MAC_CONFIG_CHECKSUM_OFFLOAD				\
		IF_ENABLED(CONFIG_ETH_NXP_S32_LOOPBACK,				\
			(|GMAC_MAC_CONFIG_LOOPBACK))				\
	))

/* Per-instance HAL MAC config; EnableCtrl is false because the controller
 * is enabled later from eth_nxp_s32_start().
 */
#define ETH_NXP_S32_MAC_CONFIG(n)						\
	static const Gmac_Ip_ConfigType eth_nxp_s32_mac_config_##n = {		\
		.RxRingCount = 1U,						\
		.TxRingCount = 1U,						\
		.Interrupts = 0U,						\
		.Callback = NULL,						\
		.TxSchedAlgo = GMAC_SCHED_ALGO_SP,				\
		.MiiMode = ETH_NXP_S32_MAC_MII(n),				\
		.Speed = ETH_NXP_S32_MAC_SPEED(n),				\
		.Duplex = ETH_NXP_S32_MAC_DUPLEX(n),				\
		.MacConfig = ETH_NXP_S32_MAC_CONF(n),				\
		.MacPktFilterConfig = ETH_NXP_S32_MAC_PKT_FILTER(n),		\
		.EnableCtrl = false,						\
	}
833
/* Initial MAC address from DT (local-mac-address), with a build-time check
 * that either a valid fixed address or a random one was requested.
 */
#define ETH_NXP_S32_MAC_ADDR(n)							\
	BUILD_ASSERT(DT_INST_PROP(n, zephyr_random_mac_address) ||		\
		NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)),			\
		"eth_nxp_s32_gmac requires either a fixed or random MAC address"); \
	static const uint8_t eth_nxp_s32_mac_addr_##n[ETH_NXP_S32_MAC_ADDR_LEN] = \
		DT_INST_PROP_OR(n, local_mac_address, {0U})

/* HAL-internal per-controller state storage */
#define ETH_NXP_S32_MAC_STATE(n) Gmac_Ip_StateType eth_nxp_s32_mac_state_##n

/* Aggregate HAL controller configuration (state, MAC, rings, address, shaper) */
#define ETH_NXP_S32_CTRL_CONFIG(n)						\
	{									\
		.Gmac_pCtrlState = &eth_nxp_s32_mac_state_##n,			\
		.Gmac_pCtrlConfig = &eth_nxp_s32_mac_config_##n,		\
		.Gmac_paCtrlRxRingConfig = &eth_nxp_s32_mac_rxring_config_##n,	\
		.Gmac_paCtrlTxRingConfig = &eth_nxp_s32_mac_txring_config_##n,	\
		.Gmac_pau8CtrlPhysAddr = &eth_nxp_s32_mac_addr_##n[0],		\
		.Gmac_pCtrlTxTimeAwareShaper = &eth_nxp_s32_mac_txtimeshaper_config_##n,\
	}

/* Map the DT register address to the HAL instance index: each candidate
 * instance contributes its index when the base address matches, 0 otherwise,
 * and the results are OR-ed together.
 */
#define ETH_NXP_S32_HW_INSTANCE_CHECK(i, n)					\
	((DT_INST_REG_ADDR(n) == IP_GMAC_##i##_BASE) ? i : 0)

#define ETH_NXP_S32_HW_INSTANCE(n)						\
	LISTIFY(__DEBRACKET FEATURE_GMAC_NUM_INSTANCES,				\
		ETH_NXP_S32_HW_INSTANCE_CHECK, (|), n)

/* PHY device handle: NULL for fixed-link or when no phy-handle is given */
#define ETH_NXP_S32_PHY_DEV(n)							\
	COND_CODE_1(ETH_NXP_S32_IS_FIXED_LINK(n), NULL,				\
		(COND_CODE_1(DT_INST_NODE_HAS_PROP(n, phy_handle),		\
			(DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle))), NULL)))
864
/* Instantiate one GMAC device: callbacks, init hook, rings, HAL configs,
 * MAC address, pinctrl, the immutable config struct, the mutable data
 * struct, and finally the Zephyr Ethernet device definition.
 */
#define ETH_NXP_S32_DEVICE(n)							\
	ETH_NXP_S32_TX_CALLBACK(n)						\
	ETH_NXP_S32_RX_CALLBACK(n)						\
	ETH_NXP_S32_INIT_CONFIG(n)						\
	ETH_NXP_S32_RX_RING(n);							\
	ETH_NXP_S32_TX_RING(n);							\
	ETH_NXP_S32_MAC_STATE(n);						\
	ETH_NXP_S32_MAC_TXTIMESHAPER_CONFIG(n);					\
	ETH_NXP_S32_MAC_RXRING_CONFIG(n);					\
	ETH_NXP_S32_MAC_TXRING_CONFIG(n);					\
	ETH_NXP_S32_MAC_CONFIG(n);						\
	ETH_NXP_S32_MAC_ADDR(n);						\
	PINCTRL_DT_INST_DEFINE(n);						\
										\
	static const struct eth_nxp_s32_config eth_nxp_s32_config_##n = {	\
		.instance = ETH_NXP_S32_HW_INSTANCE(n),				\
		.base = (GMAC_Type *)DT_INST_REG_ADDR(n),			\
		.ctrl_cfg = ETH_NXP_S32_CTRL_CONFIG(n),				\
		.do_config = eth_nxp_s32_init_config_##n,			\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
		.phy_dev = ETH_NXP_S32_PHY_DEV(n),				\
		.rx_irq = DT_INST_IRQ_BY_NAME(n, rx, irq),			\
		.tx_irq = DT_INST_IRQ_BY_NAME(n, tx, irq),			\
		.tx_ring_idx = 0U,						\
		.rx_ring_idx = 0U,						\
	};									\
										\
	static struct eth_nxp_s32_data eth_nxp_s32_data_##n;			\
										\
	ETH_NET_DEVICE_DT_INST_DEFINE(n,					\
				eth_nxp_s32_init,				\
				NULL,						\
				&eth_nxp_s32_data_##n,				\
				&eth_nxp_s32_config_##n,			\
				CONFIG_ETH_INIT_PRIORITY,			\
				&eth_api,					\
				NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(ETH_NXP_S32_DEVICE)
904