1 /*
2 * Copyright 2023 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT nxp_s32_gmac
8
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(nxp_s32_eth, CONFIG_ETHERNET_LOG_LEVEL);
11
12 #include <zephyr/kernel.h>
13 #include <zephyr/device.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/net/ethernet.h>
16 #include <zephyr/net/net_if.h>
17 #include <zephyr/net/net_pkt.h>
18 #include <zephyr/net/phy.h>
19 #include <ethernet/eth_stats.h>
20 #include <soc.h>
21
22 #include <Gmac_Ip.h>
23 #include <Gmac_Ip_Hw_Access.h>
24 #include <Gmac_Ip_Irq.h>
25 #include <Clock_Ip.h>
26
27 #include "eth.h"
28
/* Max wait for an rx pkt buffer allocation and for tx completion, respectively */
#define ETH_NXP_S32_BUF_TIMEOUT		K_MSEC(20)
#define ETH_NXP_S32_DMA_TX_TIMEOUT	K_MSEC(20)

/* Length of an Ethernet MAC address in bytes */
#define ETH_NXP_S32_MAC_ADDR_LEN	6U

/* Freescale/NXP OUI used as prefix when generating random MAC addresses */
#define FREESCALE_OUI_B0		0x00
#define FREESCALE_OUI_B1		0x04
#define FREESCALE_OUI_B2		0x9f
37
/* Build-time, per-instance driver configuration */
struct eth_nxp_s32_config {
	uint8_t instance;	/* GMAC hardware instance index */
	uint8_t tx_ring_idx;	/* tx DMA ring/channel used by this driver */
	uint8_t rx_ring_idx;	/* rx DMA ring/channel used by this driver */
	uint32_t rx_irq;	/* rx interrupt line number */
	uint32_t tx_irq;	/* tx interrupt line number */
	void (*do_config)(void);	/* optional extra config hook run at init */
	const struct pinctrl_dev_config *pincfg;	/* pinmux configuration */
	const struct device *phy_dev;	/* attached PHY, or NULL for fixed link */

	const Gmac_CtrlConfigType ctrl_cfg;	/* HAL controller configuration */
	GMAC_Type *base;	/* controller register base address */
};
51
/* Run-time, per-instance driver state */
struct eth_nxp_s32_data {
	struct net_if *iface;	/* bound network interface (set in iface_init) */
	uint8_t mac_addr[ETH_NXP_S32_MAC_ADDR_LEN];	/* current MAC address */
	uint8_t if_suspended;	/* iface stopped by upper layers; gate carrier updates */
	struct k_mutex tx_mutex;	/* serializes transmitters */
	struct k_sem rx_sem;	/* signaled by rx IRQ to wake the rx thread */
	struct k_sem tx_sem;	/* signaled by tx IRQ on frame completion */
	struct k_thread rx_thread;	/* deferred rx processing thread */

	K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_NXP_S32_RX_THREAD_STACK_SIZE);
};
63
64 static void eth_nxp_s32_rx_thread(void *arg1, void *unused1, void *unused2);
65
get_iface(struct eth_nxp_s32_data * ctx)66 static inline struct net_if *get_iface(struct eth_nxp_s32_data *ctx)
67 {
68 return ctx->iface;
69 }
70
/*
 * Translate a PHY link speed into the MAC speed/duplex settings.
 * Unknown speeds default to 1G full duplex, matching the original mapping.
 */
static void convert_phy_to_mac_config(Gmac_Ip_ConfigType *gmac_cfg, enum phy_link_speed phy_speed)
{
	/* Speed component */
	switch (phy_speed) {
	case LINK_HALF_10BASE_T:
	case LINK_FULL_10BASE_T:
		gmac_cfg->Speed = GMAC_SPEED_10M;
		break;
	case LINK_HALF_100BASE_T:
	case LINK_FULL_100BASE_T:
		gmac_cfg->Speed = GMAC_SPEED_100M;
		break;
	default:
		gmac_cfg->Speed = GMAC_SPEED_1G;
		break;
	}

	/* Duplex component */
	switch (phy_speed) {
	case LINK_HALF_10BASE_T:
	case LINK_HALF_100BASE_T:
	case LINK_HALF_1000BASE_T:
		gmac_cfg->Duplex = GMAC_HALF_DUPLEX;
		break;
	default:
		gmac_cfg->Duplex = GMAC_FULL_DUPLEX;
		break;
	}
}
102
phy_link_state_changed(const struct device * pdev,struct phy_link_state * state,void * user_data)103 static void phy_link_state_changed(const struct device *pdev,
104 struct phy_link_state *state,
105 void *user_data)
106 {
107 const struct device *dev = (struct device *)user_data;
108 const struct eth_nxp_s32_config *cfg = dev->config;
109 struct eth_nxp_s32_data *ctx = dev->data;
110 Gmac_Ip_ConfigType gmac_cfg;
111
112 ARG_UNUSED(pdev);
113
114 if (state->is_up) {
115 /* Porting phy link config to mac */
116 convert_phy_to_mac_config(&gmac_cfg, state->speed);
117 /* Set MAC configuration */
118 Gmac_Ip_SetSpeed(cfg->instance, gmac_cfg.Speed);
119
120 cfg->base->MAC_CONFIGURATION |= GMAC_MAC_CONFIGURATION_DM(gmac_cfg.Duplex);
121
122 /* net iface should be down even if PHY link state is up
123 * till the upper network layers have suspended the iface.
124 */
125 if (ctx->if_suspended) {
126 return;
127 }
128
129 LOG_DBG("Link up");
130 net_eth_carrier_on(ctx->iface);
131 } else {
132 LOG_DBG("Link down");
133 net_eth_carrier_off(ctx->iface);
134 }
135 }
136
eth_nxp_s32_get_phy(const struct device * dev)137 static const struct device *eth_nxp_s32_get_phy(const struct device *dev)
138 {
139 const struct eth_nxp_s32_config *cfg = dev->config;
140
141 return cfg->phy_dev;
142 }
143
144 #if defined(CONFIG_SOC_SERIES_S32K3)
/*
 * Program the SoC-level (DCM_GPR) mux that selects the EMAC PHY
 * interface type. Returns -EINVAL for an unsupported MII mode.
 */
static int select_phy_interface(Gmac_Ip_MiiModeType mode)
{
	uint32_t conf_sel;

	if (mode == GMAC_MII_MODE) {
		conf_sel = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(0U);
	} else if (mode == GMAC_RMII_MODE) {
		conf_sel = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(2U);
#if (FEATURE_GMAC_RGMII_EN == 1U)
	} else if (mode == GMAC_RGMII_MODE) {
		conf_sel = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(1U);
#endif
	} else {
		return -EINVAL;
	}

	/* Read-modify-write only the EMAC_CONF_SEL field */
	IP_DCM_GPR->DCMRWF1 =
		(IP_DCM_GPR->DCMRWF1 & ~DCM_GPR_DCMRWF1_EMAC_CONF_SEL_MASK) | conf_sel;

	return 0;
}
169 #else
170 #error "SoC not supported"
171 #endif /* CONFIG_SOC_SERIES_S32K3 */
172
/*
 * One-time device init: apply pinmux, configure clocks, select the PHY
 * interface mode, initialize the GMAC controller, then create the rx
 * thread and run the instance-specific config hook.
 *
 * Returns 0 on success, a negative errno from pinctrl, or -EIO on
 * clock/PHY-mux/GMAC failures.
 */
static int eth_nxp_s32_init(const struct device *dev)
{
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct eth_nxp_s32_data *ctx = dev->data;
	Gmac_Ip_StatusType mac_status;
	Clock_Ip_StatusType clk_status;
	int err;

	err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT);
	if (err != 0) {
		return err;
	}

	/*
	 * Currently, clock control shim driver does not support configuring clock
	 * muxes individually, so use the HAL directly.
	 */
	clk_status = Clock_Ip_Init(&Clock_Ip_aClockConfig[CONFIG_ETH_NXP_S32_CLOCK_CONFIG_IDX]);
	if (clk_status != CLOCK_IP_SUCCESS) {
		LOG_ERR("Failed to configure clocks (%d)", clk_status);
		return -EIO;
	}

	/*
	 * PHY mode selection must be done before the controller is reset,
	 * because the interface type is latched at controller's reset
	 */
	err = select_phy_interface(cfg->ctrl_cfg.Gmac_pCtrlConfig->MiiMode);
	if (err != 0) {
		LOG_ERR("Failed to select PHY interface (%d)", err);
		return -EIO;
	}

	mac_status = Gmac_Ip_Init(cfg->instance, &cfg->ctrl_cfg);
	if (mac_status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to initialize GMAC%d (%d)", cfg->instance, mac_status);
		return -EIO;
	}

	/* Sync primitives must exist before the rx thread and IRQs can use them */
	k_mutex_init(&ctx->tx_mutex);
	k_sem_init(&ctx->rx_sem, 0, 1);
	k_sem_init(&ctx->tx_sem, 0, 1);

	/* Deferred rx processing thread, woken by the rx IRQ via rx_sem */
	k_thread_create(&ctx->rx_thread, ctx->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(ctx->rx_thread_stack),
			eth_nxp_s32_rx_thread, (void *)dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_ETH_NXP_S32_RX_THREAD_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&ctx->rx_thread, "eth_nxp_s32_rx");

	/* Instance-specific hook: connects IRQs and programs the MAC address */
	if (cfg->do_config != NULL) {
		cfg->do_config();
	}

	return 0;
}
229
eth_nxp_s32_start(const struct device * dev)230 static int eth_nxp_s32_start(const struct device *dev)
231 {
232 const struct eth_nxp_s32_config *cfg = dev->config;
233 struct eth_nxp_s32_data *ctx = dev->data;
234 struct phy_link_state state;
235
236 Gmac_Ip_EnableController(cfg->instance);
237
238 irq_enable(cfg->rx_irq);
239 irq_enable(cfg->tx_irq);
240
241 /* If upper layers enable the net iface then mark it as
242 * not suspended so that PHY Link changes can have the impact
243 */
244 ctx->if_suspended = false;
245
246 if (cfg->phy_dev) {
247 phy_get_link_state(cfg->phy_dev, &state);
248
249 /* Enable net_iface only when Ethernet PHY link is up or else
250 * if net_iface is enabled when link is down and tx happens
251 * in this state then the used tx buffers will never be recovered back.
252 */
253 if (state.is_up == true) {
254 net_eth_carrier_on(ctx->iface);
255 }
256 } else {
257 net_eth_carrier_on(ctx->iface);
258 }
259
260 LOG_DBG("GMAC%d started", cfg->instance);
261
262 return 0;
263 }
264
eth_nxp_s32_stop(const struct device * dev)265 static int eth_nxp_s32_stop(const struct device *dev)
266 {
267 const struct eth_nxp_s32_config *cfg = dev->config;
268 struct eth_nxp_s32_data *ctx = dev->data;
269 Gmac_Ip_StatusType status;
270 int err = 0;
271
272 irq_disable(cfg->rx_irq);
273 irq_disable(cfg->tx_irq);
274
275 /* If upper layers disable the net iface then mark it as suspended
276 * in order to save it from the PHY link state changes
277 */
278 ctx->if_suspended = true;
279
280 net_eth_carrier_off(ctx->iface);
281
282 status = Gmac_Ip_DisableController(cfg->instance);
283 if (status != GMAC_STATUS_SUCCESS) {
284 LOG_ERR("Failed to disable controller GMAC%d (%d)", cfg->instance, status);
285 err = -EIO;
286 }
287
288 LOG_DBG("GMAC%d stopped", cfg->instance);
289
290 return err;
291 }
292
/*
 * Network interface init hook: bind the iface to the driver context,
 * publish the MAC address and hook up PHY link-state notifications
 * (or force the carrier on for fixed-link configurations).
 */
static void eth_nxp_s32_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct eth_nxp_s32_data *ctx = dev->data;

	/* Remember only the first iface bound to this controller */
	if (ctx->iface == NULL) {
		ctx->iface = iface;
	}

	ethernet_init(iface);

	net_if_set_link_addr(iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET);

	LOG_INF("GMAC%d MAC address %02x:%02x:%02x:%02x:%02x:%02x", cfg->instance,
		ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2],
		ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]);

	/* Make sure that the net iface state is not suspended unless
	 * upper layers explicitly stop the iface
	 */
	ctx->if_suspended = false;

	/* No PHY available, link is always up and MAC speed/duplex settings are fixed */
	if (cfg->phy_dev == NULL) {
		net_if_carrier_on(iface);
		return;
	}

	/*
	 * GMAC controls the PHY. If PHY is configured either as fixed
	 * link or autoneg, the callback is executed at least once
	 * immediately after setting it.
	 */
	if (!device_is_ready(cfg->phy_dev)) {
		LOG_ERR("PHY device (%p) is not ready, cannot init iface",
			cfg->phy_dev);
		return;
	}

	phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed, (void *)dev);
}
335
/*
 * Ethernet API send hook: copy @pkt into a tx DMA buffer, queue it to
 * the controller and block until the tx-complete interrupt (or timeout).
 *
 * Serialized via tx_mutex; one frame in flight at a time. Returns 0 on
 * success, -ENOBUFS when no tx buffer is available or the copy fails,
 * -EIO on controller/timeout/frame errors.
 */
static int eth_nxp_s32_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	size_t pkt_len = net_pkt_get_len(pkt);
	int res = 0;
	Gmac_Ip_BufferType buf;
	Gmac_Ip_TxInfoType tx_info;
	Gmac_Ip_StatusType status;
	Gmac_Ip_TxOptionsType tx_options = {
		.NoInt = FALSE,
		.CrcPadIns = GMAC_CRC_AND_PAD_INSERTION,
		.ChecksumIns = GMAC_CHECKSUM_INSERTION_PROTO_PSEUDOH
	};

	__ASSERT(pkt, "Packet pointer is NULL");

	k_mutex_lock(&ctx->tx_mutex, K_FOREVER);
	/* Drop any stale completion signal from a previous (timed-out) frame */
	k_sem_reset(&ctx->tx_sem);

	/* Request a descriptor buffer large enough for the whole frame */
	buf.Length = (uint16_t)pkt_len;
	buf.Data = NULL;
	status = Gmac_Ip_GetTxBuff(cfg->instance, cfg->tx_ring_idx, &buf, NULL);
	if (status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to get tx buffer (%d)", status);
		res = -ENOBUFS;
		goto error;
	}

	res = net_pkt_read(pkt, buf.Data, pkt_len);
	if (res) {
		LOG_ERR("Failed to copy packet to tx buffer (%d)", res);
		res = -ENOBUFS;
		goto error;
	}

	/* GetTxBuff may report a larger buffer; restore the actual frame length */
	buf.Length = (uint16_t)pkt_len;
	status = Gmac_Ip_SendFrame(cfg->instance, cfg->tx_ring_idx, &buf, &tx_options);
	if (status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to tx frame (%d)", status);
		res = -EIO;
		goto error;
	}

	/* Wait for the transmission to complete */
	if (k_sem_take(&ctx->tx_sem, ETH_NXP_S32_DMA_TX_TIMEOUT) != 0) {
		LOG_ERR("Timeout transmitting frame");
		res = -EIO;
		goto error;
	}

	/* Restore the buffer address pointer and clear the descriptor after the status is read */
	status = Gmac_Ip_GetTransmitStatus(cfg->instance, cfg->tx_ring_idx, &buf, &tx_info);
	if (status != GMAC_STATUS_SUCCESS) {
		LOG_ERR("Failed to restore tx buffer: %s (%d) ",
			(status == GMAC_STATUS_BUSY ? "busy" : "buf not found"), status);
		res = -EIO;
	} else if (tx_info.ErrMask != 0U) {
		LOG_ERR("Tx frame has errors (error mask 0x%X)", tx_info.ErrMask);
		res = -EIO;
	}

error:
	k_mutex_unlock(&ctx->tx_mutex);

	if (res != 0) {
		eth_stats_update_errors_tx(ctx->iface);
	}
	return res;
}
406
/*
 * Build a net_pkt from a received DMA buffer. Returns the packet on
 * success or NULL (with rx error stats updated) on allocation or copy
 * failure. The DMA buffer itself is returned to the ring by the caller.
 */
static struct net_pkt *eth_nxp_s32_get_pkt(const struct device *dev,
					   Gmac_Ip_BufferType *buf,
					   Gmac_Ip_RxInfoType *rx_info)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	struct net_pkt *pkt;
	int res;

	/* Using root iface, it will be updated in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, rx_info->PktLen,
					   AF_UNSPEC, 0, ETH_NXP_S32_BUF_TIMEOUT);
	if (pkt == NULL) {
		LOG_ERR("Failed to allocate rx buffer of length %u", rx_info->PktLen);
		eth_stats_update_errors_rx(get_iface(ctx));
		return NULL;
	}

	res = net_pkt_write(pkt, buf->Data, rx_info->PktLen);
	if (res != 0) {
		LOG_ERR("Failed to write rx frame into pkt buffer (%d)", res);
		net_pkt_unref(pkt);
		eth_stats_update_errors_rx(get_iface(ctx));
		return NULL;
	}

	return pkt;
}
438
/*
 * Process one frame from the rx ring: read it, copy it into a net_pkt,
 * give the DMA buffer back to the ring, then hand the packet to the
 * network stack. Frames with hardware errors are dropped.
 */
static void eth_nxp_s32_rx(const struct device *dev)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	struct net_pkt *pkt;
	int res = 0;
	Gmac_Ip_RxInfoType rx_info = {0};
	Gmac_Ip_BufferType buf;
	Gmac_Ip_StatusType status;

	status = Gmac_Ip_ReadFrame(cfg->instance, cfg->rx_ring_idx, &buf, &rx_info);
	if (rx_info.ErrMask != 0U) {
		/* Drop the frame but return the buffer to the ring */
		Gmac_Ip_ProvideRxBuff(cfg->instance, cfg->rx_ring_idx, &buf);
		LOG_ERR("Rx frame has errors (error mask 0x%X)", rx_info.ErrMask);
	} else if (status == GMAC_STATUS_SUCCESS) {
		/* Copy out before recycling the DMA buffer */
		pkt = eth_nxp_s32_get_pkt(dev, &buf, &rx_info);
		Gmac_Ip_ProvideRxBuff(cfg->instance, cfg->rx_ring_idx, &buf);
		if (pkt != NULL) {
			res = net_recv_data(get_iface(ctx), pkt);
			if (res < 0) {
				/* Stack rejected the packet; we still own it */
				eth_stats_update_errors_rx(get_iface(ctx));
				net_pkt_unref(pkt);
				LOG_ERR("Failed to enqueue frame into rx queue (%d)", res);
			}
		}
	}
}
466
/*
 * Deferred rx thread: sleeps on rx_sem (given by the rx IRQ, which
 * masks itself), drains the rx ring with a bounded budget per pass,
 * then re-enables the rx interrupt.
 *
 * @arg1 is the Ethernet device; the other two arguments are unused.
 */
static void eth_nxp_s32_rx_thread(void *arg1, void *unused1, void *unused2)
{
	const struct device *dev = (const struct device *)arg1;
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	int res;
	int work;

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	__ASSERT_NO_MSG(arg1 != NULL);
	__ASSERT_NO_MSG(ctx != NULL);

	while (1) {
		res = k_sem_take(&ctx->rx_sem, K_FOREVER);
		__ASSERT_NO_MSG(res == 0);

		work = 0;
		while (Gmac_Ip_IsFrameAvailable(cfg->instance, cfg->rx_ring_idx)) {
			eth_nxp_s32_rx(dev);
			if (++work == CONFIG_ETH_NXP_S32_RX_BUDGET) {
				/* More work to do, reschedule */
				work = 0;
				k_yield();
			}
		}

		/* All work done, re-enable rx interrupt and exit polling */
		irq_enable(cfg->rx_irq);

		/* In case a frame arrived after last eth_nxp_s32_rx() and before irq_enable() */
		if (Gmac_Ip_IsFrameAvailable(cfg->instance, cfg->rx_ring_idx)) {
			eth_nxp_s32_rx(dev);
		}
	}
}
503
/*
 * Ethernet API set_config hook. Supports changing the MAC address,
 * toggling promiscuous mode (when CONFIG_NET_PROMISCUOUS_MODE) and
 * multicast hash filtering (when CONFIG_ETH_NXP_S32_MULTICAST_FILTER).
 *
 * Returns 0 on success, -EALREADY when promiscuous mode is unchanged,
 * -ENOTSUP for unsupported config types.
 */
static int eth_nxp_s32_set_config(const struct device *dev,
				  enum ethernet_config_type type,
				  const struct ethernet_config *config)
{
	struct eth_nxp_s32_data *ctx = dev->data;
	const struct eth_nxp_s32_config *cfg = dev->config;
	int res = 0;
	uint32_t regval;

	/* Silence -Wunused when the optional cases below are compiled out */
	ARG_UNUSED(cfg);
	ARG_UNUSED(regval);

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		/* Set new Ethernet MAC address and register it with the upper layer */
		memcpy(ctx->mac_addr, config->mac_address.addr, sizeof(ctx->mac_addr));
		Gmac_Ip_SetMacAddr(cfg->instance, (const uint8_t *)ctx->mac_addr);
		net_if_set_link_addr(ctx->iface, ctx->mac_addr, sizeof(ctx->mac_addr),
				     NET_LINK_ETHERNET);
		LOG_INF("MAC set to: %02x:%02x:%02x:%02x:%02x:%02x",
			ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2],
			ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]);
		break;
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
	case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
		/* Toggle the PR (promiscuous/receive-all) filter bit only on change */
		regval = cfg->base->MAC_PACKET_FILTER;
		if (config->promisc_mode && !(regval & GMAC_MAC_PACKET_FILTER_PR_MASK)) {
			cfg->base->MAC_PACKET_FILTER |= GMAC_MAC_PACKET_FILTER_PR_MASK;
		} else if (!config->promisc_mode && (regval & GMAC_MAC_PACKET_FILTER_PR_MASK)) {
			cfg->base->MAC_PACKET_FILTER &= ~GMAC_MAC_PACKET_FILTER_PR_MASK;
		} else {
			res = -EALREADY;
		}
		break;
#endif
#if defined(CONFIG_ETH_NXP_S32_MULTICAST_FILTER)
	case ETHERNET_HW_FILTERING:
		/* Add/remove a destination MAC to/from the hardware hash filter */
		if (config->filter.set) {
			Gmac_Ip_AddDstAddrToHashFilter(cfg->instance,
						       config->filter.mac_address.addr);
		} else {
			Gmac_Ip_RemoveDstAddrFromHashFilter(cfg->instance,
							    config->filter.mac_address.addr);
		}
		break;
#endif
	default:
		res = -ENOTSUP;
		break;
	}

	return res;
}
557
/*
 * Ethernet API capabilities hook. Reports fixed hardware features;
 * 1000BASE-T is advertised only when the silicon supports RGMII.
 */
static enum ethernet_hw_caps eth_nxp_s32_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return (ETHERNET_LINK_10BASE_T
		| ETHERNET_LINK_100BASE_T
#if (FEATURE_GMAC_RGMII_EN == 1U)
		| ETHERNET_LINK_1000BASE_T
#endif
		| ETHERNET_DUPLEX_SET
		| ETHERNET_HW_TX_CHKSUM_OFFLOAD
		| ETHERNET_HW_RX_CHKSUM_OFFLOAD
#if defined(CONFIG_NET_VLAN)
		| ETHERNET_HW_VLAN
#endif
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
		| ETHERNET_PROMISC_MODE
#endif
#if defined(CONFIG_ETH_NXP_S32_MULTICAST_FILTER)
		| ETHERNET_HW_FILTERING
#endif
	);
}
581
eth_nxp_s32_tx_irq(const struct device * dev)582 static void eth_nxp_s32_tx_irq(const struct device *dev)
583 {
584 const struct eth_nxp_s32_config *cfg = dev->config;
585
586 GMAC_TxIRQHandler(cfg->instance, cfg->tx_ring_idx);
587 }
588
eth_nxp_s32_rx_irq(const struct device * dev)589 static void eth_nxp_s32_rx_irq(const struct device *dev)
590 {
591 const struct eth_nxp_s32_config *cfg = dev->config;
592
593 GMAC_RxIRQHandler(cfg->instance, cfg->rx_ring_idx);
594 }
595
/* Zephyr Ethernet driver API vtable shared by all GMAC instances */
static const struct ethernet_api eth_api = {
	.iface_api.init = eth_nxp_s32_iface_init,
	.get_capabilities = eth_nxp_s32_get_capabilities,
	.get_phy = eth_nxp_s32_get_phy,
	.start = eth_nxp_s32_start,
	.stop = eth_nxp_s32_stop,
	.send = eth_nxp_s32_tx,
	.set_config = eth_nxp_s32_set_config,
};
605
606
/* DMA ring geometry sanity checks: total ring storage must be a multiple
 * of the MTL FIFO block size, and each buffer a multiple of the DMA data
 * bus width, as required by the GMAC IP.
 */
BUILD_ASSERT(((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_RX_RING_LEN)
	     % FEATURE_GMAC_MTL_RX_FIFO_BLOCK_SIZE) == 0,
	"CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_RX_RING_LEN "
	"must be multiple of RX FIFO block size");
BUILD_ASSERT(((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_TX_RING_LEN)
	     % FEATURE_GMAC_MTL_TX_FIFO_BLOCK_SIZE) == 0,
	"CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_TX_RING_LEN "
	"must be multiple of TX FIFO block size");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE % FEATURE_GMAC_DATA_BUS_WIDTH_BYTES) == 0,
	"CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE must be multiple of the data bus width");
BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE % FEATURE_GMAC_DATA_BUS_WIDTH_BYTES) == 0,
	"CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE must be multiple of the data bus width");
619
/* Map the devicetree `phy-connection-type` string to the HAL
 * Gmac_Ip_MiiModeType token, e.g. "rmii" -> GMAC_RMII_MODE.
 */
#define ETH_NXP_S32_MAC_MII(n)						\
	_CONCAT(_CONCAT(GMAC_, DT_INST_STRING_UPPER_TOKEN(n, phy_connection_type)), _MODE)

/* Connect one named (tx/rx) interrupt of instance n to the matching
 * eth_nxp_s32_<name>_irq handler; flags default to 0 when absent in DT.
 */
#define ETH_NXP_S32_IRQ_INIT(n, name)					\
	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, name, irq),			\
		DT_INST_IRQ_BY_NAME(n, name, priority),			\
		eth_nxp_s32_##name##_irq,				\
		DEVICE_DT_INST_GET(n),					\
		COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, flags),		\
			(DT_INST_IRQ_BY_NAME(n, name, flags)), (0)));
630
/* Per-instance config hook run from eth_nxp_s32_init(): connects the
 * tx/rx IRQs and programs the MAC address (random with Freescale OUI
 * when `zephyr,random-mac-address` is set, otherwise read back from
 * the controller, which was loaded from the DT local-mac-address).
 */
#define ETH_NXP_S32_INIT_CONFIG(n)						\
	static void eth_nxp_s32_init_config_##n(void)				\
	{									\
		const struct device *dev = DEVICE_DT_INST_GET(n);		\
		struct eth_nxp_s32_data *ctx = dev->data;			\
		const struct eth_nxp_s32_config *cfg = dev->config;		\
										\
		ETH_NXP_S32_IRQ_INIT(n, tx);					\
		ETH_NXP_S32_IRQ_INIT(n, rx);					\
										\
		COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), (	\
			gen_random_mac(ctx->mac_addr, FREESCALE_OUI_B0,		\
				FREESCALE_OUI_B1, FREESCALE_OUI_B2);		\
			Gmac_Ip_SetMacAddr(cfg->instance, ctx->mac_addr);	\
		), (								\
			Gmac_Ip_GetMacAddr(cfg->instance, ctx->mac_addr);	\
		))								\
	}
649
/* HAL rx-channel callback: mask the rx IRQ (it is re-enabled by the rx
 * thread once the ring is drained) and wake the rx thread.
 */
#define ETH_NXP_S32_RX_CALLBACK(n)						\
	static void eth_nxp_s32_rx_callback_##n(uint8_t inst, uint8_t chan)	\
	{									\
		const struct device *dev = DEVICE_DT_INST_GET(n);		\
		struct eth_nxp_s32_data *ctx = dev->data;			\
		const struct eth_nxp_s32_config *cfg = dev->config;		\
										\
		ARG_UNUSED(inst);						\
		ARG_UNUSED(chan);						\
										\
		/* Rx irq will be re-enabled from Rx thread */			\
		irq_disable(cfg->rx_irq);					\
		k_sem_give(&ctx->rx_sem);					\
	}

/* HAL tx-channel callback: signal frame completion to eth_nxp_s32_tx() */
#define ETH_NXP_S32_TX_CALLBACK(n)						\
	static void eth_nxp_s32_tx_callback_##n(uint8_t inst, uint8_t chan)	\
	{									\
		const struct device *dev = DEVICE_DT_INST_GET(n);		\
		struct eth_nxp_s32_data *ctx = dev->data;			\
										\
		ARG_UNUSED(inst);						\
		ARG_UNUSED(chan);						\
										\
		k_sem_give(&ctx->tx_sem);					\
	}
676
/* Define a DMA descriptor ring and its buffer pool, placed in
 * non-cacheable memory with the alignment the GMAC IP requires.
 */
#define _ETH_NXP_S32_RING(n, name, len, buf_size)				\
	static Gmac_Ip_BufferDescriptorType eth_nxp_s32_##name##ring_desc_##n[len] \
		__nocache __aligned(FEATURE_GMAC_BUFFDESCR_ALIGNMENT_BYTES);	\
	static uint8_t eth_nxp_s32_##name##ring_buf_##n[len * buf_size]		\
		__nocache __aligned(FEATURE_GMAC_BUFF_ALIGNMENT_BYTES)

#define ETH_NXP_S32_RX_RING(n)					\
	_ETH_NXP_S32_RING(n, rx,				\
			  CONFIG_ETH_NXP_S32_RX_RING_LEN,	\
			  CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE)

#define ETH_NXP_S32_TX_RING(n)					\
	_ETH_NXP_S32_RING(n, tx,				\
			  CONFIG_ETH_NXP_S32_TX_RING_LEN,	\
			  CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE)

/* Time-aware shaper disabled: no gate control list configured */
#define ETH_NXP_S32_MAC_TXTIMESHAPER_CONFIG(n)						\
	static const Gmac_Ip_TxTimeAwareShaper eth_nxp_s32_mac_txtimeshaper_config_##n = {\
		.GateControlList = NULL,						\
	}

/* Rx ring configuration: rx-complete interrupt only */
#define ETH_NXP_S32_MAC_RXRING_CONFIG(n)					\
	static const Gmac_Ip_RxRingConfigType eth_nxp_s32_mac_rxring_config_##n = { \
		.RingDesc = eth_nxp_s32_rxring_desc_##n,			\
		.Callback = eth_nxp_s32_rx_callback_##n,			\
		.Buffer = eth_nxp_s32_rxring_buf_##n,				\
		.Interrupts = (uint32_t)GMAC_CH_INTERRUPT_RI,			\
		.BufferLen = CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE,		\
		.RingSize = CONFIG_ETH_NXP_S32_RX_RING_LEN,			\
		.PriorityMask = 0U,						\
		.DmaBurstLength = 32U,						\
	}

/* Tx ring configuration: tx-complete interrupt only, no CBS credits */
#define ETH_NXP_S32_MAC_TXRING_CONFIG(n)					\
	static const Gmac_Ip_TxRingConfigType eth_nxp_s32_mac_txring_config_##n = { \
		.Weight = 0U,							\
		.IdleSlopeCredit = 0U,						\
		.SendSlopeCredit = 0U,						\
		.HiCredit = 0U,							\
		.LoCredit = 0,							\
		.RingDesc = eth_nxp_s32_txring_desc_##n,			\
		.Callback = eth_nxp_s32_tx_callback_##n,			\
		.Buffer = eth_nxp_s32_txring_buf_##n,				\
		.Interrupts = (uint32_t)GMAC_CH_INTERRUPT_TI,			\
		.BufferLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE,		\
		.RingSize = CONFIG_ETH_NXP_S32_TX_RING_LEN,			\
		.PriorityMask = 0U,						\
		.DmaBurstLength = 32U,						\
		.QueueOpMode = GMAC_OP_MODE_DCB_GEN,				\
	}
727
/* Packet filter: hash-based multicast filtering when the Kconfig option
 * is enabled, otherwise pass all multicast frames.
 */
#define ETH_NXP_S32_MAC_PKT_FILTER(n)					\
	((uint32_t)(0U							\
		COND_CODE_1(CONFIG_ETH_NXP_S32_MULTICAST_FILTER,	\
			(|GMAC_PKT_FILTER_HASH_MULTICAST),		\
			(|GMAC_PKT_FILTER_PASS_ALL_MULTICAST))		\
	))

/* Base MAC features: CRC stripping, auto padding, checksum offload,
 * plus MAC loopback when CONFIG_ETH_NXP_S32_LOOPBACK is set.
 */
#define ETH_NXP_S32_MAC_CONF(n)						\
	((uint32_t)(GMAC_MAC_CONFIG_CRC_STRIPPING			\
		| GMAC_MAC_CONFIG_AUTO_PAD				\
		| GMAC_MAC_CONFIG_CHECKSUM_OFFLOAD			\
		IF_ENABLED(CONFIG_ETH_NXP_S32_LOOPBACK,			\
			(|GMAC_MAC_CONFIG_LOOPBACK))			\
	))

/* HAL controller configuration: single tx/rx ring, controller kept
 * disabled until eth_nxp_s32_start(); speed/duplex defaults are later
 * overridden from the PHY link state.
 */
#define ETH_NXP_S32_MAC_CONFIG(n)						\
	static const Gmac_Ip_ConfigType eth_nxp_s32_mac_config_##n = {		\
		.RxRingCount = 1U,						\
		.TxRingCount = 1U,						\
		.Interrupts = 0U,						\
		.Callback = NULL,						\
		.TxSchedAlgo = GMAC_SCHED_ALGO_SP,				\
		.MiiMode = ETH_NXP_S32_MAC_MII(n),				\
		.Speed = GMAC_SPEED_100M,					\
		.Duplex = GMAC_FULL_DUPLEX,					\
		.MacConfig = ETH_NXP_S32_MAC_CONF(n),				\
		.MacPktFilterConfig = ETH_NXP_S32_MAC_PKT_FILTER(n),		\
		.EnableCtrl = false,						\
	}
757
/* Initial MAC address from DT `local-mac-address`, or all-zero when a
 * random address will be generated; at least one must be configured.
 */
#define ETH_NXP_S32_MAC_ADDR(n)							\
	BUILD_ASSERT(DT_INST_PROP(n, zephyr_random_mac_address) ||		\
		NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)),			\
		"eth_nxp_s32_gmac requires either a fixed or random MAC address"); \
	static const uint8_t eth_nxp_s32_mac_addr_##n[ETH_NXP_S32_MAC_ADDR_LEN] = \
		DT_INST_PROP_OR(n, local_mac_address, {0U})

/* HAL-internal controller state storage */
#define ETH_NXP_S32_MAC_STATE(n) Gmac_Ip_StateType eth_nxp_s32_mac_state_##n

/* Aggregate HAL controller configuration bundling state, MAC config,
 * ring configs, MAC address and shaper config for instance n.
 */
#define ETH_NXP_S32_CTRL_CONFIG(n)						\
	{									\
		.Gmac_pCtrlState = &eth_nxp_s32_mac_state_##n,			\
		.Gmac_pCtrlConfig = &eth_nxp_s32_mac_config_##n,		\
		.Gmac_paCtrlRxRingConfig = &eth_nxp_s32_mac_rxring_config_##n,	\
		.Gmac_paCtrlTxRingConfig = &eth_nxp_s32_mac_txring_config_##n,	\
		.Gmac_pau8CtrlPhysAddr = &eth_nxp_s32_mac_addr_##n[0],		\
		.Gmac_pCtrlTxTimeAwareShaper = &eth_nxp_s32_mac_txtimeshaper_config_##n,\
	}

/* Resolve the hardware instance index by matching the DT register base
 * address against the known IP_GMAC_<i>_BASE addresses.
 */
#define ETH_NXP_S32_HW_INSTANCE_CHECK(i, n)		\
	((DT_INST_REG_ADDR(n) == IP_GMAC_##i##_BASE) ? i : 0)

#define ETH_NXP_S32_HW_INSTANCE(n)				\
	LISTIFY(__DEBRACKET FEATURE_GMAC_NUM_INSTANCES,		\
		ETH_NXP_S32_HW_INSTANCE_CHECK, (|), n)

/* PHY device from DT `phy-handle`, or NULL for a fixed link */
#define ETH_NXP_S32_PHY_DEV(n)						\
	(COND_CODE_1(DT_INST_NODE_HAS_PROP(n, phy_handle),		\
		(DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle))), NULL))
787
/* Instantiate one complete driver instance: callbacks, config hook,
 * rings, HAL state/configs, MAC address, pinctrl, the driver config/data
 * objects and finally the Zephyr Ethernet net device.
 */
#define ETH_NXP_S32_DEVICE(n)						\
	ETH_NXP_S32_TX_CALLBACK(n)					\
	ETH_NXP_S32_RX_CALLBACK(n)					\
	ETH_NXP_S32_INIT_CONFIG(n)					\
	ETH_NXP_S32_RX_RING(n);						\
	ETH_NXP_S32_TX_RING(n);						\
	ETH_NXP_S32_MAC_STATE(n);					\
	ETH_NXP_S32_MAC_TXTIMESHAPER_CONFIG(n);				\
	ETH_NXP_S32_MAC_RXRING_CONFIG(n);				\
	ETH_NXP_S32_MAC_TXRING_CONFIG(n);				\
	ETH_NXP_S32_MAC_CONFIG(n);					\
	ETH_NXP_S32_MAC_ADDR(n);					\
	PINCTRL_DT_INST_DEFINE(n);					\
									\
	static const struct eth_nxp_s32_config eth_nxp_s32_config_##n = { \
		.instance = ETH_NXP_S32_HW_INSTANCE(n),			\
		.base = (GMAC_Type *)DT_INST_REG_ADDR(n),		\
		.ctrl_cfg = ETH_NXP_S32_CTRL_CONFIG(n),			\
		.do_config = eth_nxp_s32_init_config_##n,		\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),		\
		.phy_dev = ETH_NXP_S32_PHY_DEV(n),			\
		.rx_irq = DT_INST_IRQ_BY_NAME(n, rx, irq),		\
		.tx_irq = DT_INST_IRQ_BY_NAME(n, tx, irq),		\
		.tx_ring_idx = 0U,					\
		.rx_ring_idx = 0U,					\
	};								\
									\
	static struct eth_nxp_s32_data eth_nxp_s32_data_##n;		\
									\
	ETH_NET_DEVICE_DT_INST_DEFINE(n,				\
				eth_nxp_s32_init,			\
				NULL,					\
				&eth_nxp_s32_data_##n,			\
				&eth_nxp_s32_config_##n,		\
				CONFIG_ETH_INIT_PRIORITY,		\
				&eth_api,				\
				NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(ETH_NXP_S32_DEVICE)
827