Lines matching "+full:mtl +full:-etsalg": Synopsys DesignWare XGMAC Ethernet driver excerpts, grouped by function.
 * SPDX-License-Identifier: Apache-2.0

#define UPDATE_ETH_STATS_TX_PKT_CNT(dev_data, incr) (dev_data->stats.pkts.tx += incr)
#define UPDATE_ETH_STATS_RX_PKT_CNT(dev_data, incr) (dev_data->stats.pkts.rx += incr)
#define UPDATE_ETH_STATS_TX_BYTE_CNT(dev_data, incr) (dev_data->stats.bytes.sent += incr)
#define UPDATE_ETH_STATS_RX_BYTE_CNT(dev_data, incr) (dev_data->stats.bytes.received += incr)
#define UPDATE_ETH_STATS_TX_ERROR_PKT_CNT(dev_data, incr) (dev_data->stats.errors.tx += incr)
#define UPDATE_ETH_STATS_RX_ERROR_PKT_CNT(dev_data, incr) (dev_data->stats.errors.rx += incr)
#define UPDATE_ETH_STATS_TX_DROP_PKT_CNT(dev_data, incr) (dev_data->stats.tx_dropped += incr)
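A minimal standalone sketch of how these counter macros are used on the datapath, e.g. bumping the TX counters after a successful transmit. The stats layout below is a stand-in mirroring only the fields the macros touch (the real driver updates Zephyr's Ethernet statistics structure), and the macro bodies are restated with extra parentheses for standalone hygiene:

#include <stdint.h>
#include <stdio.h>

struct demo_dev_data {
	struct {
		struct { uint32_t tx, rx; } pkts;
		struct { uint32_t sent, received; } bytes;
	} stats;
};

#define DEMO_UPDATE_TX_PKT_CNT(dev_data, incr)  ((dev_data)->stats.pkts.tx += (incr))
#define DEMO_UPDATE_TX_BYTE_CNT(dev_data, incr) ((dev_data)->stats.bytes.sent += (incr))

int main(void)
{
	struct demo_dev_data data = {0};

	/* after transmitting one 64-byte frame */
	DEMO_UPDATE_TX_PKT_CNT(&data, 1);
	DEMO_UPDATE_TX_BYTE_CNT(&data, 64);
	printf("tx pkts=%u tx bytes=%u\n",
	       (unsigned)data.stats.pkts.tx, (unsigned)data.stats.bytes.sent);
	return 0;
}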
 * @brief Run-time device configuration data structure.
 * controller instance which is modifiable at run-time, such as

/* Ethernet auto-negotiation status. */

/* XGMAC MTL configuration */
/* in dwxgmac_dma_init() */
	uint32_t reg_val = DMA_SYSBUS_MODE_RD_OSR_LMT_SET(dma_cfg->rd_osr_lmt) |
			   DMA_SYSBUS_MODE_WR_OSR_LMT_SET(dma_cfg->wr_osr_lmt) |
			   DMA_SYSBUS_MODE_AAL_SET(dma_cfg->aal) |
			   DMA_SYSBUS_MODE_EAME_SET(dma_cfg->eame) |
			   DMA_SYSBUS_MODE_BLEN4_SET(dma_cfg->blen4) |
			   DMA_SYSBUS_MODE_BLEN8_SET(dma_cfg->blen8) |
			   DMA_SYSBUS_MODE_BLEN16_SET(dma_cfg->blen16) |
			   DMA_SYSBUS_MODE_BLEN32_SET(dma_cfg->blen32) |
			   DMA_SYSBUS_MODE_BLEN64_SET(dma_cfg->blen64) |
			   DMA_SYSBUS_MODE_BLEN128_SET(dma_cfg->blen128) |
			   DMA_SYSBUS_MODE_BLEN256_SET(dma_cfg->blen256) |
			   DMA_SYSBUS_MODE_UNDEF_SET(dma_cfg->ubl);

	/* Configure TX descriptor pre-fetch threshold size in the TX enhanced DMA control register. */

	reg_val = DMA_TX_EDMA_CONTROL_TDPS_SET(dma_cfg->edma_tdps);

	/* Configure RX descriptor pre-fetch threshold size in the RX enhanced DMA control register. */

	reg_val = DMA_RX_EDMA_CONTROL_RDPS_SET(dma_cfg->edma_rdps);

	LOG_DBG("%s: DMA engine common initialization completed", dev->name);
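The register write above is built by OR-ing together *_SET() field-packing macros, one per bitfield, into a single value before one MMIO store. A standalone sketch of that pattern, with made-up field positions (the real offsets and masks live in the XGMAC register header):

#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_PREP(mask, shift, val) (((uint32_t)(val) << (shift)) & (mask))

/* hypothetical fields of a SYSBUS_MODE-like register */
#define DEMO_AAL_SET(v)    DEMO_FIELD_PREP(0x00001000u, 12, (v))
#define DEMO_BLEN16_SET(v) DEMO_FIELD_PREP(0x00000008u, 3, (v))
#define DEMO_UNDEF_SET(v)  DEMO_FIELD_PREP(0x00000001u, 0, (v))

int main(void)
{
	uint32_t reg_val = DEMO_AAL_SET(1) | DEMO_BLEN16_SET(1) | DEMO_UNDEF_SET(1);

	printf("reg_val = 0x%08x\n", (unsigned)reg_val); /* prints 0x00001009 */
	return 0;
}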
/* in dwxgmac_dma_chnl_init() */
	uint32_t max_dma_chnl = config->num_dma_chnl;

	(struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;

	tx_desc_meta = (struct xgmac_dma_tx_desc_meta *)&data->tx_desc_meta[dma_chnl];
	rx_desc_meta = (struct xgmac_dma_rx_desc_meta *)&data->rx_desc_meta[dma_chnl];

	 * Configure Header-Payload Split feature, 8xPBL mode (burst length) and

	reg_val = DMA_CHx_CONTROL_SPH_SET(dma_chnl_cfg->sph) |
		  DMA_CHx_CONTROL_PBLX8_SET(dma_chnl_cfg->pblx8) |
		  DMA_CHx_CONTROL_MSS_SET(dma_chnl_cfg->mss);

	reg_val = DMA_CHx_TX_CONTROL_TXPBL_SET(dma_chnl_cfg->txpbl) |
		  DMA_CHx_TX_CONTROL_TSE_SET(dma_chnl_cfg->tse) |
		  DMA_CHx_TX_CONTROL_RESERVED_OSP_SET(dma_chnl_cfg->osp);

		  DMA_CHx_RX_CONTROL_RXPBL_SET(dma_chnl_cfg->rxpbl) |

	reg_val = DMA_CHx_TXDESC_LIST_HADDRESS_TDESHA_SET(tx_desc_meta->desc_list_addr >>

	reg_val = tx_desc_meta->desc_list_addr;

	reg_val = rx_desc_meta->desc_list_addr >> 32u;

	reg_val = rx_desc_meta->desc_list_addr;

	reg_val = DMA_CHx_TXDESC_TAIL_LPOINTER_TDT_SET(tx_desc_meta->desc_tail_addr);

	reg_val = DMA_CHx_RXDESC_TAIL_LPOINTER_RDT_SET(rx_desc_meta->desc_tail_addr);

	reg_val = DMA_CHx_TX_CONTROL2_TDRL_SET((dma_chnl_cfg->tdrl - 1u));

	reg_val = DMA_CHx_RX_CONTROL2_RDRL_SET((dma_chnl_cfg->rdrl - 1u));

	tx_desc_meta->next_to_use = 0u;
	rx_desc_meta->next_to_read = 0u;
	rx_desc_meta->rx_pkt = NULL;
	LOG_DBG("%s: DMA channel %d initialization completed", dev->name, dma_chnl);
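The channel init above programs a 64-bit descriptor list address through two 32-bit registers: the HADDRESS register takes bits 63:32 (hence the `>> 32u`) and the LADDRESS register takes bits 31:0. A standalone sketch of the split, with an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t desc_list_addr = 0x000000012345A000ull; /* example DMA address */
	uint32_t haddr = (uint32_t)(desc_list_addr >> 32u); /* bits 63:32 */
	uint32_t laddr = (uint32_t)desc_list_addr;          /* bits 31:0  */

	printf("HI=0x%08x LO=0x%08x\n", (unsigned)haddr, (unsigned)laddr);
	return 0;
}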
/* in dwxgmac_dma_desc_init() */
	const uint32_t max_dma_chnl = config->num_dma_chnl;

	(struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;

	tx_desc_meta = (struct xgmac_dma_tx_desc_meta *)&data->tx_desc_meta[dma_chnl];
	rx_desc_meta = (struct xgmac_dma_rx_desc_meta *)&data->rx_desc_meta[dma_chnl];

	tx_desc_meta->desc_list_addr =
		POINTER_TO_UINT(data->dma_tx_desc + (dma_chnl * dma_chnl_cfg->tdrl));
	tx_desc_meta->desc_tail_addr = POINTER_TO_UINT(tx_desc_meta->desc_list_addr);

	memset((void *)(tx_desc_meta->desc_list_addr), 0,
	       ((dma_chnl_cfg->tdrl) * sizeof(struct xgmac_dma_tx_desc)));

	rx_desc_meta->desc_list_addr =
		POINTER_TO_UINT(data->dma_rx_desc + (dma_chnl * dma_chnl_cfg->rdrl));
	rx_desc_meta->desc_tail_addr = POINTER_TO_UINT(rx_desc_meta->desc_list_addr);

	memset((void *)(rx_desc_meta->desc_list_addr), 0,
	       ((dma_chnl_cfg->rdrl) * sizeof(struct xgmac_dma_rx_desc)));
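All channel rings live in one flat descriptor array, and channel n's ring is the slice starting at base + n * ring_length, with each ring then zeroed via memset() as above. A reduced standalone sketch of that carving, with a stand-in descriptor type and made-up sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_desc { uint32_t des0, des1, des2, des3; };

#define DEMO_NUM_CH   2
#define DEMO_RING_LEN 4

static struct demo_desc all_desc[DEMO_NUM_CH * DEMO_RING_LEN];

int main(void)
{
	for (int ch = 0; ch < DEMO_NUM_CH; ch++) {
		/* channel ch's ring is a contiguous slice of the flat array */
		struct demo_desc *ring = all_desc + (ch * DEMO_RING_LEN);

		memset(ring, 0, DEMO_RING_LEN * sizeof(struct demo_desc));
		printf("channel %d ring at %p\n", ch, (void *)ring);
	}
	return 0;
}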
/* in dwxgmac_dma_mtl_init() */
	config->num_tx_Qs > config->num_rx_Qs ? config->num_tx_Qs : config->num_rx_Qs;

	struct xgmac_mtl_config *mtl_cfg = (struct xgmac_mtl_config *)&config->mtl_cfg;
	struct xgmac_tcq_config *const tcq_config = (struct xgmac_tcq_config *)config->tcq_config;

	/* Configure MTL operation mode options */

	uint32_t reg_val = MTL_OPERATION_MODE_ETSALG_SET(mtl_cfg->etsalg) |
			   MTL_OPERATION_MODE_RAA_SET(mtl_cfg->raa);

	for (uint32_t tc_id = 0; tc_id < config->num_TCs; tc_id++) {

			tcq_config->pstc[tc_id]);

	 * The below sequence of register initializations is required for the MTL transmit
	 * - Enable dynamic mapping of RX queues to RX DMA channels by programming
	 * - Configure MTL TX queue options and enable the TX queue.

			READ_BIT(tcq_config->rx_q_ddma_en, q_idx)) |
			tcq_config->rx_q_dma_chnl_sel[q_idx]);

		reg_val = MTL_TCQx_MTL_TXQx_OPERATION_MODE_TQS_SET(tcq_config->tx_q_size[q_idx]) |
			  tcq_config->q_to_tc_map[q_idx]) |
			  MTL_TCQx_MTL_TXQx_OPERATION_MODE_TTC_SET(tcq_config->ttc[q_idx]) |
			  READ_BIT(tcq_config->tsf_en, q_idx));

		reg_val = MTL_TCQx_MTC_TCx_ETS_CONTROL_TSA_SET(tcq_config->tsa[q_idx]);

		reg_val = MTL_TCQx_MTL_RXQx_OPERATION_MODE_RQS_SET(tcq_config->rx_q_size[q_idx]) |
			  READ_BIT(tcq_config->hfc_en, q_idx)) |
			  READ_BIT(tcq_config->cs_err_pkt_drop_dis, q_idx)) |
			  READ_BIT(tcq_config->rsf_en, q_idx)) |
			  READ_BIT(tcq_config->fep_en, q_idx)) |
			  READ_BIT(tcq_config->fup_en, q_idx)) |
			  MTL_TCQx_MTL_RXQx_OPERATION_MODE_RTC_SET(tcq_config->rtc[q_idx]);
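Several per-queue booleans (tsf_en, rsf_en, hfc_en, fep_en, fup_en, ...) are packed one bit per queue into a single configuration word and unpacked with READ_BIT(word, q_idx) inside the queue loop. A standalone sketch of that pattern; the READ_BIT definition here is an assumption restated for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEMO_READ_BIT(val, bit) (((val) >> (bit)) & 0x1u)

int main(void)
{
	uint32_t tsf_en = 0x5u; /* queues 0 and 2 use TX store-and-forward */

	for (uint32_t q_idx = 0; q_idx < 4; q_idx++) {
		printf("queue %u: TSF %s\n", (unsigned)q_idx,
		       DEMO_READ_BIT(tsf_en, q_idx) ? "enabled" : "disabled");
	}
	return 0;
}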
/* in dwxgmac_set_mac_addr_by_idx() */
	LOG_DBG("%s: Update MAC address %x %x %x %x %x %x at index %d", dev->name,

/* in eth_dwc_xgmac_update_link_speed() */
		LOG_DBG("%s: MAC link speed updated to 10Mbps", dev->name);

		LOG_DBG("%s: MAC link speed updated to 100Mbps", dev->name);

		LOG_DBG("%s: MAC link speed updated to 1Gbps", dev->name);

		LOG_ERR("%s: Invalid link speed configuration value", dev->name);
/* in dwxgmac_mac_init() */
	struct xgmac_mac_config *const mac_cfg = (struct xgmac_mac_config *)&config->mac_cfg;

	for (uint32_t q = 0; q < config->num_rx_Qs; q++) {

		  CORE_MAC_RX_CONFIGURATION_JE_SET(mac_cfg->je) |
		  CORE_MAC_RX_CONFIGURATION_ARPEN_SET(mac_cfg->arp_offload_en) |
		  CORE_MAC_RX_CONFIGURATION_GPSL_SET(mac_cfg->gpsl);

	eth_dwc_xgmac_update_link_speed(dev, data->link_speed);

/* in dwxgmac_irq_init() */
	struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	data->irq_cntxt_data.dev = dev;
/* in add_buffs_to_pkt() */
	buff1->len = buff1_len;
	arch_dcache_invd_range(buff1->data, CONFIG_NET_BUF_DATA_SIZE);

	buff2->len = buff2_len;
	arch_dcache_invd_range(buff2->data, CONFIG_NET_BUF_DATA_SIZE);

/* in get_and_refill_desc_buffs() */
		rx_desc->rdes0 = POINTER_TO_UINT(new_buff->data);
		rx_desc->rdes1 = POINTER_TO_UINT(new_buff->data) >> XGMAC_REG_SIZE_BITS;

		rx_desc->rdes0 = 0u;
		rx_desc->rdes1 = 0u;

		rx_desc->rdes2 = POINTER_TO_UINT(new_buff->data);

		rx_desc->rdes3 = XGMAC_RDES3_OWN | XGMAC_RDES3_IOC |
				 (POINTER_TO_UINT(new_buff->data) >> XGMAC_REG_SIZE_BITS);
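Refilling writes the buffer address words first and RDES3 last, so OWN (hardware ownership) and IOC (interrupt on completion) only become visible once the descriptor is fully formed. A sketch with hypothetical DEMO_* bit positions standing in for the real XGMAC_RDES3_* flags:

#include <stdint.h>
#include <stdio.h>

#define DEMO_RDES3_OWN (1u << 31)
#define DEMO_RDES3_IOC (1u << 30)

struct demo_rx_desc { uint32_t rdes0, rdes1, rdes2, rdes3; };

int main(void)
{
	struct demo_rx_desc d = {0};
	uint64_t buf1 = 0x0000000011000ull; /* example buffer addresses */
	uint64_t buf2 = 0x0000000012000ull;

	d.rdes0 = (uint32_t)buf1;         /* buffer 1 address, low word  */
	d.rdes1 = (uint32_t)(buf1 >> 32); /* buffer 1 address, high word */
	d.rdes2 = (uint32_t)buf2;         /* buffer 2 address, low word  */
	/* last write: buffer 2 high word plus ownership/interrupt flags */
	d.rdes3 = DEMO_RDES3_OWN | DEMO_RDES3_IOC | (uint32_t)(buf2 >> 32);

	printf("rdes3 = 0x%08x\n", (unsigned)d.rdes3);
	return 0;
}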
/* in eth_dwc_xgmac_rx_irq_work() */
	struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	(struct eth_dwc_xgmac_config *)dev->config;

	(struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;

	(struct xgmac_dma_rx_desc_meta *)&data->rx_desc_meta[dma_chnl];

	(struct xgmac_dma_rx_desc *)(data->dma_rx_desc + (dma_chnl * dma_chnl_cfg->rdrl));

	mem_addr_t *rx_buffs = (mem_addr_t *)(data->rx_buffs + (((dma_chnl * dma_chnl_cfg->rdrl)) *

	rx_desc = (struct xgmac_dma_rx_desc *)(first_rx_desc + rx_desc_meta->next_to_read);

		get_and_refill_desc_buffs(rx_desc, rx_desc_meta->next_to_read, rx_buffs, &buff1,

			LOG_DBG("%s: received FD buffer. descriptor idx = %d", dev->name,
				rx_desc_meta->next_to_read);
			if (rx_desc_meta->rx_pkt) {
				net_pkt_frag_unref(rx_desc_meta->rx_pkt->frags);
				net_pkt_unref(rx_desc_meta->rx_pkt);

			rx_desc_meta->rx_pkt = net_pkt_rx_alloc_on_iface(data->iface, K_NO_WAIT);
			if (!rx_desc_meta->rx_pkt) {

					dev->name);

		if (rx_desc_meta->rx_pkt != NULL) {

				LOG_DBG("%s: received LD buffer. descriptor idx = %d", dev->name,
					rx_desc_meta->next_to_read);

					rx_desc_meta->rx_pkt, buff1,

					(desc_data_len - CONFIG_NET_BUF_DATA_SIZE));

				add_buffs_to_pkt(rx_desc_meta->rx_pkt, buff1,

				err = net_recv_data(data->iface, rx_desc_meta->rx_pkt);

					net_pkt_unref(rx_desc_meta->rx_pkt);
					LOG_DBG("%s: received packet dropped %d", dev->name,

					LOG_DBG("%s: received a packet", dev->name);

						net_pkt_get_len(rx_desc_meta->rx_pkt));

				LOG_ERR("%s: rx packet error", dev->name);

				net_pkt_unref(rx_desc_meta->rx_pkt);

			rx_desc_meta->rx_pkt = NULL;

				add_buffs_to_pkt(rx_desc_meta->rx_pkt, buff1,

				dev->name);

		rx_desc_meta->next_to_read =
			((rx_desc_meta->next_to_read + 1) % dma_chnl_cfg->rdrl);
		rx_desc = (struct xgmac_dma_rx_desc *)(first_rx_desc + rx_desc_meta->next_to_read);
/* in eth_dwc_xgmac_tx_irq_work() */
	struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	(struct eth_dwc_xgmac_config *)dev->config;

	(struct xgmac_dma_chnl_config *)&config->dma_chnl_cfg;

	(struct xgmac_dma_tx_desc_meta *)&data->tx_desc_meta[dma_chnl];

	(struct xgmac_dma_tx_desc *)(data->dma_tx_desc + (dma_chnl * dma_chnl_cfg->tdrl));

		((tx_desc_meta->next_to_use + k_sem_count_get(&tx_desc_meta->free_tx_descs_sem)) %
		 dma_chnl_cfg->tdrl);
	for (; desc_idx != tx_desc_meta->next_to_use;
	     desc_idx = ((desc_idx + 1) % dma_chnl_cfg->tdrl)) {

		if (!(tx_desc->tdes3 & XGMAC_TDES3_OWN)) {

			if (tx_desc->tdes3 & XGMAC_TDES3_LD) {

					data->tx_pkts, dma_chnl, dma_chnl_cfg->tdrl, desc_idx));

				LOG_DBG("%s: %p packet unreferenced after tx", dev->name, pkt);

				*(tx_pkt_location_in_array(data->tx_pkts, dma_chnl,
							   dma_chnl_cfg->tdrl, desc_idx)) =

			tx_desc->tdes0 = 0u;
			tx_desc->tdes1 = 0u;
			tx_desc->tdes2 = 0u;
			tx_desc->tdes3 = 0u;

			k_sem_give(&tx_desc_meta->free_tx_descs_sem);
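The reclaim loop above locates the oldest in-flight descriptor as (next_to_use + free_count) % ring_length, where free_count is the counting semaphore's value, then walks forward until it reaches next_to_use, giving the semaphore back once per completed slot. A counter-based standalone sketch of that index arithmetic (a plain integer stands in for the k_sem):

#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_LEN 8u

int main(void)
{
	uint32_t next_to_use = 5u; /* next slot the send path will claim */
	uint32_t free_count = 4u;  /* descriptors currently free, so 4 in flight */

	/* oldest in-flight descriptor */
	uint32_t desc_idx = (next_to_use + free_count) % DEMO_RING_LEN;

	for (; desc_idx != next_to_use; desc_idx = (desc_idx + 1) % DEMO_RING_LEN) {
		/* here the driver checks !(tdes3 & OWN), unrefs the packet,
		 * and zeroes the descriptor words */
		printf("reclaim descriptor %u\n", (unsigned)desc_idx);
		free_count++; /* k_sem_give() in the driver */
	}
	return 0; /* prints descriptors 1, 2, 3, 4 */
}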
/* in eth_dwc_xgmac_dmach_isr() */
		LOG_DBG("%s: DMA channel %d Rx interrupt", dev->name, dma_chnl);

		LOG_ERR("%s: DMA channel %d Transmit process stopped", dev->name, dma_chnl);

		LOG_DBG("%s: DMA channel %d Transmit buffer unavailable", dev->name, dma_chnl);

		LOG_ERR("%s: DMA channel %d Receive buffer unavailable", dev->name, dma_chnl);

		LOG_ERR("%s: DMA channel %d Receive process stopped", dev->name, dma_chnl);

		LOG_ERR("%s: DMA channel %d Descriptor definition error", dev->name, dma_chnl);

		LOG_ERR("%s: DMA channel %d Fatal bus error", dev->name, dma_chnl);

		LOG_ERR("%s: DMA channel %d Context descriptor error", dev->name, dma_chnl);

		LOG_ERR("%s: DMA channel %d Abnormal error", dev->name, dma_chnl);
/* in eth_dwc_xgmac_mtl_isr() */
	/* Handle MTL interrupts */

/* in eth_dwc_xgmac_irq_work() */
	(struct xgmac_irq_cntxt_data *)&data->irq_cntxt_data;
	const struct device *dev = cntxt_data->dev;

	(struct eth_dwc_xgmac_config *)dev->config;

	for (uint32_t x = 0; x < config->num_dma_chnl; x++) {
		if (cntxt_data->dma_interrupt_sts & BIT(x)) {
			dma_chnl_interrupt_sts = cntxt_data->dma_chnl_interrupt_sts[x];
			cntxt_data->dma_chnl_interrupt_sts[x] ^= dma_chnl_interrupt_sts;

			WRITE_BIT(cntxt_data->dma_interrupt_sts, x, 0);

/* in eth_dwc_xgmac_isr() */
	(struct eth_dwc_xgmac_config *)dev->config;
	struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	(struct xgmac_irq_cntxt_data *)&data->irq_cntxt_data;

	if (!data->dev_started || data->link_speed == LINK_DOWN ||
	    (!net_if_flag_is_set(data->iface, NET_IF_UP))) {

		for (uint32_t x = 0; x < config->num_dma_chnl; x++) {

				LOG_ERR("%s: ignoring DMA channel %d interrupt: %x", dev->name, x,

			dev->name);

	cntxt_data->dma_interrupt_sts |= sys_read32(reg_addr);
	for (uint32_t x = 0; x < config->num_dma_chnl; x++) {
		if (cntxt_data->dma_interrupt_sts & BIT(x)) {

			cntxt_data->dma_chnl_interrupt_sts[x] |= dmach_interrupt_sts;

			WRITE_BIT(cntxt_data->dma_interrupt_sts, x, 0);

	cntxt_data->mtl_interrupt_sts |= reg_val;

	cntxt_data->mac_interrupt_sts |= reg_val;

	k_work_submit(&data->isr_work);
/* in eth_dwc_xgmac_irq_poll() */
	const struct device *dev = dev_data->irq_cntxt_data.dev;

/* in eth_dwc_xgmac_dev_init() */
	(struct eth_dwc_xgmac_config *)dev->config;
	struct eth_dwc_xgmac_dev_data *const data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	dwxgmac_dma_init(dev, &config->dma_cfg);

	if (config->random_mac_address) {

		gen_random_mac(data->mac_addr, data->mac_addr[MAC_ADDR_BYTE_0],
			       data->mac_addr[MAC_ADDR_BYTE_1], data->mac_addr[MAC_ADDR_BYTE_2]);

	dwxgmac_set_mac_addr_by_idx(dev, data->mac_addr, 0, false);
/* in phy_link_state_change_callback() */
	struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)mac_dev->data;
	bool is_up = state->is_up;

	switch (state->speed) {

		dev_data->link_speed = LINK_1GBIT;

		dev_data->link_speed = LINK_100MBIT;

		dev_data->link_speed = LINK_10MBIT;

	eth_dwc_xgmac_update_link_speed(mac_dev, dev_data->link_speed);

	net_eth_carrier_on(dev_data->iface);
	LOG_DBG("%s: Link up", mac_dev->name);

	dev_data->link_speed = LINK_DOWN;

	net_eth_carrier_off(dev_data->iface);
	LOG_DBG("%s: Link down", mac_dev->name);
/* in eth_dwc_xgmac_prefill_rx_desc() */
	struct eth_dwc_xgmac_dev_data *const dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	(struct eth_dwc_xgmac_config *)dev->config;

	(struct xgmac_dma_chnl_config *)&dev_conf->dma_chnl_cfg;

	for (uint32_t dma_chnl = 0u; dma_chnl < dev_conf->num_dma_chnl; dma_chnl++) {
		tx_desc_meta = (struct xgmac_dma_tx_desc_meta *)&dev_data->tx_desc_meta[dma_chnl];
		rx_desc_meta = (struct xgmac_dma_rx_desc_meta *)&dev_data->rx_desc_meta[dma_chnl];

		k_sem_init(&tx_desc_meta->free_tx_descs_sem, (dma_chnl_cfg->tdrl),
			   (dma_chnl_cfg->tdrl));
		k_mutex_init(&tx_desc_meta->ring_lock);
		for (; desc_id < dma_chnl_cfg->rdrl; desc_id++) {
			rx_desc = (struct xgmac_dma_rx_desc *)(dev_data->dma_rx_desc +
							       (dma_chnl * dma_chnl_cfg->rdrl) +

			rx_buffs = (mem_addr_t *)(dev_data->rx_buffs +
						  (((dma_chnl * dma_chnl_cfg->rdrl) + desc_id) *

				dev->name, desc_id);

			rx_desc->rdes0 =
				POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_ONE])->data);
			rx_desc->rdes1 =
				POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_ONE])->data) >>

				dev->name, desc_id);

			rx_desc->rdes2 =
				POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_TWO])->data);
			rx_desc->rdes3 =

				(POINTER_TO_UINT(((struct net_buf *)rx_buffs[RX_FRAG_TWO])->data) >>

			rx_desc_meta->desc_tail_addr = (mem_addr_t)(rx_desc + 1);

		reg_val = DMA_CHx_RXDESC_TAIL_LPOINTER_RDT_SET(rx_desc_meta->desc_tail_addr);

		LOG_DBG("%s: DMA channel %d Rx descriptors initialization completed", dev->name,
/* in eth_dwc_xgmac_iface_init() */
	(struct eth_dwc_xgmac_config *)dev->config;
	struct eth_dwc_xgmac_dev_data *const dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	k_mutex_init(&dev_data->dev_cfg_lock);

	k_work_init(&(dev_data->isr_work), eth_dwc_xgmac_irq_work);

	k_timer_init(&dev_data->isr_polling_timer, eth_dwc_xgmac_irq_poll, NULL);

	dev_conf->irq_config_fn(dev);

	/* Set the initial contents of the current instance's run-time data */
	dev_data->iface = iface;
	(void)net_if_set_link_addr(iface, dev_data->mac_addr, ETH_MAC_ADDRESS_SIZE,

	net_if_set_mtu(iface, dev_conf->mtu);
	LOG_DBG("%s: MTU size is set to %d", dev->name, dev_conf->mtu);
	if (device_is_ready(dev_conf->phy_dev)) {
		phy_link_callback_set(dev_conf->phy_dev, &phy_link_state_change_callback,

		LOG_ERR("%s: PHY device not ready", dev->name);

	LOG_DBG("%s: Ethernet iface init done, bound to iface %p", dev->name, iface);
/* in eth_dwc_xgmac_start_device() */
	const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
	struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	if (dev_data->dev_started) {

	for (uint32_t dma_chnl = 0u; dma_chnl < dev_conf->num_dma_chnl; dma_chnl++) {

		LOG_DBG("%s: Interrupts enabled for DMA channel %d", dev->name, dma_chnl);

	k_timer_start(&dev_data->isr_polling_timer,

	dev_conf->irq_enable_fn(dev, true);

	dev_data->dev_started = true;
	LOG_DBG("%s: Device started", dev->name);

/* in eth_dwc_xgmac_stop_device() */
	const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
	struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	if (!dev_data->dev_started) {

	dev_data->dev_started = false;

	for (uint32_t dma_chnl = 0; dma_chnl < dev_conf->num_dma_chnl; dma_chnl++) {

		LOG_DBG("%s: Interrupts disabled for DMA channel %d", dev->name, dma_chnl);

	k_timer_stop(&dev_data->isr_polling_timer);

	dev_conf->irq_enable_fn(dev, false);

	LOG_DBG("%s: Device stopped", dev->name);
 * @retval -EINVAL in case of invalid parameters, e.g. zero data length
 * @retval -EIO in case of:
 * @retval -ETIMEDOUT in case of:
 * @retval -EBUSY in case of:

/* in eth_dwc_xgmac_send() */
	const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
	struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	(struct xgmac_dma_chnl_config *)&dev_conf->dma_chnl_cfg;

	if (!pkt || !pkt->frags) {
		LOG_ERR("%s: cannot TX, invalid argument", dev->name);
		return -EINVAL;

		LOG_ERR("%s: cannot TX, zero packet length", dev->name);

		return -EINVAL;

	if (!dev_data->dev_started || dev_data->link_speed == LINK_DOWN ||
	    (!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) {

			dev->name);

		return -EIO;

	context.descmeta = (struct xgmac_dma_tx_desc_meta *)&dev_data->tx_desc_meta[context.q_id];
	context.pkt_desc_id = context.descmeta->next_to_use;

	(void)k_mutex_lock(&(context.descmeta->ring_lock), K_FOREVER);

	LOG_DBG("%s: %p packet referenced for tx", dev->name, pkt);

	for (struct net_buf *frag = pkt->frags; frag; frag = frag->frags) {
		ret = k_sem_take(&context.descmeta->free_tx_descs_sem, K_MSEC(1));

			LOG_DBG("%s: not enough free tx descriptors available", dev->name);

		context.tx_desc = (struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
							       (context.q_id * dma_ch_cfg->tdrl) +

		arch_dcache_flush_range(frag->data, CONFIG_NET_BUF_DATA_SIZE);
		context.tx_desc->tdes0 = (uint32_t)POINTER_TO_UINT(frag->data);
		context.tx_desc->tdes1 = (uint32_t)(POINTER_TO_UINT(frag->data) >> 32u);
		tdes2_flgs = frag->len;

		if (!frag->frags) { /* check last fragment of the packet */

			*(dev_data->tx_pkts + ((context.q_id * dma_ch_cfg->tdrl) +

			context.descmeta->desc_tail_addr =

		context.tx_desc->tdes2 = tdes2_flgs;
		context.tx_desc->tdes3 = tdes3_flgs;

		context.pkt_desc_id = ((context.pkt_desc_id + 1) % dma_ch_cfg->tdrl);

	context.descmeta->next_to_use = context.pkt_desc_id;

	if (context.descmeta->desc_tail_addr ==

	    (struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
					 (context.q_id * dma_ch_cfg->tdrl) +
					 dma_ch_cfg->tdrl))) {
		context.descmeta->desc_tail_addr = (mem_addr_t)POINTER_TO_UINT(
			(struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
						     (context.q_id * dma_ch_cfg->tdrl)));

	update_desc_tail_ptr(dev, context.q_id, (uint32_t)context.descmeta->desc_tail_addr);

	(void)k_mutex_unlock(&(context.descmeta->ring_lock));

	for (uint16_t desc_id = context.descmeta->next_to_use; desc_id != context.pkt_desc_id;
	     desc_id = ((desc_id + 1) % dma_ch_cfg->tdrl)) {

		(struct xgmac_dma_tx_desc *)(dev_data->dma_tx_desc +
					     (context.q_id * dma_ch_cfg->tdrl) + desc_id);
		context.tx_desc->tdes0 = 0u;
		context.tx_desc->tdes1 = 0u;
		context.tx_desc->tdes2 = 0u;
		context.tx_desc->tdes3 = 0u;
		k_sem_give(&context.descmeta->free_tx_descs_sem);

	(void)k_mutex_unlock(&(context.descmeta->ring_lock));
	LOG_DBG("%s: %p packet unreferenced after dropping", dev->name, pkt);

	return -EIO;
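A caller-side sketch of the return contract documented above: -EINVAL for an invalid packet and -EIO when the device is not ready to transmit. demo_send() is a hypothetical stand-in, since the real entry point needs a live device and a net_pkt:

#include <errno.h>
#include <stdio.h>

static int demo_send(int have_valid_pkt, int link_up)
{
	if (!have_valid_pkt) {
		return -EINVAL; /* NULL packet, no fragments, or zero length */
	}
	if (!link_up) {
		return -EIO; /* device stopped, link down, or iface not up */
	}
	return 0;
}

int main(void)
{
	int rc = demo_send(1, 0);

	if (rc == -EINVAL) {
		printf("bad packet argument\n");
	} else if (rc == -EIO) {
		printf("device not ready, drop or retry later\n");
	}
	return 0;
}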
/* in get_free_mac_addr_indx() */
	LOG_ERR("%s: MAC address filter failed. All MAC address slots are in use", dev->name);
	return -EIO;

 * @retval -EALREADY in case of:
 * -ENOTSUP for invalid config type
/* in eth_dwc_xgmac_set_config() */
	const struct eth_dwc_xgmac_config *dev_conf = (struct eth_dwc_xgmac_config *)dev->config;
	struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;
	const struct device *phy = dev_conf->phy_dev;
	const struct ethphy_driver_api *phy_api = phy->api;

	(void)k_mutex_lock(&dev_data->dev_cfg_lock, K_FOREVER);

		if (dev_data->auto_neg != config->auto_negotiation) {
			dev_data->auto_neg = config->auto_negotiation;

				get_phy_adv_speeds(dev_data->auto_neg, dev_data->enable_full_duplex,
						   dev_data->link_speed);
			retval = phy_api->cfg_link(phy, adv_speeds);

			retval = -EALREADY;

		if ((config->l.link_10bt && dev_data->link_speed == LINK_10MBIT) ||
		    (config->l.link_100bt && dev_data->link_speed == LINK_100MBIT) ||
		    (config->l.link_1000bt && dev_data->link_speed == LINK_1GBIT)) {
			retval = -EALREADY;

		if (config->l.link_1000bt) {
			dev_data->link_speed = LINK_1GBIT;
		} else if (config->l.link_100bt) {
			dev_data->link_speed = LINK_100MBIT;
		} else if (config->l.link_10bt) {
			dev_data->link_speed = LINK_10MBIT;

		adv_speeds = get_phy_adv_speeds(dev_data->auto_neg, dev_data->enable_full_duplex,
						dev_data->link_speed);
		retval = phy_api->cfg_link(phy, adv_speeds);

		if (config->full_duplex == dev_data->enable_full_duplex) {
			retval = -EALREADY;

		dev_data->enable_full_duplex = config->full_duplex;

		adv_speeds = get_phy_adv_speeds(dev_data->auto_neg, dev_data->enable_full_duplex,
						dev_data->link_speed);
		retval = phy_api->cfg_link(phy, adv_speeds);

		memcpy(dev_data->mac_addr, config->mac_address.addr, ETH_MAC_ADDRESS_SIZE);
		retval = net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,

		dwxgmac_set_mac_addr_by_idx(dev, dev_data->mac_addr, 0u, false);

		if (config->promisc_mode != dev_data->promisc_mode) {

			dev_data->promisc_mode = config->promisc_mode;

			reg_val |= CORE_MAC_PACKET_FILTER_PR_SET(dev_data->promisc_mode);

			retval = -EALREADY;

		if (!(config->filter.set)) {
			(uint8_t *)config->filter.mac_address.addr);

			ioaddr, (uint8_t *)config->filter.mac_address.addr, mac_idx,
			config->filter.type);

			retval = -EIO;

		retval = -ENOTSUP;

	k_mutex_unlock(&dev_data->dev_cfg_lock);
 * -ENOTSUP for invalid config type

/* in eth_dwc_xgmac_get_config() */
	struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;

		config->auto_negotiation = dev_data->auto_neg;

		if (dev_data->link_speed == LINK_1GBIT) {
			config->l.link_1000bt = true;
		} else if (dev_data->link_speed == LINK_100MBIT) {
			config->l.link_100bt = true;
		} else if (dev_data->link_speed == LINK_10MBIT) {
			config->l.link_10bt = true;

		config->full_duplex = dev_data->enable_full_duplex;

		memcpy(config->mac_address.addr, dev_data->mac_addr, ETH_MAC_ADDRESS_SIZE);

		config->promisc_mode = dev_data->promisc_mode;

		return -ENOTSUP;
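Both config entry points follow the same shape: switch on the requested type, service the supported cases, and fall through to -ENOTSUP. A reduced standalone sketch with a hypothetical config-type enum:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum demo_config_type { DEMO_CONFIG_AUTO_NEG, DEMO_CONFIG_UNSUPPORTED };

static int demo_get_config(enum demo_config_type type, bool *out)
{
	switch (type) {
	case DEMO_CONFIG_AUTO_NEG:
		*out = true; /* would read dev_data->auto_neg in the driver */
		return 0;
	default:
		return -ENOTSUP; /* unrecognized config type */
	}
}

int main(void)
{
	bool v;

	printf("auto-neg query: %d\n", demo_get_config(DEMO_CONFIG_AUTO_NEG, &v));
	printf("unsupported query: %d\n", demo_get_config(DEMO_CONFIG_UNSUPPORTED, &v));
	return 0;
}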
/* in eth_dwc_xgmac_stats() */
	struct eth_dwc_xgmac_dev_data *dev_data = (struct eth_dwc_xgmac_dev_data *)dev->data;

	return &dev_data->stats;

/* Device run-time data declaration macro */

	.mtl_cfg.etsalg = DT_INST_PROP(port, mtl_etsalg), \

/* Top-level device initialization macro, bundles all of the above */

 * Insert the configuration & run-time data for all XGMAC instances which