Lines matching "init", "mdio", "phy" (drivers/ethernet/eth_xmc4xxx.c, Zephyr XMC4xxx Ethernet driver)

5  * SPDX-License-Identifier: Apache-2.0
22 #include <zephyr/net/phy.h>
40 #define PHY_NODE DT_PHANDLE_BY_IDX(ETH_NODE, phy, 0)
49 #define IS_OWNED_BY_DMA_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_OWN) != 0)
50 #define IS_OWNED_BY_DMA_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_OWN) != 0)
52 #define IS_START_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_FS) != 0)
53 #define IS_END_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_LS) != 0)
55 #define IS_TIMESTAMP_AVAILABLE_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_TSA) != 0)
56 #define IS_TIMESTAMP_AVAILABLE_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_TTSS) != 0)
58 #define TOTAL_FRAME_LENGTH(desc) (FIELD_GET(ETH_MAC_DMA_RDES0_FL, (desc)->status) - 4) /* drop the 4-byte FCS included in FL */
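
These helpers each test a single bit of a DMA descriptor's status word: OWN says whether the DMA engine or the CPU currently owns the descriptor, FS/LS mark the first and last descriptor of a frame, and TOTAL_FRAME_LENGTH strips the 4-byte FCS from the hardware's frame-length field. A minimal sketch of how such flags gate a ring walk; the bit positions below are placeholders, not the XMC4xxx register map:

#include <stdint.h>
#include <stdio.h>

#define DESC_OWN (1u << 31) /* owned by DMA (placeholder position) */
#define DESC_FS  (1u << 9)  /* first segment of a frame (placeholder) */
#define DESC_LS  (1u << 8)  /* last segment of a frame (placeholder) */

struct desc {
	uint32_t status;
};

int main(void)
{
	struct desc ring[4] = {
		{ .status = DESC_FS },  /* CPU-owned, frame starts here */
		{ .status = 0 },        /* CPU-owned, middle segment */
		{ .status = DESC_LS },  /* CPU-owned, frame ends here */
		{ .status = DESC_OWN }, /* still owned by the DMA: stop */
	};

	for (int i = 0; i < 4; i++) {
		if (ring[i].status & DESC_OWN) {
			printf("desc %d: owned by DMA, stop walking\n", i);
			break;
		}
		printf("desc %d: FS=%d LS=%d\n", i,
		       !!(ring[i].status & DESC_FS), !!(ring[i].status & DESC_LS));
	}
	return 0;
}
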
134 return ctx->iface; in get_iface()
139 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_tx_dma_descriptors_init()
143 dev_cfg->regs->TRANSMIT_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&tx_dma_desc[0]; in eth_xmc4xxx_tx_dma_descriptors_init()
146 for (int i = 0; i < NUM_TX_DMA_DESCRIPTORS - 1; i++) { in eth_xmc4xxx_tx_dma_descriptors_init()
149 dma_desc->buffer2 = (volatile uint32_t)&tx_dma_desc[i + 1]; in eth_xmc4xxx_tx_dma_descriptors_init()
152 /* TER: transmit end of ring - it is the last descriptor in ring */ in eth_xmc4xxx_tx_dma_descriptors_init()
153 tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER; in eth_xmc4xxx_tx_dma_descriptors_init()
154 tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&tx_dma_desc[0]; in eth_xmc4xxx_tx_dma_descriptors_init()
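
The descriptors are chained rather than walked as a flat array: each descriptor's buffer2 holds the address of the next one, and the last points back to the first so the DMA simply follows pointers around the ring. A standalone sketch of that wiring (generic field layout, not XMC_ETH_MAC_DMA_DESC_t; buffer2 is uintptr_t here so the sketch also runs on a 64-bit host, whereas the real descriptor packs a 32-bit bus address):

#include <assert.h>
#include <stdint.h>

#define NUM_DESC 4

struct dma_desc {
	uint32_t status;
	uint32_t length;
	uintptr_t buffer1; /* frame data */
	uintptr_t buffer2; /* next descriptor in the chain */
};

static struct dma_desc ring[NUM_DESC];

int main(void)
{
	/* chain descriptor i to descriptor i + 1 */
	for (int i = 0; i < NUM_DESC - 1; i++) {
		ring[i].buffer2 = (uintptr_t)&ring[i + 1];
	}
	/* close the ring: the last descriptor points back to the first */
	ring[NUM_DESC - 1].buffer2 = (uintptr_t)&ring[0];

	assert(ring[NUM_DESC - 1].buffer2 == (uintptr_t)&ring[0]);
	return 0;
}
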
159 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_flush_rx()
160 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_flush_rx()
162 dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_SR_Msk; in eth_xmc4xxx_flush_rx()
168 dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk; in eth_xmc4xxx_flush_rx()
169 dev_data->dma_desc_rx_tail = 0; in eth_xmc4xxx_flush_rx()
174 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_flush_tx()
175 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_flush_tx()
180 if (dev_data->tx_frames_flushed) { in eth_xmc4xxx_flush_tx()
184 dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_ST_Msk; in eth_xmc4xxx_flush_tx()
186 node = sys_slist_get(&dev_data->tx_frame_list); in eth_xmc4xxx_flush_tx()
190 net_pkt_unref(tx_frame->pkt); in eth_xmc4xxx_flush_tx()
193 node = sys_slist_get(&dev_data->tx_frame_list); in eth_xmc4xxx_flush_tx()
195 dev_data->stats.errors.tx++; in eth_xmc4xxx_flush_tx()
196 dev_data->stats.error_details.tx_aborted_errors++; in eth_xmc4xxx_flush_tx()
200 k_sem_reset(&dev_data->tx_desc_sem); in eth_xmc4xxx_flush_tx()
203 dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk; in eth_xmc4xxx_flush_tx()
204 dev_data->dma_desc_tx_head = 0; in eth_xmc4xxx_flush_tx()
205 dev_data->tx_frames_flushed = true; in eth_xmc4xxx_flush_tx()
208 k_sem_give(&dev_data->tx_desc_sem); in eth_xmc4xxx_flush_tx()
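
Flushing TX stops the transmitter, unrefs every frame still queued, resets the descriptor-counting semaphore so all slots read as free again, then restarts the DMA. A host-side sketch of that bookkeeping with a plain counter standing in for tx_desc_sem (the names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define NUM_TX_DESC 8

struct tx_frame {
	struct tx_frame *next;
	int id; /* stands in for the queued net_pkt */
};

static struct tx_frame *pending_head;
static int free_descriptors; /* stands in for the counting semaphore */

static void flush_tx(void)
{
	/* 1. stop TX DMA (register write omitted) */

	/* 2. drop every frame still queued for transmission */
	while (pending_head != NULL) {
		struct tx_frame *f = pending_head;

		pending_head = f->next;
		printf("dropping frame %d\n", f->id); /* real driver: net_pkt_unref() */
	}

	/* 3. every descriptor is free again (driver: k_sem_reset, then k_sem_give per slot) */
	free_descriptors = NUM_TX_DESC;

	/* 4. restart TX DMA and rewind the head index (omitted) */
}

int main(void)
{
	struct tx_frame a = { .next = NULL, .id = 1 };
	struct tx_frame b = { .next = &a, .id = 2 };

	pending_head = &b;
	free_descriptors = 3;
	flush_tx();
	printf("free descriptors: %d\n", free_descriptors);
	return 0;
}
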
214 regs->STATUS = ETH_STATUS_TPS_Msk; in eth_xmc4xxx_trigger_dma_tx()
215 regs->TRANSMIT_POLL_DEMAND = 0; in eth_xmc4xxx_trigger_dma_tx()
220 regs->STATUS = ETH_STATUS_RU_Msk; in eth_xmc4xxx_trigger_dma_rx()
221 regs->RECEIVE_POLL_DEMAND = 0U; in eth_xmc4xxx_trigger_dma_rx()
226 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_send()
227 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_send()
237 frag = pkt->frags; in eth_xmc4xxx_send()
240 frag = frag->frags; in eth_xmc4xxx_send()
245 dev_data->stats.error_details.tx_dma_failed++; in eth_xmc4xxx_send()
248 return -ENOMEM; in eth_xmc4xxx_send()
253 eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs); in eth_xmc4xxx_send()
262 dev_data->tx_frames_flushed = false; in eth_xmc4xxx_send()
265 tx_frame->pkt = pkt; in eth_xmc4xxx_send()
266 tx_frame->tail_index = dev_data->dma_desc_tx_head; in eth_xmc4xxx_send()
268 frag = pkt->frags; in eth_xmc4xxx_send()
270 ret = k_sem_take(&dev_data->tx_desc_sem, K_FOREVER); in eth_xmc4xxx_send()
272 if (ret < 0 || dev_data->tx_frames_flushed) { in eth_xmc4xxx_send()
276 dev_data->stats.error_details.tx_aborted_errors++; in eth_xmc4xxx_send()
279 return -EIO; in eth_xmc4xxx_send()
286 dma_desc = &tx_dma_desc[dev_data->dma_desc_tx_head]; in eth_xmc4xxx_send()
288 frag_data = frag->data; in eth_xmc4xxx_send()
289 frag_len = frag->len; in eth_xmc4xxx_send()
291 dma_desc->buffer1 = (volatile uint32_t)frag_data; in eth_xmc4xxx_send()
292 dma_desc->length = frag_len; in eth_xmc4xxx_send()
296 dma_desc->status = ETH_MAC_DMA_TDES0_CIC | ETH_MAC_DMA_TDES0_TCH; in eth_xmc4xxx_send()
301 dma_desc->status |= ETH_MAC_DMA_TDES0_OWN; in eth_xmc4xxx_send()
303 dma_desc->status |= ETH_MAC_DMA_TDES0_FS; in eth_xmc4xxx_send()
308 if (ntohs(hdr->type) == NET_ETH_PTYPE_PTP) { in eth_xmc4xxx_send()
309 dma_desc->status |= ETH_MAC_DMA_TDES0_TTSE; in eth_xmc4xxx_send()
315 tx_frame->head_index = dev_data->dma_desc_tx_head; in eth_xmc4xxx_send()
317 MODULO_INC_TX(dev_data->dma_desc_tx_head); in eth_xmc4xxx_send()
321 frag = frag->frags; in eth_xmc4xxx_send()
324 if (dev_data->tx_frames_flushed) { in eth_xmc4xxx_send()
328 dev_data->stats.error_details.tx_aborted_errors++; in eth_xmc4xxx_send()
331 return -EIO; in eth_xmc4xxx_send()
337 dma_desc->status |= ETH_MAC_DMA_TDES0_IC | ETH_MAC_DMA_TDES0_LS; in eth_xmc4xxx_send()
341 tx_dma_desc[tx_frame->tail_index].status |= ETH_MAC_DMA_TDES0_OWN; in eth_xmc4xxx_send()
343 sys_slist_append(&dev_data->tx_frame_list, &tx_frame->node); in eth_xmc4xxx_send()
345 eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs); in eth_xmc4xxx_send()
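
The TX path maps one descriptor per packet fragment: reserve a slot (blocking on the counting semaphore), point buffer1 at the fragment, mark the first descriptor FS and the last IC|LS, and only then set OWN on the first descriptor, so the DMA never sees a half-built chain. A condensed host-side sketch of that ordering (plain arrays in place of net_pkt/net_buf, placeholder bit values):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_DESC 8
#define OWN (1u << 31) /* placeholder bit positions */
#define FS  (1u << 28)
#define LS  (1u << 29)

struct desc {
	uint32_t status;
	const void *buffer1;
	uint16_t length;
};

static struct desc ring[NUM_DESC];
static unsigned int head;

/* Queue one frame of n fragments; returns the index of its first descriptor. */
static unsigned int queue_frame(const void *frags[], const uint16_t lens[], size_t n)
{
	unsigned int first = head;

	for (size_t i = 0; i < n; i++) {
		struct desc *d = &ring[head];

		d->buffer1 = frags[i];
		d->length = lens[i];
		d->status = (i == 0) ? FS : OWN; /* OWN everywhere but the first... */
		if (i == n - 1) {
			d->status |= LS;
		}
		head = (head + 1) % NUM_DESC;
	}
	/* ...then release the whole chain by handing over the first descriptor */
	ring[first].status |= OWN;
	return first;
}

int main(void)
{
	const void *frags[] = { "hdr", "payload" };
	const uint16_t lens[] = { 3, 7 };

	printf("first desc: %u\n", queue_frame(frags, lens, 2));
	return 0;
}
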
354 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_rx_pkt()
355 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_rx_pkt()
366 tail = dev_data->dma_desc_rx_tail; in eth_xmc4xxx_rx_pkt()
374 /* handle this error - missing SOF packet? */ in eth_xmc4xxx_rx_pkt()
388 if (tail == dev_data->dma_desc_rx_tail) { in eth_xmc4xxx_rx_pkt()
405 dev_data->stats.errors.rx++; in eth_xmc4xxx_rx_pkt()
406 dev_data->stats.error_details.rx_no_buffer_count++; in eth_xmc4xxx_rx_pkt()
412 tail = dev_data->dma_desc_rx_tail; in eth_xmc4xxx_rx_pkt()
418 frag = dev_data->rx_frag_list[tail]; in eth_xmc4xxx_rx_pkt()
420 frag_len = TOTAL_FRAME_LENGTH(dma_desc) - in eth_xmc4xxx_rx_pkt()
421 CONFIG_NET_BUF_DATA_SIZE * (num_frags - 1); in eth_xmc4xxx_rx_pkt()
425 .second = dma_desc->time_stamp_seconds, in eth_xmc4xxx_rx_pkt()
426 .nanosecond = dma_desc->time_stamp_nanoseconds}; in eth_xmc4xxx_rx_pkt()
436 dev_data->stats.errors.rx++; in eth_xmc4xxx_rx_pkt()
437 dev_data->stats.error_details.rx_buf_alloc_failed++; in eth_xmc4xxx_rx_pkt()
452 dev_data->rx_frag_list[tail] = frag; in eth_xmc4xxx_rx_pkt()
456 dma_desc->buffer1 = (uint32_t)dev_data->rx_frag_list[tail]->data; in eth_xmc4xxx_rx_pkt()
457 dma_desc->length = dev_data->rx_frag_list[tail]->size | in eth_xmc4xxx_rx_pkt()
459 dma_desc->status = ETH_MAC_DMA_RDES0_OWN; in eth_xmc4xxx_rx_pkt()
471 dev_data->dma_desc_rx_tail = tail; in eth_xmc4xxx_rx_pkt()
473 eth_xmc4xxx_trigger_dma_rx(dev_cfg->regs); in eth_xmc4xxx_rx_pkt()
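
A received frame may span several fragments; all but the last are completely full, so the last fragment's length falls out of the hardware's total frame length (with the FCS already subtracted by TOTAL_FRAME_LENGTH). A worked example:

#include <stdio.h>

#define BUF_DATA_SIZE 128 /* stand-in for CONFIG_NET_BUF_DATA_SIZE */

/* Length of the final fragment of a frame spanning num_frags buffers. */
static int last_frag_len(int total_frame_len, int num_frags)
{
	return total_frame_len - BUF_DATA_SIZE * (num_frags - 1);
}

int main(void)
{
	/* A 1514-byte frame over 128-byte buffers needs 12 fragments:
	 * 11 full ones carry 1408 bytes, so the last carries 106. */
	printf("%d\n", last_frag_len(1514, 12)); /* prints 106 */
	return 0;
}
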
480 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_handle_rx()
498 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_handle_tx()
499 sys_snode_t *node = sys_slist_peek_head(&dev_data->tx_frame_list); in eth_xmc4xxx_handle_tx()
507 if (tx_frame->head_index >= tx_frame->tail_index) { in eth_xmc4xxx_handle_tx()
508 num_descriptors = tx_frame->head_index - tx_frame->tail_index + 1; in eth_xmc4xxx_handle_tx()
510 num_descriptors = tx_frame->head_index + NUM_TX_DMA_DESCRIPTORS - in eth_xmc4xxx_handle_tx()
511 tx_frame->tail_index + 1; in eth_xmc4xxx_handle_tx()
514 index = tx_frame->tail_index; in eth_xmc4xxx_handle_tx()
526 XMC_ETH_MAC_DMA_DESC_t *dma_desc = &tx_dma_desc[tx_frame->head_index]; in eth_xmc4xxx_handle_tx()
529 struct net_pkt *pkt = tx_frame->pkt; in eth_xmc4xxx_handle_tx()
531 if (atomic_get(&pkt->atomic_ref) > 1) { in eth_xmc4xxx_handle_tx()
533 .second = dma_desc->time_stamp_seconds, in eth_xmc4xxx_handle_tx()
534 .nanosecond = dma_desc->time_stamp_nanoseconds}; in eth_xmc4xxx_handle_tx()
543 k_sem_give(&dev_data->tx_desc_sem); in eth_xmc4xxx_handle_tx()
546 sys_slist_get(&dev_data->tx_frame_list); in eth_xmc4xxx_handle_tx()
547 net_pkt_unref(tx_frame->pkt); in eth_xmc4xxx_handle_tx()
549 node = sys_slist_peek_head(&dev_data->tx_frame_list); in eth_xmc4xxx_handle_tx()
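
Because the head and tail indices wrap around the ring, the number of descriptors a completed frame occupied is head - tail + 1 when head is ahead of tail, and head + N - tail + 1 after a wrap, exactly as computed above. A quick check:

#include <stdio.h>

#define NUM_TX_DMA_DESCRIPTORS 16

/* Descriptors spanned by a frame whose first slot is tail and last slot is head. */
static int frame_desc_count(int head, int tail)
{
	if (head >= tail) {
		return head - tail + 1;
	}
	return head + NUM_TX_DMA_DESCRIPTORS - tail + 1;
}

int main(void)
{
	printf("%d\n", frame_desc_count(5, 3));  /* 3: slots 3, 4, 5 */
	printf("%d\n", frame_desc_count(1, 14)); /* 4: slots 14, 15, 0, 1 */
	return 0;
}
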
560 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_isr()
563 status = dev_cfg->regs->STATUS; in eth_xmc4xxx_isr()
583 dev_cfg->regs->STATUS = status & ETH_STATUS_CLEARABLE_BITS; in eth_xmc4xxx_isr()
590 regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk; in eth_xmc4xxx_enable_tx()
591 regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_TE_Msk; in eth_xmc4xxx_enable_tx()
596 regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk; in eth_xmc4xxx_enable_rx()
597 regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_RE_Msk; in eth_xmc4xxx_enable_rx()
602 uint32_t reg = regs->MAC_CONFIGURATION; in eth_xmc4xxx_set_link()
607 val = PHY_LINK_IS_FULL_DUPLEX(state->speed) ? ETH_LINK_DUPLEX_FULL : in eth_xmc4xxx_set_link()
611 val = PHY_LINK_IS_SPEED_100M(state->speed) ? ETH_LINK_SPEED_100M : in eth_xmc4xxx_set_link()
615 regs->MAC_CONFIGURATION = reg; in eth_xmc4xxx_set_link()
622 struct eth_xmc4xxx_data *dev_data = dev->data; in phy_link_state_changed()
623 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in phy_link_state_changed()
624 bool is_up = state->is_up; in phy_link_state_changed()
626 if (is_up && !dev_data->link_up) { in phy_link_state_changed()
628 dev_data->link_up = true; in phy_link_state_changed()
629 net_eth_carrier_on(dev_data->iface); in phy_link_state_changed()
630 eth_xmc4xxx_set_link(dev_cfg->regs, state); in phy_link_state_changed()
631 } else if (!is_up && dev_data->link_up) { in phy_link_state_changed()
633 dev_data->link_up = false; in phy_link_state_changed()
634 net_eth_carrier_off(dev_data->iface); in phy_link_state_changed()
640 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_get_phy()
642 return dev_cfg->phy_dev; in eth_xmc4xxx_get_phy()
648 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_iface_init()
649 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_iface_init()
651 dev_data->iface = iface; in eth_xmc4xxx_iface_init()
653 net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), in eth_xmc4xxx_iface_init()
658 dev_cfg->irq_config_func(); in eth_xmc4xxx_iface_init()
660 /* Do not start the interface until PHY link is up */ in eth_xmc4xxx_iface_init()
663 phy_link_callback_set(dev_cfg->phy_dev, &phy_link_state_changed, (void *)dev); in eth_xmc4xxx_iface_init()
665 dev_cfg->regs->INTERRUPT_ENABLE |= ETH_STATUS_ALL_EVENTS; in eth_xmc4xxx_iface_init()
667 eth_xmc4xxx_enable_tx(dev_cfg->regs); in eth_xmc4xxx_iface_init()
668 eth_xmc4xxx_enable_rx(dev_cfg->regs); in eth_xmc4xxx_iface_init()
674 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_stats()
676 return &dev_data->stats; in eth_xmc4xxx_stats()
682 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_free_rx_bufs()
685 if (dev_data->rx_frag_list[i]) { in eth_xmc4xxx_free_rx_bufs()
686 net_buf_unref(dev_data->rx_frag_list[i]); in eth_xmc4xxx_free_rx_bufs()
687 dev_data->rx_frag_list[i] = NULL; in eth_xmc4xxx_free_rx_bufs()
694 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_rx_dma_descriptors_init()
695 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_rx_dma_descriptors_init()
697 dev_cfg->regs->RECEIVE_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&rx_dma_desc[0]; in eth_xmc4xxx_rx_dma_descriptors_init()
699 for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS - 1; i++) { in eth_xmc4xxx_rx_dma_descriptors_init()
702 dma_desc->buffer2 = (volatile uint32_t)&rx_dma_desc[i + 1]; in eth_xmc4xxx_rx_dma_descriptors_init()
705 rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER; /* end-of-ring; reuses the TX TER define */ in eth_xmc4xxx_rx_dma_descriptors_init()
706 rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&rx_dma_desc[0]; in eth_xmc4xxx_rx_dma_descriptors_init()
716 return -ENOBUFS; in eth_xmc4xxx_rx_dma_descriptors_init()
719 dev_data->rx_frag_list[i] = rx_buf; in eth_xmc4xxx_rx_dma_descriptors_init()
720 dma_desc->buffer1 = (uint32_t)rx_buf->data; in eth_xmc4xxx_rx_dma_descriptors_init()
721 dma_desc->length = rx_buf->size | ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK; in eth_xmc4xxx_rx_dma_descriptors_init()
722 dma_desc->status = ETH_MAC_DMA_RDES0_OWN; in eth_xmc4xxx_rx_dma_descriptors_init()
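
Each RX descriptor is armed with one network buffer: buffer1 points at the buffer's data area, the length field packs the buffer size together with the second-address-chained flag, and writing OWN hands the slot to the DMA. A sketch of arming a single slot (generic buffer type, placeholder flag values):

#include <stdint.h>
#include <stdio.h>

#define CHAINED_MASK (1u << 14) /* placeholder for the chained-address flag */
#define OWN          (1u << 31)

struct dma_desc {
	uint32_t status;
	uint32_t length;
	const void *buffer1;
};

struct buf {
	uint8_t data[128];
	uint16_t size;
};

/* Attach a receive buffer to a descriptor, then give the slot to the DMA. */
static void arm_rx_desc(struct dma_desc *d, struct buf *b)
{
	d->buffer1 = b->data;
	d->length = (uint32_t)b->size | CHAINED_MASK; /* size + chaining mode */
	d->status = OWN;                              /* DMA may now fill it */
}

int main(void)
{
	struct dma_desc d = { 0 };
	struct buf b = { .size = 128 };

	arm_rx_desc(&d, &b);
	printf("length field: 0x%08x\n", (unsigned)d.length);
	return 0;
}
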
730 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_reset()
732 dev_cfg->regs->BUS_MODE |= ETH_BUS_MODE_SWR_Msk; in eth_xmc4xxx_reset()
735 if (!WAIT_FOR((dev_cfg->regs->BUS_MODE & ETH_BUS_MODE_SWR_Msk) == 0, in eth_xmc4xxx_reset()
737 return -ETIMEDOUT; in eth_xmc4xxx_reset()
745 regs->MAC_ADDRESS0_HIGH = addr[4] | (addr[5] << 8); in eth_xmc4xxx_set_mac_address()
746 regs->MAC_ADDRESS0_LOW = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); in eth_xmc4xxx_set_mac_address()
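
The 6-byte station address is split across two registers: the low word carries bytes 0-3 and the high register bytes 4-5, least significant byte first. A round-trip sketch of the packing:

#include <stdint.h>
#include <stdio.h>

/* Pack a MAC address into the two address registers (LOW: bytes 0-3, HIGH: bytes 4-5). */
static void mac_to_regs(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
{
	*lo = addr[0] | (addr[1] << 8) | ((uint32_t)addr[2] << 16) |
	      ((uint32_t)addr[3] << 24);
	*hi = addr[4] | (addr[5] << 8);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x03, 0x19, 0x12, 0x34, 0x56 };
	uint32_t lo, hi;

	mac_to_regs(mac, &lo, &hi);
	printf("LOW=0x%08x HIGH=0x%04x\n", (unsigned)lo, (unsigned)hi);
	/* prints LOW=0x12190300 HIGH=0x5634 */
	return 0;
}
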
752 regs->MMC_TRANSMIT_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_INTERRUPT_MSK; in eth_xmc4xxx_mask_unused_interrupts()
753 regs->MMC_RECEIVE_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_INTERRUPT_MSK; in eth_xmc4xxx_mask_unused_interrupts()
755 /* IPC - Receive IP checksum checker */ in eth_xmc4xxx_mask_unused_interrupts()
756 regs->MMC_IPC_RECEIVE_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_IPC_RECEIVE_INTERRUPT_MSK; in eth_xmc4xxx_mask_unused_interrupts()
759 regs->INTERRUPT_MASK = ETH_INTERRUPT_MASK_PMTIM_Msk | ETH_INTERRUPT_MASK_TSIM_Msk; in eth_xmc4xxx_mask_unused_interrupts()
765 regs->TIMESTAMP_CONTROL = ETH_TIMESTAMP_CONTROL_TSENA_Msk | in eth_xmc4xxx_init_timestamp_control_reg()
771 regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSCFUPDT_Msk | in eth_xmc4xxx_init_timestamp_control_reg()
774 /* make ptp run at 50MHz - implies 20ns increment for each increment of the sub-second counter */ in eth_xmc4xxx_init_timestamp_control_reg()
776 regs->SUB_SECOND_INCREMENT = 20; in eth_xmc4xxx_init_timestamp_control_reg()
784 regs->TIMESTAMP_ADDEND = K; in eth_xmc4xxx_init_timestamp_control_reg()
787 regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk; in eth_xmc4xxx_init_timestamp_control_reg()
788 if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0, in eth_xmc4xxx_init_timestamp_control_reg()
790 return -ETIMEDOUT; in eth_xmc4xxx_init_timestamp_control_reg()
793 regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk; in eth_xmc4xxx_init_timestamp_control_reg()
794 if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0, in eth_xmc4xxx_init_timestamp_control_reg()
796 return -ETIMEDOUT; in eth_xmc4xxx_init_timestamp_control_reg()
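
In fine-update mode the hardware adds TIMESTAMP_ADDEND to a 32-bit accumulator on every system clock; each overflow advances the PTP time by SUB_SECOND_INCREMENT nanoseconds. With a 20 ns increment the target tick rate is 50 MHz, so the addend is K = 2^32 * 50 MHz / f_sys. A worked computation; the 144 MHz system clock is an assumption, the real value is board-specific:

#include <stdint.h>
#include <stdio.h>

#define PTP_RATE_HZ 50000000ULL  /* 20 ns per tick, per SUB_SECOND_INCREMENT = 20 */
#define SYS_CLK_HZ  144000000ULL /* assumed system clock, board-specific */

int main(void)
{
	/* The accumulator overflows PTP_RATE_HZ times per second when the
	 * addend is 2^32 scaled by the ratio of the two clock rates. */
	uint64_t K = (PTP_RATE_HZ << 32) / SYS_CLK_HZ;

	printf("TIMESTAMP_ADDEND = 0x%08x\n", (unsigned)K); /* 2^32 * 50 / 144 */
	return 0;
}
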
804 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_init()
805 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_init()
809 sys_slist_init(&dev_data->tx_frame_list); in eth_xmc4xxx_init()
810 k_sem_init(&dev_data->tx_desc_sem, NUM_TX_DMA_DESCRIPTORS, in eth_xmc4xxx_init()
813 if (!device_is_ready(dev_cfg->phy_dev)) { in eth_xmc4xxx_init()
814 LOG_ERR("Phy device not ready"); in eth_xmc4xxx_init()
815 return -ENODEV; in eth_xmc4xxx_init()
818 /* get the port control initialized by MDIO driver */ in eth_xmc4xxx_init()
819 port_ctrl.raw = ETH0_CON->CON; in eth_xmc4xxx_init()
820 port_ctrl.raw |= dev_cfg->port_ctrl.raw; in eth_xmc4xxx_init()
823 ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT); in eth_xmc4xxx_init()
839 dev_cfg->regs->MAC_CONFIGURATION = ETH_MAC_CONFIGURATION_IPC_Msk; in eth_xmc4xxx_init()
842 dev_cfg->regs->MAC_CONFIGURATION &= ~ETH_MAC_CONFIGURATION_JE_Msk; in eth_xmc4xxx_init()
845 /* Initialize Filter registers - disable zero quanta pause */ in eth_xmc4xxx_init()
846 dev_cfg->regs->FLOW_CONTROL = ETH_FLOW_CONTROL_DZPQ_Msk; in eth_xmc4xxx_init()
848 /* rsf - receive store and forward */ in eth_xmc4xxx_init()
849 /* tsf - transmit store and forward */ in eth_xmc4xxx_init()
850 dev_cfg->regs->OPERATION_MODE = ETH_OPERATION_MODE_RSF_Msk | ETH_OPERATION_MODE_TSF_Msk | in eth_xmc4xxx_init()
854 /* alternate descriptor size (ATDS) is required when the Time-Stamp feature or Full IPC Offload Engine is enabled */ in eth_xmc4xxx_init()
855 dev_cfg->regs->BUS_MODE = ETH_BUS_MODE_ATDS_Msk | ETH_BUS_MODE_AAL_Msk | in eth_xmc4xxx_init()
865 dev_cfg->regs->STATUS = ETH_STATUS_CLEARABLE_BITS; in eth_xmc4xxx_init()
867 eth_xmc4xxx_mask_unused_interrupts(dev_cfg->regs); in eth_xmc4xxx_init()
870 gen_random_mac(dev_data->mac_addr, INFINEON_OUI_B0, INFINEON_OUI_B1, INFINEON_OUI_B2); in eth_xmc4xxx_init()
872 eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr); in eth_xmc4xxx_init()
874 uint32_t reg = dev_cfg->regs->MAC_FRAME_FILTER; in eth_xmc4xxx_init()
879 dev_cfg->regs->MAC_FRAME_FILTER = reg; in eth_xmc4xxx_init()
881 return eth_xmc4xxx_init_timestamp_control_reg(dev_cfg->regs); in eth_xmc4xxx_init()
904 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_set_config()
905 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_set_config()
909 memcpy(dev_data->mac_addr, config->mac_address.addr, sizeof(dev_data->mac_addr)); in eth_xmc4xxx_set_config()
910 LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name, in eth_xmc4xxx_set_config()
911 dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2], in eth_xmc4xxx_set_config()
912 dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]); in eth_xmc4xxx_set_config()
914 eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr); in eth_xmc4xxx_set_config()
915 net_if_set_link_addr(dev_data->iface, dev_data->mac_addr, in eth_xmc4xxx_set_config()
916 sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); in eth_xmc4xxx_set_config()
922 return -ENOTSUP; in eth_xmc4xxx_set_config()
935 struct eth_xmc4xxx_data *dev_data = dev->data; in eth_xmc4xxx_get_ptp_clock()
937 return dev_data->ptp_clock; in eth_xmc4xxx_get_ptp_clock()
947 const struct eth_xmc4xxx_config *dev_cfg = dev->config; in eth_xmc4xxx_vlan_setup()
952 dev_cfg->regs->VLAN_TAG = FIELD_PREP(ETH_VLAN_TAG_VL_Msk, tag) | in eth_xmc4xxx_vlan_setup()
955 dev_cfg->regs->MAC_FRAME_FILTER |= ETH_MAC_FRAME_FILTER_VTFE_Msk; in eth_xmc4xxx_vlan_setup()
957 dev_cfg->regs->VLAN_TAG = 0; in eth_xmc4xxx_vlan_setup()
958 dev_cfg->regs->MAC_FRAME_FILTER &= ~ETH_MAC_FRAME_FILTER_VTFE_Msk; in eth_xmc4xxx_vlan_setup()
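
FIELD_PREP is the inverse of the FIELD_GET used for the frame length earlier: it shifts a value into position under a register mask, here placing the VLAN tag into the VL field before the filter is enabled. Minimal stand-ins for the two helpers, with a placeholder mask:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VL_MASK 0x0000FFFFu /* placeholder for ETH_VLAN_TAG_VL_Msk */

/* Stand-ins in the spirit of the FIELD_PREP/FIELD_GET helpers the driver
 * uses; they work for any contiguous mask. */
#define LSB(mask)            ((mask) & -(mask))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) * LSB(mask)) & (mask))
#define FIELD_GET(mask, reg) (((reg) & (mask)) / LSB(mask))

int main(void)
{
	uint32_t reg = FIELD_PREP(VLAN_VL_MASK, 100); /* place tag 100 in the VL field */

	printf("reg=0x%08x tag=%u\n", (unsigned)reg,
	       (unsigned)FIELD_GET(VLAN_VL_MASK, reg));
	return 0;
}
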
966 .iface_api.init = eth_xmc4xxx_iface_init,
1021 struct ptp_context *ptp_context = dev->data; in eth_xmc4xxx_ptp_clock_set()
1022 const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; in eth_xmc4xxx_ptp_clock_set()
1024 dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = tm->nanosecond; in eth_xmc4xxx_ptp_clock_set()
1025 dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = tm->second; in eth_xmc4xxx_ptp_clock_set()
1027 dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk; in eth_xmc4xxx_ptp_clock_set()
1028 if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0, in eth_xmc4xxx_ptp_clock_set()
1030 return -ETIMEDOUT; in eth_xmc4xxx_ptp_clock_set()
1038 struct ptp_context *ptp_context = dev->data; in eth_xmc4xxx_ptp_clock_get()
1039 const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; in eth_xmc4xxx_ptp_clock_get()
1041 uint32_t nanosecond_0 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS; in eth_xmc4xxx_ptp_clock_get()
1042 uint32_t second_0 = dev_cfg->regs->SYSTEM_TIME_SECONDS; in eth_xmc4xxx_ptp_clock_get()
1044 uint32_t nanosecond_1 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS; in eth_xmc4xxx_ptp_clock_get()
1045 uint32_t second_1 = dev_cfg->regs->SYSTEM_TIME_SECONDS; in eth_xmc4xxx_ptp_clock_get()
1050 tm->second = second_0; in eth_xmc4xxx_ptp_clock_get()
1051 tm->nanosecond = nanosecond_0; in eth_xmc4xxx_ptp_clock_get()
1053 tm->second = second_1; in eth_xmc4xxx_ptp_clock_get()
1054 tm->nanosecond = nanosecond_1; in eth_xmc4xxx_ptp_clock_get()
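
Seconds and nanoseconds live in two registers that cannot be read atomically, so the driver samples nanoseconds, seconds, then both again: if the second nanosecond reading went backwards, a rollover happened between the samples and the second pair is the coherent one. The same pattern against a fake clock:

#include <stdint.h>
#include <stdio.h>

/* Fake clock: nanoseconds roll over right after the first read, so the
 * first (seconds, nanoseconds) pair would be incoherent. */
static uint32_t ns_vals[] = { 999999990, 5 };
static uint32_t s_vals[] = { 43, 43 };
static int ns_i, s_i;

static uint32_t read_ns(void) { return ns_vals[ns_i++]; }
static uint32_t read_s(void)  { return s_vals[s_i++]; }

int main(void)
{
	uint32_t ns0 = read_ns();
	uint32_t s0 = read_s();
	uint32_t ns1 = read_ns();
	uint32_t s1 = read_s();

	if (ns1 >= ns0) {
		/* no rollover between the samples: the first pair is coherent */
		printf("t = %u.%09u s\n", (unsigned)s0, (unsigned)ns0);
	} else {
		/* nanoseconds wrapped: trust the second pair */
		printf("t = %u.%09u s\n", (unsigned)s1, (unsigned)ns1); /* 43.000000005 */
	}
	return 0;
}
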
1062 struct ptp_context *ptp_context = dev->data; in eth_xmc4xxx_ptp_clock_adjust()
1063 const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; in eth_xmc4xxx_ptp_clock_adjust()
1066 if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) { in eth_xmc4xxx_ptp_clock_adjust()
1067 return -EINVAL; in eth_xmc4xxx_ptp_clock_adjust()
1071 increment_tmp = -increment; in eth_xmc4xxx_ptp_clock_adjust()
1077 dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = increment_tmp; in eth_xmc4xxx_ptp_clock_adjust()
1078 dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = 0; in eth_xmc4xxx_ptp_clock_adjust()
1080 dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSUPDT_Msk; in eth_xmc4xxx_ptp_clock_adjust()
1081 if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSUPDT_Msk) == 0, in eth_xmc4xxx_ptp_clock_adjust()
1083 return -ETIMEDOUT; in eth_xmc4xxx_ptp_clock_adjust()
1091 struct ptp_context *ptp_context = dev->data; in eth_xmc4xxx_ptp_clock_rate_adjust()
1092 const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; in eth_xmc4xxx_ptp_clock_rate_adjust()
1093 uint64_t K = dev_cfg->regs->TIMESTAMP_ADDEND; in eth_xmc4xxx_ptp_clock_rate_adjust()
1096 return -EINVAL; in eth_xmc4xxx_ptp_clock_rate_adjust()
1102 return -EINVAL; in eth_xmc4xxx_ptp_clock_rate_adjust()
1104 dev_cfg->regs->TIMESTAMP_ADDEND = K; in eth_xmc4xxx_ptp_clock_rate_adjust()
1107 dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk; in eth_xmc4xxx_ptp_clock_rate_adjust()
1108 if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0, in eth_xmc4xxx_ptp_clock_rate_adjust()
1110 return -ETIMEDOUT; in eth_xmc4xxx_ptp_clock_rate_adjust()
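
Rate adjustment rescales the addend, K_new = K * ratio, so the accumulator overflows proportionally faster or slower; the result must still fit the 32-bit register, hence the -EINVAL guard. A sketch of the scaling with a double ratio as in the PTP clock API:

#include <stdint.h>
#include <stdio.h>

/* Scale the timestamp addend by ratio; returns 0 on success or -1 when the
 * result cannot be represented in the 32-bit register (driver: -EINVAL). */
static int rate_adjust(uint32_t *addend, double ratio)
{
	if (ratio <= 0.0) {
		return -1;
	}

	uint64_t K = (uint64_t)((double)*addend * ratio);

	if (K == 0 || K > UINT32_MAX) {
		return -1;
	}
	*addend = (uint32_t)K;
	return 0;
}

int main(void)
{
	uint32_t K = 0x58E38E38; /* example addend from the 50 MHz setup above */

	/* run the clock 100 ppm fast */
	if (rate_adjust(&K, 1.0 + 100e-6) == 0) {
		printf("new addend: 0x%08x\n", (unsigned)K);
	}
	return 0;
}
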
1126 struct eth_xmc4xxx_data *dev_data = eth_dev->data; in ptp_clock_xmc4xxx_init()
1127 struct ptp_context *ptp_context = port->data; in ptp_clock_xmc4xxx_init()
1129 dev_data->ptp_clock = port; in ptp_clock_xmc4xxx_init()
1130 ptp_context->eth_dev = eth_dev; in ptp_clock_xmc4xxx_init()