Lines Matching "poll-period-ms"

 * Copyright (c) 2016-2017 ARM Ltd
 * SPDX-License-Identifier: Apache-2.0

#define PHY_OMS_NANDTREE_MASK 0x0020U /* The PHY NAND Tree Strap-In Override/Status mask. */

in phy_state_name():
        "read-status",
        "read-duplex",

 * depends on introduction of zero-copy networking support

in eth_mcux_device_pm_action():
        struct eth_context *eth_ctx = dev->data;
        if (!device_is_ready(eth_ctx->clock_dev)) {
        ret = -EIO;
        ret = net_if_suspend(eth_ctx->iface);
        if (ret == -EBUSY) {
        ENET_Reset(eth_ctx->base);
        ENET_Deinit(eth_ctx->base);
        clock_control_off(eth_ctx->clock_dev,
                (clock_control_subsys_t)eth_ctx->clock);
        clock_control_on(eth_ctx->clock_dev,
                (clock_control_subsys_t)eth_ctx->clock);
        net_if_resume(eth_ctx->iface);
        ret = -ENOTSUP;
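
The suspend/resume fragments above follow Zephyr's device power-management callback pattern: suspend the network interface, quiesce the ENET block, then gate its clock; resume ungates the clock and brings the interface back. Below is a minimal sketch of that pattern, assuming Zephyr's pm_device_action API; the handler name my_eth_pm_action is illustrative, and only the calls visible in the excerpt are reproduced, so this is not the driver's actual handler.

#include <zephyr/device.h>
#include <zephyr/pm/device.h>
#include <zephyr/net/net_if.h>
#include <zephyr/drivers/clock_control.h>
#include "fsl_enet.h"

/* Hypothetical PM handler mirroring the suspend/resume flow shown above. */
static int my_eth_pm_action(const struct device *dev, enum pm_device_action action)
{
        struct eth_context *eth_ctx = dev->data;
        int ret = 0;

        switch (action) {
        case PM_DEVICE_ACTION_SUSPEND:
                /* Stop the interface first; -EBUSY means traffic is still pending. */
                ret = net_if_suspend(eth_ctx->iface);
                if (ret == -EBUSY) {
                        break;
                }
                /* Quiesce the controller, then gate its clock. */
                ENET_Reset(eth_ctx->base);
                ENET_Deinit(eth_ctx->base);
                clock_control_off(eth_ctx->clock_dev,
                                  (clock_control_subsys_t)eth_ctx->clock);
                break;
        case PM_DEVICE_ACTION_RESUME:
                /* Ungate the clock, then bring the interface back up. */
                clock_control_on(eth_ctx->clock_dev,
                                 (clock_control_subsys_t)eth_ctx->clock);
                net_if_resume(eth_ctx->iface);
                break;
        default:
                ret = -ENOTSUP;
                break;
        }

        return ret;
}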

in get_iface():
        return ctx->iface;

in eth_mcux_phy_enter_reset():
        ENET_StartSMIWrite(context->base, context->phy_addr,
        context->phy_state = eth_mcux_phy_state_reset;
        k_work_submit(&context->phy_work);

in eth_mcux_phy_start():
        LOG_DBG("%s phy_state=%s", eth_name(context->base),
                phy_state_name(context->phy_state));
        context->enabled = true;
        switch (context->phy_state) {
        context->phy_handle->phyAddr = context->phy_addr;
        ENET_ActiveRead(context->base);
        ENET_StartSMIWrite(context->base, context->phy_addr,
        k_work_submit(&context->phy_work);
        context->phy_state = eth_mcux_phy_state_initial;
        context->phy_state = eth_mcux_phy_state_reset;

in eth_mcux_phy_stop():
        LOG_DBG("%s phy_state=%s", eth_name(context->base),
                phy_state_name(context->phy_state));
        context->enabled = false;
        switch (context->phy_state) {
        context->phy_state = eth_mcux_phy_state_closing;
        k_work_cancel_delayable(&context->delayed_phy_work);
        context->phy_state = eth_mcux_phy_state_initial;

in eth_mcux_phy_event():
        LOG_DBG("%s phy_state=%s", eth_name(context->base),
                phy_state_name(context->phy_state));
        switch (context->phy_state) {
        ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK);
        res = PHY_Read(context->phy_handle, PHY_CONTROL2_REG, &ctrl2);
        ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK);
        k_work_submit(&context->phy_work);
        ENET_StartSMIWrite(context->base, context->phy_addr,
        context->phy_state = eth_mcux_phy_state_reset;
         * 1ms
        if (context->iface) {
                context->phy_state = eth_mcux_phy_state_reset;
        k_work_reschedule(&context->delayed_phy_work, K_MSEC(1));
        if (context->enabled) {
        context->phy_state = eth_mcux_phy_state_initial;
        ENET_StartSMIWrite(context->base, context->phy_addr,
        context->phy_state = eth_mcux_phy_state_autoneg;
        k_work_submit(&context->phy_work);
        ENET_StartSMIWrite(context->base, context->phy_addr,
        context->phy_state = eth_mcux_phy_state_restart;
        k_work_submit(&context->phy_work);
        ENET_StartSMIRead(context->base, context->phy_addr,
        context->phy_state = eth_mcux_phy_state_read_status;
        k_work_submit(&context->phy_work);
        status = ENET_ReadSMIData(context->base);
        if (link_up && !context->link_up && context->iface != NULL) {
                ENET_StartSMIRead(context->base, context->phy_addr,
                context->link_up = link_up;
                context->phy_state = eth_mcux_phy_state_read_duplex;
                net_eth_carrier_on(context->iface);
                k_work_submit(&context->phy_work);
        } else if (!link_up && context->link_up && context->iface != NULL) {
                LOG_INF("%s link down", eth_name(context->base));
                context->link_up = link_up;
                k_work_reschedule(&context->delayed_phy_work,
                context->phy_state = eth_mcux_phy_state_wait;
                net_eth_carrier_off(context->iface);
        k_work_reschedule(&context->delayed_phy_work,
        context->phy_state = eth_mcux_phy_state_wait;
        LOG_INF("%s - Fixed Link", eth_name(context->base));
        status = ENET_ReadSMIData(context->base);
        if (phy_speed != context->phy_speed ||
            phy_duplex != context->phy_duplex) {
                context->phy_speed = phy_speed;
                context->phy_duplex = phy_duplex;
                ENET_SetMII(context->base,
                LOG_INF("%s enabled %sM %s-duplex mode.",
                        eth_name(context->base),
        k_work_reschedule(&context->delayed_phy_work,
        context->phy_state = eth_mcux_phy_state_wait;
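
The PHY management fragments above form a small state machine driven from a work item (phy_work / delayed_phy_work). The sketch below collects the states referenced in the excerpt; the enumerator order and the phy_state_name() lookup are illustrative, and only the "read-status" and "read-duplex" name strings actually appear in the excerpt.

/* States referenced by eth_mcux_phy_start/stop/event in the fragments above. */
enum eth_mcux_phy_state {
        eth_mcux_phy_state_initial,
        eth_mcux_phy_state_reset,
        eth_mcux_phy_state_autoneg,
        eth_mcux_phy_state_restart,
        eth_mcux_phy_state_read_status,
        eth_mcux_phy_state_read_duplex,
        eth_mcux_phy_state_wait,
        eth_mcux_phy_state_closing,
};

/* Name lookup used by the LOG_DBG() calls; entries other than "read-status"
 * and "read-duplex" are placeholders for names not shown in the excerpt.
 */
static const char *phy_state_name(enum eth_mcux_phy_state state)
{
        static const char *const names[] = {
                "initial", "reset", "autoneg", "restart",
                "read-status", "read-duplex", "wait", "closing",
        };

        return names[state];
}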

in eth_mcux_phy_setup():
        ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK);
        res = PHY_Read(context->phy_handle,
        /* Based on strap-in pins the PHY can be in factory test mode.
        res = PHY_Write(context->phy_handle,
        ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK);

in eth_get_ptp_data():
        if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) {

in eth_tx():
        struct eth_context *context = dev->data;
        k_sem_take(&context->tx_buf_sem, K_FOREVER);
        k_mutex_lock(&context->tx_frame_buf_mutex, K_FOREVER);
        if (net_pkt_read(pkt, context->tx_frame_buf, total_len)) {
                k_mutex_unlock(&context->tx_frame_buf_mutex);
                return -EIO;
        status = ENET_SendFrame(context->base, &context->enet_handle,
                context->tx_frame_buf, total_len, RING_ID, true, pkt);
        k_sem_take(&context->ptp_ts_sem, K_FOREVER);
        status = ENET_SendFrame(context->base, &context->enet_handle,
                context->tx_frame_buf, total_len, RING_ID, false, NULL);
        k_mutex_unlock(&context->tx_frame_buf_mutex);
        ENET_ReclaimTxDescriptor(context->base,
                &context->enet_handle, RING_ID);
        return -1;
        k_mutex_unlock(&context->tx_frame_buf_mutex);

in eth_rx():
        status = ENET_GetRxFrameSize(&context->enet_handle,
        ENET_GetRxErrBeforeReadFrame(&context->enet_handle,
        pkt = net_pkt_rx_alloc_with_buffer(context->iface, frame_length,
        k_mutex_lock(&context->rx_frame_buf_mutex, K_FOREVER);
        status = ENET_ReadFrame(context->base, &context->enet_handle,
                context->rx_frame_buf, frame_length, RING_ID, &ts);
        k_mutex_unlock(&context->rx_frame_buf_mutex);
        if (net_pkt_write(pkt, context->rx_frame_buf, frame_length)) {
        k_mutex_unlock(&context->rx_frame_buf_mutex);
        k_mutex_unlock(&context->rx_frame_buf_mutex);
        k_mutex_lock(&context->ptp_mutex, K_FOREVER);
        ENET_Ptp1588GetTimer(context->base, &context->enet_handle,
         * then second - 1 to make sure the actual Rx timestamp is
        ptpTimeData.second--;
        pkt->timestamp.nanosecond = ts;
        pkt->timestamp.second = ptpTimeData.second;
        pkt->timestamp.nanosecond = UINT32_MAX;
        pkt->timestamp.second = UINT64_MAX;
        k_mutex_unlock(&context->ptp_mutex);
        status = ENET_ReadFrame(context->base, &context->enet_handle, NULL,
        return -EIO;
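
The timestamp handling in eth_rx() above reads the frame's nanosecond timestamp (ts) from the receive descriptor and then samples the 1588 timer; the "second - 1" comment and ptpTimeData.second-- guard against the seconds counter having rolled over between those two points. Below is a standalone sketch of that correction; the helper name and the exact comparison are assumptions based on the visible comment, since the full condition is not in the excerpt.

#include <stdint.h>
#include <stdio.h>

/* Reconstruct the RX timestamp's seconds value from the captured nanosecond
 * part (ts) and a timer snapshot (cur_second, cur_nanosecond) taken afterwards.
 * If ts is ahead of the snapshot's nanosecond field, the seconds counter must
 * have incremented in between, so the frame belongs to the previous second.
 */
static uint64_t rx_timestamp_second(uint32_t ts, uint64_t cur_second,
                                    uint32_t cur_nanosecond)
{
        return (ts > cur_nanosecond) ? cur_second - 1 : cur_second;
}

int main(void)
{
        /* Frame stamped at .999999000 s; timer sampled just after second 101 began. */
        printf("%llu\n", (unsigned long long)rx_timestamp_second(999999000U, 101U, 500U));
        /* Prints 100: the frame was actually stamped during second 100. */
        return 0;
}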

in ts_register_tx_event():
        pkt = frameinfo->context;
        if (pkt && atomic_get(&pkt->atomic_ref) > 0) {
        if (frameinfo->isTsAvail) {
                k_mutex_lock(&context->ptp_mutex, K_FOREVER);
                pkt->timestamp.nanosecond =
                        frameinfo->timeStamp.nanosecond;
                pkt->timestamp.second =
                        frameinfo->timeStamp.second;
                k_sem_give(&context->ptp_ts_sem);
                k_mutex_unlock(&context->ptp_mutex);

in eth_callback():
        k_sem_give(&context->rx_thread_sem);
        k_sem_give(&context->tx_buf_sem);
        context->base->ATPER = NSEC_PER_SEC;

in eth_rx_thread():
        if (k_sem_take(&context->rx_thread_sem, K_FOREVER) == 0) {
        ENET_EnableInterrupts(context->base,

in eth_phy_reset():
        struct eth_context *context = dev->data;
        err = gpio_pin_configure_dt(&context->int_gpio, GPIO_OUTPUT_ACTIVE);
        return gpio_pin_configure_dt(&context->reset_gpio, GPIO_OUTPUT_INACTIVE);

in eth_phy_init():
        struct eth_context *context = dev->data;
        return gpio_pin_set_dt(&context->reset_gpio, 1);

in eth_mcux_init():
        struct eth_context *context = dev->data;
        const enet_buffer_config_t *buffer_config = dev->config;
        context->phy_state = eth_mcux_phy_state_initial;
        context->phy_handle->ops = &phyksz8081_ops;
        ENET_Init(context->base,
                &context->enet_handle,
                context->mac_addr,
        ENET_AddMulticastGroup(context->base, ptp_multicast);
        ENET_AddMulticastGroup(context->base, ptp_peer_multicast);
        context->ptp_config.channel = kENET_PtpTimerChannel3;
        context->ptp_config.ptp1588ClockSrc_Hz =
        context->clk_ratio = 1.0;
        ENET_Ptp1588SetChannelMode(context->base, kENET_PtpTimerChannel3,
        ENET_Ptp1588Configure(context->base, &context->enet_handle,
                &context->ptp_config);
        ENET_AddMulticastGroup(context->base, mdns_multicast);
        ENET_SetSMI(context->base, sys_clock, false);
        ENET_SetTxReclaim(&context->enet_handle, true, 0);

in eth_init():
        struct eth_context *context = dev->data;
        err = pinctrl_apply_state(context->pincfg, PINCTRL_STATE_DEFAULT);
        const uint32_t inst = ENET_GetInstance(context->base);
        context->clock = enet_clocks[inst];
        k_mutex_init(&context->ptp_mutex);
        k_sem_init(&context->ptp_ts_sem, 0, 1);
        k_mutex_init(&context->rx_frame_buf_mutex);
        k_mutex_init(&context->tx_frame_buf_mutex);
        k_sem_init(&context->rx_thread_sem, 0, CONFIG_ETH_MCUX_RX_BUFFERS);
        k_sem_init(&context->tx_buf_sem,
        k_work_init(&context->phy_work, eth_mcux_phy_work);
        k_work_init_delayable(&context->delayed_phy_work,
        /* Start interrupt-poll thread */
        k_thread_create(&context->rx_thread, context->rx_thread_stack,
                K_KERNEL_STACK_SIZEOF(context->rx_thread_stack),
        k_thread_name_set(&context->rx_thread, "mcux_eth_rx");
        if (context->generate_mac) {
                context->generate_mac(context->mac_addr);
        dev->name,
        context->mac_addr[0], context->mac_addr[1],
        context->mac_addr[2], context->mac_addr[3],
        context->mac_addr[4], context->mac_addr[5]);

in eth_iface_init():
        struct eth_context *context = dev->data;
        net_if_set_link_addr(iface, context->mac_addr,
                sizeof(context->mac_addr),
        if (context->iface == NULL) {
                context->iface = iface;
        context->config_func();

in eth_mcux_set_config():
        struct eth_context *context = dev->data;
        memcpy(context->mac_addr,
                config->mac_address.addr,
                sizeof(context->mac_addr));
        ENET_SetMacAddr(context->base, context->mac_addr);
        net_if_set_link_addr(context->iface, context->mac_addr,
                sizeof(context->mac_addr),
        dev->name,
        context->mac_addr[0], context->mac_addr[1],
        context->mac_addr[2], context->mac_addr[3],
        context->mac_addr[4], context->mac_addr[5]);
        if (config->filter.set) {
                ENET_AddMulticastGroup(context->base,
                        (uint8_t *)config->filter.mac_address.addr);
        ENET_LeaveMulticastGroup(context->base,
                (uint8_t *)config->filter.mac_address.addr);
        return -ENOTSUP;

in eth_mcux_get_ptp_clock():
        struct eth_context *context = dev->data;
        return context->ptp_clock;

in eth_mcux_ptp_isr():
        struct eth_context *context = dev->data;
        if (ENET_Ptp1588GetChannelStatus(context->base, channel)) {
                ENET_Ptp1588ClearChannelStatus(context->base, channel);
        ENET_TimeStampIRQHandler(context->base, &context->enet_handle);

in eth_mcux_common_isr():
        struct eth_context *context = dev->data;
        uint32_t EIR = ENET_GetInterruptStatus(context->base);
        context->rx_irq_num++;
        ENET_ReceiveIRQHandler(context->base, &context->enet_handle, 0);
        ENET_ReceiveIRQHandler(context->base, &context->enet_handle);
        ENET_DisableInterrupts(context->base, kENET_RxFrameInterrupt |
        ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0);
        ENET_TransmitIRQHandler(context->base, &context->enet_handle);
        ENET_ClearInterruptStatus(context->base, kENET_TxBufferInterrupt);
        ENET_DisableInterrupts(context->base, kENET_TxBufferInterrupt);
        k_work_submit(&context->phy_work);
        ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt);
        ENET_TimeStampIRQHandler(context->base, &context->enet_handle);

in eth_mcux_rx_isr():
        struct eth_context *context = dev->data;
        ENET_DisableInterrupts(context->base, kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
        ENET_ReceiveIRQHandler(context->base, &context->enet_handle);

in eth_mcux_tx_isr():
        struct eth_context *context = dev->data;
        ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0);
        ENET_TransmitIRQHandler(context->base, &context->enet_handle);

in eth_mcux_err_isr():
        struct eth_context *context = dev->data;
        uint32_t pending = ENET_GetInterruptStatus(context->base);
        k_work_submit(&context->phy_work);
        ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt);

#define ETH_MCUX_UNIQUE_ID (OCOTP->CFG1 ^ OCOTP->CFG2)
#define ETH_MCUX_UNIQUE_ID (OCOTP->FUSEN[40].FUSE)
#define ETH_MCUX_UNIQUE_ID (SIM->UIDH ^ SIM->UIDMH ^ SIM->UIDML ^ SIM->UIDL)

in ptp_clock_mcux_set():
        struct ptp_context *ptp_context = dev->data;
        struct eth_context *context = ptp_context->eth_context;
        enet_time.second = tm->second;
        enet_time.nanosecond = tm->nanosecond;
        ENET_Ptp1588SetTimer(context->base, &context->enet_handle, &enet_time);

in ptp_clock_mcux_get():
        struct ptp_context *ptp_context = dev->data;
        struct eth_context *context = ptp_context->eth_context;
        ENET_Ptp1588GetTimer(context->base, &context->enet_handle, &enet_time);
        tm->second = enet_time.second;
        tm->nanosecond = enet_time.nanosecond;

in ptp_clock_mcux_adjust():
        struct ptp_context *ptp_context = dev->data;
        struct eth_context *context = ptp_context->eth_context;
        if ((increment <= (int32_t)(-NSEC_PER_SEC)) ||
        ret = -EINVAL;
        if (context->base->ATPER != NSEC_PER_SEC) {
                ret = -EBUSY;
         * period of one software second to adjust the clock.
        context->base->ATPER = NSEC_PER_SEC - increment;
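
The offset path in ptp_clock_mcux_adjust() above slews the clock by temporarily loading the timer's period register (ATPER) with NSEC_PER_SEC - increment, so a single "software second" is stretched or shortened by the requested offset; the -EBUSY check refuses a new adjustment while ATPER is still away from its nominal value. Below is a standalone sketch of the range check and the temporary period value (the helper name is illustrative).

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000

/* Compute the temporary period value used to slew the clock by `increment`
 * nanoseconds over one software second, mirroring the fragment above.
 * Returns a negative value if the offset is out of range.
 */
static int64_t atper_for_increment(int32_t increment)
{
        if (increment <= -(int32_t)NSEC_PER_SEC || increment >= (int32_t)NSEC_PER_SEC) {
                return -1; /* |increment| must be smaller than one second */
        }
        return (int64_t)NSEC_PER_SEC - increment;
}

int main(void)
{
        /* A positive increment of 2500 ns shortens one period to 999997500 ns. */
        printf("%lld\n", (long long)atper_for_increment(2500));
        return 0;
}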

in ptp_clock_mcux_rate_adjust():
        struct ptp_context *ptp_context = dev->data;
        struct eth_context *context = ptp_context->eth_context;
        if ((ratio > 1.0 && ratio - 1.0 < 0.00000001) ||
            (ratio < 1.0 && 1.0 - ratio < 0.00000001)) {
        ratio *= context->clk_ratio;
        (ratio < 1.0 - 1.0/(2 * hw_inc))) {
                return -EINVAL;
        context->clk_ratio = ratio;
        corr = hw_inc - 1;
        val = 1.0 / (hw_inc * (1.0 - ratio));
        val = 1.0 / (hw_inc * (ratio - 1.0));
        k_mutex_lock(&context->ptp_mutex, K_FOREVER);
        ENET_Ptp1588AdjustTimer(context->base, corr, mul);
        k_mutex_unlock(&context->ptp_mutex);
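
ptp_clock_mcux_rate_adjust() above first checks for ratios within 1e-8 of 1.0, folds the request into the accumulated context->clk_ratio, and then derives a corrected per-tick increment (corr) and a correction interval from how far the ratio is from 1.0, which is finally programmed with ENET_Ptp1588AdjustTimer(). Below is a standalone sketch of that arithmetic; hw_inc is the nominal nanoseconds added per PTP clock tick (its definition is not in the excerpt), and the corr value for the ratio > 1.0 branch is the symmetric assumption, since only the hw_inc - 1 case is visible above.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000

/* Given the accumulated frequency ratio and the nominal per-tick increment,
 * compute the corrected increment (corr) and the raw correction interval in
 * ticks (val), mirroring the arithmetic in the fragment above.
 */
static void rate_correction(double ratio, int32_t hw_inc, int32_t *corr, double *val)
{
        if (ratio < 1.0) {
                *corr = hw_inc - 1;              /* shave 1 ns off every val-th tick */
                *val = 1.0 / (hw_inc * (1.0 - ratio));
        } else if (ratio > 1.0) {
                *corr = hw_inc + 1;              /* add 1 ns every val-th tick (assumed) */
                *val = 1.0 / (hw_inc * (ratio - 1.0));
        } else {
                *corr = hw_inc;                  /* exactly nominal: no correction */
                *val = 0.0;
        }
}

int main(void)
{
        int32_t corr;
        double val;

        /* Example: 50 MHz PTP clock -> hw_inc = 20 ns, ratio 10 ppm below nominal. */
        rate_correction(1.0 - 10e-6, NSEC_PER_SEC / 50000000, &corr, &val);
        printf("corr=%d val=%.0f\n", corr, val);  /* corr=19 val=5000 */
        return 0;
}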

in ptp_mcux_init():
        struct eth_context *context = eth_dev->data;
        struct ptp_context *ptp_context = port->data;
        err = pinctrl_apply_state(ptp_context->pincfg, PINCTRL_STATE_DEFAULT);
        context->ptp_clock = port;
        ptp_context->eth_context = context;