Lines Matching "suppress-preamble"

1 // SPDX-License-Identifier: GPL-2.0+
17 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
20 * Copyright (c) 2004-2006 Macq Electronique SA.
22 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
190 .name = "imx25-fec",
193 .name = "imx27-fec",
196 .name = "imx28-fec",
199 .name = "imx6q-fec",
202 .name = "mvf600-fec",
205 .name = "imx6sx-fec",
208 .name = "imx6ul-fec",
211 .name = "imx8mq-fec",
214 .name = "imx8qm-fec",
217 .name = "s32v234-fec",
239 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
240 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
241 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
242 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
243 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
244 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
245 { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
246 { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
247 { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
248 { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
260 * if this is non-zero then assume it is the address to get MAC from.
282 #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
338 ((addr >= txq->tso_hdrs_dma) && \
339 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
346 return (bdp >= bd->last) ? bd->base in fec_enet_get_nextdesc()
347 : (struct bufdesc *)(((void *)bdp) + bd->dsize); in fec_enet_get_nextdesc()
353 return (bdp <= bd->base) ? bd->last in fec_enet_get_prevdesc()
354 : (struct bufdesc *)(((void *)bdp) - bd->dsize); in fec_enet_get_prevdesc()
360 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; in fec_enet_get_bd_index()
367 entries = (((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num()
368 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; in fec_enet_get_free_txdesc_num()
370 return entries >= 0 ? entries : entries + txq->bd.ring_size; in fec_enet_get_free_txdesc_num()
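A worked illustration of the free-descriptor arithmetic above (the indices and ring size are hypothetical, not taken from this listing): with a 512-entry TX ring, dirty_tx at descriptor index 10 and bd.cur at index 500,
	/* entries = (10 - 500) - 1        = -491   negative, so wrap around */
	/* entries + ring_size = -491 + 512 =   21   descriptors still free  */
so 21 descriptors remain available for new transmits in this example.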
392 txq = fep->tx_queue[0]; in fec_dump()
393 bdp = txq->bd.base; in fec_dump()
398 bdp == txq->bd.cur ? 'S' : ' ', in fec_dump()
399 bdp == txq->dirty_tx ? 'H' : ' ', in fec_dump()
400 fec16_to_cpu(bdp->cbd_sc), in fec_dump()
401 fec32_to_cpu(bdp->cbd_bufaddr), in fec_dump()
402 fec16_to_cpu(bdp->cbd_datlen), in fec_dump()
403 txq->tx_buf[index].buf_p); in fec_dump()
404 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_dump()
406 } while (bdp != txq->bd.base); in fec_dump()
411 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; in is_ipv4_pkt()
418 if (skb->ip_summed != CHECKSUM_PARTIAL) in fec_enet_clear_csum()
422 return -1; in fec_enet_clear_csum()
425 ip_hdr(skb)->check = 0; in fec_enet_clear_csum()
426 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; in fec_enet_clear_csum()
435 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); in fec_enet_create_page_pool()
440 .nid = dev_to_node(&fep->pdev->dev), in fec_enet_create_page_pool()
441 .dev = &fep->pdev->dev, in fec_enet_create_page_pool()
448 rxq->page_pool = page_pool_create(&pp_params); in fec_enet_create_page_pool()
449 if (IS_ERR(rxq->page_pool)) { in fec_enet_create_page_pool()
450 err = PTR_ERR(rxq->page_pool); in fec_enet_create_page_pool()
451 rxq->page_pool = NULL; in fec_enet_create_page_pool()
455 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0); in fec_enet_create_page_pool()
459 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in fec_enet_create_page_pool()
460 rxq->page_pool); in fec_enet_create_page_pool()
467 xdp_rxq_info_unreg(&rxq->xdp_rxq); in fec_enet_create_page_pool()
469 page_pool_destroy(rxq->page_pool); in fec_enet_create_page_pool()
470 rxq->page_pool = NULL; in fec_enet_create_page_pool()
480 struct bufdesc *bdp = txq->bd.cur; in fec_enet_txq_submit_frag_skb()
482 int nr_frags = skb_shinfo(skb)->nr_frags; in fec_enet_txq_submit_frag_skb()
493 this_frag = &skb_shinfo(skb)->frags[frag]; in fec_enet_txq_submit_frag_skb()
494 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
497 status = fec16_to_cpu(bdp->cbd_sc); in fec_enet_txq_submit_frag_skb()
500 frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]); in fec_enet_txq_submit_frag_skb()
503 if (frag == nr_frags - 1) { in fec_enet_txq_submit_frag_skb()
505 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
507 if (unlikely(skb_shinfo(skb)->tx_flags & in fec_enet_txq_submit_frag_skb()
508 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_frag_skb()
513 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
514 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_frag_skb()
515 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_submit_frag_skb()
516 if (skb->ip_summed == CHECKSUM_PARTIAL) in fec_enet_txq_submit_frag_skb()
519 ebdp->cbd_bdu = 0; in fec_enet_txq_submit_frag_skb()
520 ebdp->cbd_esc = cpu_to_fec32(estatus); in fec_enet_txq_submit_frag_skb()
525 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
526 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_frag_skb()
527 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_frag_skb()
528 memcpy(txq->tx_bounce[index], bufaddr, frag_len); in fec_enet_txq_submit_frag_skb()
529 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_frag_skb()
531 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_frag_skb()
535 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, in fec_enet_txq_submit_frag_skb()
537 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_frag_skb()
543 bdp->cbd_bufaddr = cpu_to_fec32(addr); in fec_enet_txq_submit_frag_skb()
544 bdp->cbd_datlen = cpu_to_fec16(frag_len); in fec_enet_txq_submit_frag_skb()
549 bdp->cbd_sc = cpu_to_fec16(status); in fec_enet_txq_submit_frag_skb()
554 bdp = txq->bd.cur; in fec_enet_txq_submit_frag_skb()
556 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_frag_skb()
557 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_txq_submit_frag_skb()
558 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); in fec_enet_txq_submit_frag_skb()
560 return ERR_PTR(-ENOMEM); in fec_enet_txq_submit_frag_skb()
567 int nr_frags = skb_shinfo(skb)->nr_frags; in fec_enet_txq_submit_skb()
585 /* Protocol checksum off-load for TCP and UDP. */ in fec_enet_txq_submit_skb()
592 bdp = txq->bd.cur; in fec_enet_txq_submit_skb()
594 status = fec16_to_cpu(bdp->cbd_sc); in fec_enet_txq_submit_skb()
598 bufaddr = skb->data; in fec_enet_txq_submit_skb()
601 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_skb()
602 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_skb()
603 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_skb()
604 memcpy(txq->tx_bounce[index], skb->data, buflen); in fec_enet_txq_submit_skb()
605 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_skb()
607 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_skb()
612 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); in fec_enet_txq_submit_skb()
613 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_skb()
623 dma_unmap_single(&fep->pdev->dev, addr, in fec_enet_txq_submit_skb()
630 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
632 if (unlikely(skb_shinfo(skb)->tx_flags & in fec_enet_txq_submit_skb()
633 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
637 bdp->cbd_bufaddr = cpu_to_fec32(addr); in fec_enet_txq_submit_skb()
638 bdp->cbd_datlen = cpu_to_fec16(buflen); in fec_enet_txq_submit_skb()
640 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
644 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && in fec_enet_txq_submit_skb()
645 fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
646 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in fec_enet_txq_submit_skb()
648 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_skb()
649 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_submit_skb()
651 if (skb->ip_summed == CHECKSUM_PARTIAL) in fec_enet_txq_submit_skb()
654 ebdp->cbd_bdu = 0; in fec_enet_txq_submit_skb()
655 ebdp->cbd_esc = cpu_to_fec32(estatus); in fec_enet_txq_submit_skb()
658 index = fec_enet_get_bd_index(last_bdp, &txq->bd); in fec_enet_txq_submit_skb()
660 txq->tx_buf[index].buf_p = skb; in fec_enet_txq_submit_skb()
671 bdp->cbd_sc = cpu_to_fec16(status); in fec_enet_txq_submit_skb()
674 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); in fec_enet_txq_submit_skb()
678 /* Make sure the update to bdp is performed before txq->bd.cur. */ in fec_enet_txq_submit_skb()
680 txq->bd.cur = bdp; in fec_enet_txq_submit_skb()
683 writel(0, txq->bd.reg_desc_active); in fec_enet_txq_submit_skb()
700 status = fec16_to_cpu(bdp->cbd_sc); in fec_enet_txq_put_data_tso()
705 if (((unsigned long) data) & fep->tx_align || in fec_enet_txq_put_data_tso()
706 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_data_tso()
707 memcpy(txq->tx_bounce[index], data, size); in fec_enet_txq_put_data_tso()
708 data = txq->tx_bounce[index]; in fec_enet_txq_put_data_tso()
710 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_data_tso()
714 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); in fec_enet_txq_put_data_tso()
715 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_put_data_tso()
722 bdp->cbd_datlen = cpu_to_fec16(size); in fec_enet_txq_put_data_tso()
723 bdp->cbd_bufaddr = cpu_to_fec32(addr); in fec_enet_txq_put_data_tso()
725 if (fep->bufdesc_ex) { in fec_enet_txq_put_data_tso()
726 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_data_tso()
727 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_put_data_tso()
728 if (skb->ip_summed == CHECKSUM_PARTIAL) in fec_enet_txq_put_data_tso()
730 ebdp->cbd_bdu = 0; in fec_enet_txq_put_data_tso()
731 ebdp->cbd_esc = cpu_to_fec32(estatus); in fec_enet_txq_put_data_tso()
739 if (fep->bufdesc_ex) in fec_enet_txq_put_data_tso()
740 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); in fec_enet_txq_put_data_tso()
743 bdp->cbd_sc = cpu_to_fec16(status); in fec_enet_txq_put_data_tso()
761 status = fec16_to_cpu(bdp->cbd_sc); in fec_enet_txq_put_hdr_tso()
765 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
766 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
767 if (((unsigned long)bufaddr) & fep->tx_align || in fec_enet_txq_put_hdr_tso()
768 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_hdr_tso()
769 memcpy(txq->tx_bounce[index], skb->data, hdr_len); in fec_enet_txq_put_hdr_tso()
770 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_put_hdr_tso()
772 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_hdr_tso()
775 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, in fec_enet_txq_put_hdr_tso()
777 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { in fec_enet_txq_put_hdr_tso()
785 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); in fec_enet_txq_put_hdr_tso()
786 bdp->cbd_datlen = cpu_to_fec16(hdr_len); in fec_enet_txq_put_hdr_tso()
788 if (fep->bufdesc_ex) { in fec_enet_txq_put_hdr_tso()
789 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_hdr_tso()
790 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_put_hdr_tso()
791 if (skb->ip_summed == CHECKSUM_PARTIAL) in fec_enet_txq_put_hdr_tso()
793 ebdp->cbd_bdu = 0; in fec_enet_txq_put_hdr_tso()
794 ebdp->cbd_esc = cpu_to_fec32(estatus); in fec_enet_txq_put_hdr_tso()
797 bdp->cbd_sc = cpu_to_fec16(status); in fec_enet_txq_put_hdr_tso()
808 struct bufdesc *bdp = txq->bd.cur; in fec_enet_txq_submit_tso()
820 /* Protocol checksum off-load for TCP and UDP. */ in fec_enet_txq_submit_tso()
829 total_len = skb->len - hdr_len; in fec_enet_txq_submit_tso()
833 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_tso()
834 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); in fec_enet_txq_submit_tso()
835 total_len -= data_left; in fec_enet_txq_submit_tso()
838 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_submit_tso()
848 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_tso()
849 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_submit_tso()
858 data_left -= size; in fec_enet_txq_submit_tso()
862 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_submit_tso()
866 txq->tx_buf[index].buf_p = skb; in fec_enet_txq_submit_tso()
869 txq->bd.cur = bdp; in fec_enet_txq_submit_tso()
872 if (!(fep->quirks & FEC_QUIRK_ERR007885) || in fec_enet_txq_submit_tso()
873 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
874 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
875 !readl(txq->bd.reg_desc_active) || in fec_enet_txq_submit_tso()
876 !readl(txq->bd.reg_desc_active)) in fec_enet_txq_submit_tso()
877 writel(0, txq->bd.reg_desc_active); in fec_enet_txq_submit_tso()
897 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
908 if (entries_free <= txq->tx_stop_threshold) in fec_enet_start_xmit()
925 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_bd_init()
927 rxq = fep->rx_queue[q]; in fec_enet_bd_init()
928 bdp = rxq->bd.base; in fec_enet_bd_init()
930 for (i = 0; i < rxq->bd.ring_size; i++) { in fec_enet_bd_init()
933 if (bdp->cbd_bufaddr) in fec_enet_bd_init()
934 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); in fec_enet_bd_init()
936 bdp->cbd_sc = cpu_to_fec16(0); in fec_enet_bd_init()
937 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); in fec_enet_bd_init()
941 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); in fec_enet_bd_init()
942 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); in fec_enet_bd_init()
944 rxq->bd.cur = rxq->bd.base; in fec_enet_bd_init()
947 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_bd_init()
949 txq = fep->tx_queue[q]; in fec_enet_bd_init()
950 bdp = txq->bd.base; in fec_enet_bd_init()
951 txq->bd.cur = bdp; in fec_enet_bd_init()
953 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_bd_init()
955 bdp->cbd_sc = cpu_to_fec16(0); in fec_enet_bd_init()
956 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { in fec_enet_bd_init()
957 if (bdp->cbd_bufaddr && in fec_enet_bd_init()
958 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) in fec_enet_bd_init()
959 dma_unmap_single(&fep->pdev->dev, in fec_enet_bd_init()
960 fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_bd_init()
961 fec16_to_cpu(bdp->cbd_datlen), in fec_enet_bd_init()
963 if (txq->tx_buf[i].buf_p) in fec_enet_bd_init()
964 dev_kfree_skb_any(txq->tx_buf[i].buf_p); in fec_enet_bd_init()
965 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { in fec_enet_bd_init()
966 if (bdp->cbd_bufaddr) in fec_enet_bd_init()
967 dma_unmap_single(&fep->pdev->dev, in fec_enet_bd_init()
968 fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_bd_init()
969 fec16_to_cpu(bdp->cbd_datlen), in fec_enet_bd_init()
972 if (txq->tx_buf[i].buf_p) in fec_enet_bd_init()
973 xdp_return_frame(txq->tx_buf[i].buf_p); in fec_enet_bd_init()
975 struct page *page = txq->tx_buf[i].buf_p; in fec_enet_bd_init()
978 page_pool_put_page(page->pp, page, 0, false); in fec_enet_bd_init()
981 txq->tx_buf[i].buf_p = NULL; in fec_enet_bd_init()
983 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; in fec_enet_bd_init()
984 bdp->cbd_bufaddr = cpu_to_fec32(0); in fec_enet_bd_init()
985 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_bd_init()
989 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); in fec_enet_bd_init()
990 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); in fec_enet_bd_init()
991 txq->dirty_tx = bdp; in fec_enet_bd_init()
1000 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_active_rxring()
1001 writel(0, fep->rx_queue[i]->bd.reg_desc_active); in fec_enet_active_rxring()
1011 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_enable_ring()
1012 rxq = fep->rx_queue[i]; in fec_enet_enable_ring()
1013 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); in fec_enet_enable_ring()
1014 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); in fec_enet_enable_ring()
1019 fep->hwp + FEC_RCMR(i)); in fec_enet_enable_ring()
1022 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_enable_ring()
1023 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
1024 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
1029 fep->hwp + FEC_DMA_CFG(i)); in fec_enet_enable_ring()
1050 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || in fec_restart()
1051 ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { in fec_restart()
1052 writel(0, fep->hwp + FEC_ECNTRL); in fec_restart()
1054 writel(1, fep->hwp + FEC_ECNTRL); in fec_restart()
1059 * enet-mac reset will reset mac address registers too, in fec_restart()
1062 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); in fec_restart()
1064 fep->hwp + FEC_ADDR_LOW); in fec_restart()
1066 fep->hwp + FEC_ADDR_HIGH); in fec_restart()
1069 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); in fec_restart()
1076 if (fep->full_duplex == DUPLEX_FULL) { in fec_restart()
1078 writel(0x04, fep->hwp + FEC_X_CNTRL); in fec_restart()
1082 writel(0x0, fep->hwp + FEC_X_CNTRL); in fec_restart()
1086 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_restart()
1089 if (fep->quirks & FEC_QUIRK_HAS_RACC) { in fec_restart()
1090 u32 val = readl(fep->hwp + FEC_RACC); in fec_restart()
1094 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) in fec_restart()
1099 writel(val, fep->hwp + FEC_RACC); in fec_restart()
1100 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); in fec_restart()
1106 * differently on enet-mac. in fec_restart()
1108 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
1113 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || in fec_restart()
1114 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in fec_restart()
1115 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || in fec_restart()
1116 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) in fec_restart()
1118 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1124 if (ndev->phydev) { in fec_restart()
1125 if (ndev->phydev->speed == SPEED_1000) in fec_restart()
1127 else if (ndev->phydev->speed == SPEED_100) in fec_restart()
1134 if (fep->quirks & FEC_QUIRK_USE_GASKET) { in fec_restart()
1137 writel(0, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1138 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) in fec_restart()
1146 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1148 if (ndev->phydev && ndev->phydev->speed == SPEED_10) in fec_restart()
1150 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); in fec_restart()
1152 /* re-enable the gasket */ in fec_restart()
1153 writel(2, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1160 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || in fec_restart()
1161 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && in fec_restart()
1162 ndev->phydev && ndev->phydev->pause)) { in fec_restart()
1166 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); in fec_restart()
1167 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); in fec_restart()
1168 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); in fec_restart()
1169 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); in fec_restart()
1172 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); in fec_restart()
1178 writel(rcntl, fep->hwp + FEC_R_CNTRL); in fec_restart()
1183 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); in fec_restart()
1184 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); in fec_restart()
1187 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
1191 writel(1 << 8, fep->hwp + FEC_X_WMRK); in fec_restart()
1194 if (fep->bufdesc_ex) in fec_restart()
1197 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && in fec_restart()
1198 fep->rgmii_txc_dly) in fec_restart()
1200 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && in fec_restart()
1201 fep->rgmii_rxc_dly) in fec_restart()
1206 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); in fec_restart()
1210 writel(ecntl, fep->hwp + FEC_ECNTRL); in fec_restart()
1213 if (fep->bufdesc_ex) in fec_restart()
1217 if (fep->link) in fec_restart()
1218 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_restart()
1220 writel(0, fep->hwp + FEC_IMASK); in fec_restart()
1223 if (fep->quirks & FEC_QUIRK_HAS_COALESCE) in fec_restart()
1234 return imx_scu_get_handle(&fep->ipc_handle); in fec_enet_ipc_handle_init()
1239 struct device_node *np = fep->pdev->dev.of_node; in fec_enet_ipg_stop_set()
1243 if (!np || !fep->ipc_handle) in fec_enet_ipg_stop_set()
1252 imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); in fec_enet_ipg_stop_set()
1257 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; in fec_enet_stop_mode()
1258 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; in fec_enet_stop_mode()
1260 if (stop_gpr->gpr) { in fec_enet_stop_mode()
1262 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, in fec_enet_stop_mode()
1263 BIT(stop_gpr->bit), in fec_enet_stop_mode()
1264 BIT(stop_gpr->bit)); in fec_enet_stop_mode()
1266 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, in fec_enet_stop_mode()
1267 BIT(stop_gpr->bit), 0); in fec_enet_stop_mode()
1268 } else if (pdata && pdata->sleep_mode_enable) { in fec_enet_stop_mode()
1269 pdata->sleep_mode_enable(enabled); in fec_enet_stop_mode()
1279 writel(0, fep->hwp + FEC_IMASK); in fec_irqs_disable()
1286 writel(0, fep->hwp + FEC_IMASK); in fec_irqs_disable_except_wakeup()
1287 writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); in fec_irqs_disable_except_wakeup()
1294 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); in fec_stop()
1298 if (fep->link) { in fec_stop()
1299 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ in fec_stop()
1301 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) in fec_stop()
1309 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1310 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { in fec_stop()
1311 writel(0, fep->hwp + FEC_ECNTRL); in fec_stop()
1313 writel(1, fep->hwp + FEC_ECNTRL); in fec_stop()
1317 val = readl(fep->hwp + FEC_ECNTRL); in fec_stop()
1319 writel(val, fep->hwp + FEC_ECNTRL); in fec_stop()
1321 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_stop()
1322 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_stop()
1325 if (fep->quirks & FEC_QUIRK_ENET_MAC && in fec_stop()
1326 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1327 writel(2, fep->hwp + FEC_ECNTRL); in fec_stop()
1328 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); in fec_stop()
1340 ndev->stats.tx_errors++; in fec_timeout()
1342 schedule_work(&fep->tx_timeout_work); in fec_timeout()
1349 struct net_device *ndev = fep->netdev; in fec_enet_timeout_work()
1353 napi_disable(&fep->napi); in fec_enet_timeout_work()
1358 napi_enable(&fep->napi); in fec_enet_timeout_work()
1370 spin_lock_irqsave(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1371 ns = timecounter_cyc2time(&fep->tc, ts); in fec_enet_hwtstamp()
1372 spin_unlock_irqrestore(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1375 hwtstamps->hwtstamp = ns_to_ktime(ns); in fec_enet_hwtstamp()
1395 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1398 bdp = txq->dirty_tx; in fec_enet_tx_queue()
1401 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_tx_queue()
1403 while (bdp != READ_ONCE(txq->bd.cur)) { in fec_enet_tx_queue()
1406 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); in fec_enet_tx_queue()
1410 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_tx_queue()
1412 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { in fec_enet_tx_queue()
1413 skb = txq->tx_buf[index].buf_p; in fec_enet_tx_queue()
1414 if (bdp->cbd_bufaddr && in fec_enet_tx_queue()
1415 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) in fec_enet_tx_queue()
1416 dma_unmap_single(&fep->pdev->dev, in fec_enet_tx_queue()
1417 fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_tx_queue()
1418 fec16_to_cpu(bdp->cbd_datlen), in fec_enet_tx_queue()
1420 bdp->cbd_bufaddr = cpu_to_fec32(0); in fec_enet_tx_queue()
1432 if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { in fec_enet_tx_queue()
1433 xdpf = txq->tx_buf[index].buf_p; in fec_enet_tx_queue()
1434 if (bdp->cbd_bufaddr) in fec_enet_tx_queue()
1435 dma_unmap_single(&fep->pdev->dev, in fec_enet_tx_queue()
1436 fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_tx_queue()
1437 fec16_to_cpu(bdp->cbd_datlen), in fec_enet_tx_queue()
1440 page = txq->tx_buf[index].buf_p; in fec_enet_tx_queue()
1443 bdp->cbd_bufaddr = cpu_to_fec32(0); in fec_enet_tx_queue()
1444 if (unlikely(!txq->tx_buf[index].buf_p)) { in fec_enet_tx_queue()
1445 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; in fec_enet_tx_queue()
1449 frame_len = fec16_to_cpu(bdp->cbd_datlen); in fec_enet_tx_queue()
1456 ndev->stats.tx_errors++; in fec_enet_tx_queue()
1458 ndev->stats.tx_heartbeat_errors++; in fec_enet_tx_queue()
1460 ndev->stats.tx_window_errors++; in fec_enet_tx_queue()
1462 ndev->stats.tx_aborted_errors++; in fec_enet_tx_queue()
1464 ndev->stats.tx_fifo_errors++; in fec_enet_tx_queue()
1466 ndev->stats.tx_carrier_errors++; in fec_enet_tx_queue()
1468 ndev->stats.tx_packets++; in fec_enet_tx_queue()
1470 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) in fec_enet_tx_queue()
1471 ndev->stats.tx_bytes += skb->len; in fec_enet_tx_queue()
1473 ndev->stats.tx_bytes += frame_len; in fec_enet_tx_queue()
1480 ndev->stats.collisions++; in fec_enet_tx_queue()
1482 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { in fec_enet_tx_queue()
1487 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && in fec_enet_tx_queue()
1488 fep->hwts_tx_en) && fep->bufdesc_ex) { in fec_enet_tx_queue()
1492 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); in fec_enet_tx_queue()
1498 } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { in fec_enet_tx_queue()
1502 page_pool_put_page(page->pp, page, 0, true); in fec_enet_tx_queue()
1505 txq->tx_buf[index].buf_p = NULL; in fec_enet_tx_queue()
1507 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; in fec_enet_tx_queue()
1514 txq->dirty_tx = bdp; in fec_enet_tx_queue()
1517 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_tx_queue()
1523 if (entries_free >= txq->tx_wake_threshold) in fec_enet_tx_queue()
1529 if (bdp != txq->bd.cur && in fec_enet_tx_queue()
1530 readl(txq->bd.reg_desc_active) == 0) in fec_enet_tx_queue()
1531 writel(0, txq->bd.reg_desc_active); in fec_enet_tx_queue()
1540 for (i = fep->num_tx_queues - 1; i >= 0; i--) in fec_enet_tx()
1550 new_page = page_pool_dev_alloc_pages(rxq->page_pool); in fec_enet_update_cbd()
1552 rxq->rx_skb_info[index].page = new_page; in fec_enet_update_cbd()
1554 rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; in fec_enet_update_cbd()
1556 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); in fec_enet_update_cbd()
1563 unsigned int sync, len = xdp->data_end - xdp->data; in fec_enet_run_xdp()
1574 sync = xdp->data_end - xdp->data; in fec_enet_run_xdp()
1579 rxq->stats[RX_XDP_PASS]++; in fec_enet_run_xdp()
1584 rxq->stats[RX_XDP_REDIRECT]++; in fec_enet_run_xdp()
1585 err = xdp_do_redirect(fep->netdev, xdp, prog); in fec_enet_run_xdp()
1593 rxq->stats[RX_XDP_TX]++; in fec_enet_run_xdp()
1596 rxq->stats[RX_XDP_TX_ERRORS]++; in fec_enet_run_xdp()
1604 bpf_warn_invalid_xdp_action(fep->netdev, prog, act); in fec_enet_run_xdp()
1611 rxq->stats[RX_XDP_DROP]++; in fec_enet_run_xdp()
1614 page = virt_to_head_page(xdp->data); in fec_enet_run_xdp()
1615 page_pool_put_page(rxq->page_pool, page, sync, true); in fec_enet_run_xdp()
1617 trace_xdp_exception(fep->netdev, prog, act); in fec_enet_run_xdp()
1644 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; in fec_enet_rx_queue()
1645 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); in fec_enet_rx_queue()
1657 if (fep->quirks & FEC_QUIRK_HAS_RACC) { in fec_enet_rx_queue()
1666 rxq = fep->rx_queue[queue_id]; in fec_enet_rx_queue()
1671 bdp = rxq->bd.cur; in fec_enet_rx_queue()
1672 xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); in fec_enet_rx_queue()
1674 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { in fec_enet_rx_queue()
1680 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); in fec_enet_rx_queue()
1687 ndev->stats.rx_errors++; in fec_enet_rx_queue()
1690 ndev->stats.rx_fifo_errors++; in fec_enet_rx_queue()
1696 ndev->stats.rx_length_errors++; in fec_enet_rx_queue()
1701 ndev->stats.rx_crc_errors++; in fec_enet_rx_queue()
1704 ndev->stats.rx_frame_errors++; in fec_enet_rx_queue()
1709 ndev->stats.rx_packets++; in fec_enet_rx_queue()
1710 pkt_len = fec16_to_cpu(bdp->cbd_datlen); in fec_enet_rx_queue()
1711 ndev->stats.rx_bytes += pkt_len; in fec_enet_rx_queue()
1713 index = fec_enet_get_bd_index(bdp, &rxq->bd); in fec_enet_rx_queue()
1714 page = rxq->rx_skb_info[index].page; in fec_enet_rx_queue()
1715 dma_sync_single_for_cpu(&fep->pdev->dev, in fec_enet_rx_queue()
1716 fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_rx_queue()
1726 data_start, pkt_len - sub_len, false); in fec_enet_rx_queue()
1739 page_pool_recycle_direct(rxq->page_pool, page); in fec_enet_rx_queue()
1740 ndev->stats.rx_dropped++; in fec_enet_rx_queue()
1747 skb_put(skb, pkt_len - sub_len); in fec_enet_rx_queue()
1754 data = skb->data; in fec_enet_rx_queue()
1758 if (fep->bufdesc_ex) in fec_enet_rx_queue()
1763 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && in fec_enet_rx_queue()
1764 fep->bufdesc_ex && in fec_enet_rx_queue()
1765 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { in fec_enet_rx_queue()
1769 vlan_tag = ntohs(vlan_header->h_vlan_TCI); in fec_enet_rx_queue()
1773 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); in fec_enet_rx_queue()
1777 skb->protocol = eth_type_trans(skb, ndev); in fec_enet_rx_queue()
1780 if (fep->hwts_rx_en && fep->bufdesc_ex) in fec_enet_rx_queue()
1781 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), in fec_enet_rx_queue()
1784 if (fep->bufdesc_ex && in fec_enet_rx_queue()
1785 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { in fec_enet_rx_queue()
1786 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { in fec_enet_rx_queue()
1788 skb->ip_summed = CHECKSUM_UNNECESSARY; in fec_enet_rx_queue()
1801 napi_gro_receive(&fep->napi, skb); in fec_enet_rx_queue()
1810 if (fep->bufdesc_ex) { in fec_enet_rx_queue()
1813 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); in fec_enet_rx_queue()
1814 ebdp->cbd_prot = 0; in fec_enet_rx_queue()
1815 ebdp->cbd_bdu = 0; in fec_enet_rx_queue()
1821 bdp->cbd_sc = cpu_to_fec16(status); in fec_enet_rx_queue()
1824 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); in fec_enet_rx_queue()
1830 writel(0, rxq->bd.reg_desc_active); in fec_enet_rx_queue()
1832 rxq->bd.cur = bdp; in fec_enet_rx_queue()
1846 for (i = fep->num_rx_queues - 1; i >= 0; i--) in fec_enet_rx()
1847 done += fec_enet_rx_queue(ndev, budget - done, i); in fec_enet_rx()
1856 int_events = readl(fep->hwp + FEC_IEVENT); in fec_enet_collect_events()
1861 writel(int_events, fep->hwp + FEC_IEVENT); in fec_enet_collect_events()
1873 if (fec_enet_collect_events(fep) && fep->link) { in fec_enet_interrupt()
1876 if (napi_schedule_prep(&fep->napi)) { in fec_enet_interrupt()
1878 writel(0, fep->hwp + FEC_IMASK); in fec_enet_interrupt()
1879 __napi_schedule(&fep->napi); in fec_enet_interrupt()
1888 struct net_device *ndev = napi->dev; in fec_enet_rx_napi()
1893 done += fec_enet_rx(ndev, budget - done); in fec_enet_rx_napi()
1899 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_enet_rx_napi()
1905 /* ------------------------------------------------------------------------- */
1924 struct device_node *np = fep->pdev->dev.of_node; in fec_get_mac()
1929 else if (ret == -EPROBE_DEFER) in fec_get_mac()
1942 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); in fec_get_mac()
1945 iap = (unsigned char *)&pdata->mac; in fec_get_mac()
1954 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); in fec_get_mac()
1956 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); in fec_get_mac()
1965 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); in fec_get_mac()
1967 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", in fec_get_mac()
1968 ndev->dev_addr); in fec_get_mac()
1973 eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0); in fec_get_mac()
1978 /* ------------------------------------------------------------------------- */
1986 struct phy_device *phy_dev = ndev->phydev; in fec_enet_adjust_link()
1995 fep->link = 0; in fec_enet_adjust_link()
1996 } else if (phy_dev->link) { in fec_enet_adjust_link()
1997 if (!fep->link) { in fec_enet_adjust_link()
1998 fep->link = phy_dev->link; in fec_enet_adjust_link()
2002 if (fep->full_duplex != phy_dev->duplex) { in fec_enet_adjust_link()
2003 fep->full_duplex = phy_dev->duplex; in fec_enet_adjust_link()
2007 if (phy_dev->speed != fep->speed) { in fec_enet_adjust_link()
2008 fep->speed = phy_dev->speed; in fec_enet_adjust_link()
2014 napi_disable(&fep->napi); in fec_enet_adjust_link()
2019 napi_enable(&fep->napi); in fec_enet_adjust_link()
2022 if (fep->link) { in fec_enet_adjust_link()
2023 napi_disable(&fep->napi); in fec_enet_adjust_link()
2027 napi_enable(&fep->napi); in fec_enet_adjust_link()
2028 fep->link = phy_dev->link; in fec_enet_adjust_link()
2042 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, in fec_enet_mdio_wait()
2046 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); in fec_enet_mdio_wait()
2053 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_read_c22()
2054 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_read_c22()
2069 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read_c22()
2074 netdev_err(fep->netdev, "MDIO read timeout\n"); in fec_enet_mdio_read_c22()
2078 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); in fec_enet_mdio_read_c22()
2090 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_read_c45()
2091 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_read_c45()
2104 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read_c45()
2109 netdev_err(fep->netdev, "MDIO address write timeout\n"); in fec_enet_mdio_read_c45()
2118 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read_c45()
2123 netdev_err(fep->netdev, "MDIO read timeout\n"); in fec_enet_mdio_read_c45()
2127 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); in fec_enet_mdio_read_c45()
2139 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_write_c22()
2140 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_write_c22()
2155 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write_c22()
2160 netdev_err(fep->netdev, "MDIO write timeout\n"); in fec_enet_mdio_write_c22()
2171 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_write_c45()
2172 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_write_c45()
2185 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write_c45()
2190 netdev_err(fep->netdev, "MDIO address write timeout\n"); in fec_enet_mdio_write_c45()
2198 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write_c45()
2203 netdev_err(fep->netdev, "MDIO write timeout\n"); in fec_enet_mdio_write_c45()
2215 struct phy_device *phy_dev = ndev->phydev; in fec_enet_phy_reset_after_clk_enable()
2219 } else if (fep->phy_node) { in fec_enet_phy_reset_after_clk_enable()
2227 phy_dev = of_phy_find_device(fep->phy_node); in fec_enet_phy_reset_after_clk_enable()
2229 put_device(&phy_dev->mdio.dev); in fec_enet_phy_reset_after_clk_enable()
2239 ret = clk_prepare_enable(fep->clk_enet_out); in fec_enet_clk_enable()
2243 if (fep->clk_ptp) { in fec_enet_clk_enable()
2244 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
2245 ret = clk_prepare_enable(fep->clk_ptp); in fec_enet_clk_enable()
2247 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
2250 fep->ptp_clk_on = true; in fec_enet_clk_enable()
2252 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
2255 ret = clk_prepare_enable(fep->clk_ref); in fec_enet_clk_enable()
2259 ret = clk_prepare_enable(fep->clk_2x_txclk); in fec_enet_clk_enable()
2265 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
2266 if (fep->clk_ptp) { in fec_enet_clk_enable()
2267 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
2268 clk_disable_unprepare(fep->clk_ptp); in fec_enet_clk_enable()
2269 fep->ptp_clk_on = false; in fec_enet_clk_enable()
2270 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
2272 clk_disable_unprepare(fep->clk_ref); in fec_enet_clk_enable()
2273 clk_disable_unprepare(fep->clk_2x_txclk); in fec_enet_clk_enable()
2279 if (fep->clk_ref) in fec_enet_clk_enable()
2280 clk_disable_unprepare(fep->clk_ref); in fec_enet_clk_enable()
2282 if (fep->clk_ptp) { in fec_enet_clk_enable()
2283 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
2284 clk_disable_unprepare(fep->clk_ptp); in fec_enet_clk_enable()
2285 fep->ptp_clk_on = false; in fec_enet_clk_enable()
2286 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
2289 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
2300 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { in fec_enet_parse_rgmii_delay()
2302 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); in fec_enet_parse_rgmii_delay()
2303 return -EINVAL; in fec_enet_parse_rgmii_delay()
2305 fep->rgmii_txc_dly = true; in fec_enet_parse_rgmii_delay()
2310 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { in fec_enet_parse_rgmii_delay()
2312 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); in fec_enet_parse_rgmii_delay()
2313 return -EINVAL; in fec_enet_parse_rgmii_delay()
2315 fep->rgmii_rxc_dly = true; in fec_enet_parse_rgmii_delay()
2329 int dev_id = fep->dev_id; in fec_enet_mii_probe()
2331 if (fep->phy_node) { in fec_enet_mii_probe()
2332 phy_dev = of_phy_connect(ndev, fep->phy_node, in fec_enet_mii_probe()
2334 fep->phy_interface); in fec_enet_mii_probe()
2337 return -ENODEV; in fec_enet_mii_probe()
2342 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) in fec_enet_mii_probe()
2344 if (dev_id--) in fec_enet_mii_probe()
2346 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); in fec_enet_mii_probe()
2352 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); in fec_enet_mii_probe()
2359 fep->phy_interface); in fec_enet_mii_probe()
2368 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { in fec_enet_mii_probe()
2379 fep->link = 0; in fec_enet_mii_probe()
2380 fep->full_duplex = 0; in fec_enet_mii_probe()
2382 phy_dev->mac_managed_pm = true; in fec_enet_mii_probe()
2396 int err = -ENXIO; in fec_enet_mii_init()
2404 * - fec0 supports MII & RMII modes while fec1 only supports RMII in fec_enet_mii_init()
2405 * - fec0 acts as the 1588 time master while fec1 is slave in fec_enet_mii_init()
2406 * - external phys can only be configured by fec0 in fec_enet_mii_init()
2416 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { in fec_enet_mii_init()
2419 fep->mii_bus = fec0_mii_bus; in fec_enet_mii_init()
2423 return -ENOENT; in fec_enet_mii_init()
2427 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); in fec_enet_mii_init()
2429 of_property_read_u32(node, "clock-frequency", &bus_freq); in fec_enet_mii_init()
2431 "suppress-preamble"); in fec_enet_mii_init()
2438 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 in fec_enet_mii_init()
2442 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); in fec_enet_mii_init()
2443 if (fep->quirks & FEC_QUIRK_ENET_MAC) in fec_enet_mii_init()
2444 mii_speed--; in fec_enet_mii_init()
2446 dev_err(&pdev->dev, in fec_enet_mii_init()
2448 clk_get_rate(fep->clk_ipg)); in fec_enet_mii_init()
2449 err = -EINVAL; in fec_enet_mii_init()
2465 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; in fec_enet_mii_init()
2467 fep->phy_speed = mii_speed << 1 | holdtime << 8; in fec_enet_mii_init()
2470 fep->phy_speed |= BIT(7); in fec_enet_mii_init()
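As a worked example of the MSCR divisor computed above (the 66 MHz figure is an assumed, typical IPG clock rate, not something stated in this listing; bus_freq falls back to 2.5 MHz when the mdio node has no "clock-frequency" property):
	/* mii_speed = DIV_ROUND_UP(66000000, 2500000 * 2) = 14              */
	/* ENET-MAC quirk: mii_speed--          => MII_SPEED = 13            */
	/* resulting MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, under 2.5 MHz */
The BIT(7) set just above for preamble suppression corresponds to the MSCR DIS_PRE (disable preamble) bit, i.e. the behaviour selected by the "suppress-preamble" device-tree property this search matched.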
2472 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { in fec_enet_mii_init()
2475 * - writing MSCR: in fec_enet_mii_init()
2476 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & in fec_enet_mii_init()
2478 * - writing MMFR: in fec_enet_mii_init()
2479 * - mscr[7:0]_not_zero in fec_enet_mii_init()
2481 writel(0, fep->hwp + FEC_MII_DATA); in fec_enet_mii_init()
2484 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_enet_mii_init()
2487 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); in fec_enet_mii_init()
2489 fep->mii_bus = mdiobus_alloc(); in fec_enet_mii_init()
2490 if (fep->mii_bus == NULL) { in fec_enet_mii_init()
2491 err = -ENOMEM; in fec_enet_mii_init()
2495 fep->mii_bus->name = "fec_enet_mii_bus"; in fec_enet_mii_init()
2496 fep->mii_bus->read = fec_enet_mdio_read_c22; in fec_enet_mii_init()
2497 fep->mii_bus->write = fec_enet_mdio_write_c22; in fec_enet_mii_init()
2498 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { in fec_enet_mii_init()
2499 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; in fec_enet_mii_init()
2500 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; in fec_enet_mii_init()
2502 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in fec_enet_mii_init()
2503 pdev->name, fep->dev_id + 1); in fec_enet_mii_init()
2504 fep->mii_bus->priv = fep; in fec_enet_mii_init()
2505 fep->mii_bus->parent = &pdev->dev; in fec_enet_mii_init()
2507 err = of_mdiobus_register(fep->mii_bus, node); in fec_enet_mii_init()
2515 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) in fec_enet_mii_init()
2516 fec0_mii_bus = fep->mii_bus; in fec_enet_mii_init()
2521 mdiobus_free(fep->mii_bus); in fec_enet_mii_init()
2529 if (--mii_cnt == 0) { in fec_enet_mii_remove()
2530 mdiobus_unregister(fep->mii_bus); in fec_enet_mii_remove()
2531 mdiobus_free(fep->mii_bus); in fec_enet_mii_remove()
2540 strscpy(info->driver, fep->pdev->dev.driver->name, in fec_enet_get_drvinfo()
2541 sizeof(info->driver)); in fec_enet_get_drvinfo()
2542 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); in fec_enet_get_drvinfo()
2551 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); in fec_enet_get_regs_len()
2636 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; in fec_enet_get_regs()
2637 struct device *dev = &fep->pdev->dev; in fec_enet_get_regs()
2663 regs->version = fec_enet_register_version; in fec_enet_get_regs()
2665 memset(buf, 0, regs->len); in fec_enet_get_regs()
2671 !(fep->quirks & FEC_QUIRK_HAS_FRREG)) in fec_enet_get_regs()
2687 if (fep->bufdesc_ex) { in fec_enet_get_ts_info()
2689 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | in fec_enet_get_ts_info()
2695 if (fep->ptp_clock) in fec_enet_get_ts_info()
2696 info->phc_index = ptp_clock_index(fep->ptp_clock); in fec_enet_get_ts_info()
2698 info->phc_index = -1; in fec_enet_get_ts_info()
2700 info->tx_types = (1 << HWTSTAMP_TX_OFF) | in fec_enet_get_ts_info()
2703 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | in fec_enet_get_ts_info()
2718 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; in fec_enet_get_pauseparam()
2719 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; in fec_enet_get_pauseparam()
2720 pause->rx_pause = pause->tx_pause; in fec_enet_get_pauseparam()
2728 if (!ndev->phydev) in fec_enet_set_pauseparam()
2729 return -ENODEV; in fec_enet_set_pauseparam()
2731 if (pause->tx_pause != pause->rx_pause) { in fec_enet_set_pauseparam()
2734 return -EINVAL; in fec_enet_set_pauseparam()
2737 fep->pause_flag = 0; in fec_enet_set_pauseparam()
2740 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; in fec_enet_set_pauseparam()
2741 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; in fec_enet_set_pauseparam()
2743 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, in fec_enet_set_pauseparam()
2744 pause->autoneg); in fec_enet_set_pauseparam()
2746 if (pause->autoneg) { in fec_enet_set_pauseparam()
2749 phy_start_aneg(ndev->phydev); in fec_enet_set_pauseparam()
2752 napi_disable(&fep->napi); in fec_enet_set_pauseparam()
2757 napi_enable(&fep->napi); in fec_enet_set_pauseparam()
2847 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); in fec_enet_update_ethtool_stats()
2856 for (i = fep->num_rx_queues - 1; i >= 0; i--) { in fec_enet_get_xdp_stats()
2857 rxq = fep->rx_queue[i]; in fec_enet_get_xdp_stats()
2860 xdp_stats[j] += rxq->stats[j]; in fec_enet_get_xdp_stats()
2873 for (i = fep->num_rx_queues - 1; i >= 0; i--) { in fec_enet_page_pool_stats()
2874 rxq = fep->rx_queue[i]; in fec_enet_page_pool_stats()
2876 if (!rxq->page_pool) in fec_enet_page_pool_stats()
2879 page_pool_get_stats(rxq->page_pool, &stats); in fec_enet_page_pool_stats()
2894 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); in fec_enet_get_ethtool_stats()
2939 return -EOPNOTSUPP; in fec_enet_get_sset_count()
2950 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); in fec_enet_clear_ethtool_stats()
2953 writel(0, fep->hwp + fec_stats[i].offset); in fec_enet_clear_ethtool_stats()
2955 for (i = fep->num_rx_queues - 1; i >= 0; i--) { in fec_enet_clear_ethtool_stats()
2956 rxq = fep->rx_queue[i]; in fec_enet_clear_ethtool_stats()
2958 rxq->stats[j] = 0; in fec_enet_clear_ethtool_stats()
2962 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); in fec_enet_clear_ethtool_stats()
2984 return us * (fep->itr_clk_rate / 64000) / 1000; in fec_enet_us_to_itr_clock()
2994 if (!fep->rx_time_itr || !fep->rx_pkts_itr || in fec_enet_itr_coal_set()
2995 !fep->tx_time_itr || !fep->tx_pkts_itr) in fec_enet_itr_coal_set()
3005 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); in fec_enet_itr_coal_set()
3006 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); in fec_enet_itr_coal_set()
3007 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); in fec_enet_itr_coal_set()
3008 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); in fec_enet_itr_coal_set()
3013 writel(tx_itr, fep->hwp + FEC_TXIC0); in fec_enet_itr_coal_set()
3014 writel(rx_itr, fep->hwp + FEC_RXIC0); in fec_enet_itr_coal_set()
3015 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { in fec_enet_itr_coal_set()
3016 writel(tx_itr, fep->hwp + FEC_TXIC1); in fec_enet_itr_coal_set()
3017 writel(rx_itr, fep->hwp + FEC_RXIC1); in fec_enet_itr_coal_set()
3018 writel(tx_itr, fep->hwp + FEC_TXIC2); in fec_enet_itr_coal_set()
3019 writel(rx_itr, fep->hwp + FEC_RXIC2); in fec_enet_itr_coal_set()
3030 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) in fec_enet_get_coalesce()
3031 return -EOPNOTSUPP; in fec_enet_get_coalesce()
3033 ec->rx_coalesce_usecs = fep->rx_time_itr; in fec_enet_get_coalesce()
3034 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; in fec_enet_get_coalesce()
3036 ec->tx_coalesce_usecs = fep->tx_time_itr; in fec_enet_get_coalesce()
3037 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; in fec_enet_get_coalesce()
3048 struct device *dev = &fep->pdev->dev; in fec_enet_set_coalesce()
3051 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) in fec_enet_set_coalesce()
3052 return -EOPNOTSUPP; in fec_enet_set_coalesce()
3054 if (ec->rx_max_coalesced_frames > 255) { in fec_enet_set_coalesce()
3056 return -EINVAL; in fec_enet_set_coalesce()
3059 if (ec->tx_max_coalesced_frames > 255) { in fec_enet_set_coalesce()
3061 return -EINVAL; in fec_enet_set_coalesce()
3064 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); in fec_enet_set_coalesce()
3067 return -EINVAL; in fec_enet_set_coalesce()
3070 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); in fec_enet_set_coalesce()
3073 return -EINVAL; in fec_enet_set_coalesce()
3076 fep->rx_time_itr = ec->rx_coalesce_usecs; in fec_enet_set_coalesce()
3077 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; in fec_enet_set_coalesce()
3079 fep->tx_time_itr = ec->tx_coalesce_usecs; in fec_enet_set_coalesce()
3080 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; in fec_enet_set_coalesce()
3094 return us * (fep->clk_ref_rate / 1000) / 1000; in fec_enet_us_to_tx_cycle()
3100 struct ethtool_eee *p = &fep->eee; in fec_enet_eee_mode_set()
3105 ret = phy_init_eee(ndev->phydev, false); in fec_enet_eee_mode_set()
3109 sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); in fec_enet_eee_mode_set()
3116 p->tx_lpi_enabled = enable; in fec_enet_eee_mode_set()
3117 p->eee_enabled = enable; in fec_enet_eee_mode_set()
3118 p->eee_active = enable; in fec_enet_eee_mode_set()
3120 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); in fec_enet_eee_mode_set()
3121 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); in fec_enet_eee_mode_set()
3130 struct ethtool_eee *p = &fep->eee; in fec_enet_get_eee()
3132 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) in fec_enet_get_eee()
3133 return -EOPNOTSUPP; in fec_enet_get_eee()
3136 return -ENETDOWN; in fec_enet_get_eee()
3138 edata->eee_enabled = p->eee_enabled; in fec_enet_get_eee()
3139 edata->eee_active = p->eee_active; in fec_enet_get_eee()
3140 edata->tx_lpi_timer = p->tx_lpi_timer; in fec_enet_get_eee()
3141 edata->tx_lpi_enabled = p->tx_lpi_enabled; in fec_enet_get_eee()
3143 return phy_ethtool_get_eee(ndev->phydev, edata); in fec_enet_get_eee()
3150 struct ethtool_eee *p = &fep->eee; in fec_enet_set_eee()
3153 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) in fec_enet_set_eee()
3154 return -EOPNOTSUPP; in fec_enet_set_eee()
3157 return -ENETDOWN; in fec_enet_set_eee()
3159 p->tx_lpi_timer = edata->tx_lpi_timer; in fec_enet_set_eee()
3161 if (!edata->eee_enabled || !edata->tx_lpi_enabled || in fec_enet_set_eee()
3162 !edata->tx_lpi_timer) in fec_enet_set_eee()
3170 return phy_ethtool_set_eee(ndev->phydev, edata); in fec_enet_set_eee()
3178 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { in fec_enet_get_wol()
3179 wol->supported = WAKE_MAGIC; in fec_enet_get_wol()
3180 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; in fec_enet_get_wol()
3182 wol->supported = wol->wolopts = 0; in fec_enet_get_wol()
3191 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) in fec_enet_set_wol()
3192 return -EINVAL; in fec_enet_set_wol()
3194 if (wol->wolopts & ~WAKE_MAGIC) in fec_enet_set_wol()
3195 return -EINVAL; in fec_enet_set_wol()
3197 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); in fec_enet_set_wol()
3198 if (device_may_wakeup(&ndev->dev)) in fec_enet_set_wol()
3199 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; in fec_enet_set_wol()
3201 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); in fec_enet_set_wol()
3241 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_free_buffers()
3242 rxq = fep->rx_queue[q]; in fec_enet_free_buffers()
3243 for (i = 0; i < rxq->bd.ring_size; i++) in fec_enet_free_buffers()
3244 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); in fec_enet_free_buffers()
3247 rxq->stats[i] = 0; in fec_enet_free_buffers()
3249 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) in fec_enet_free_buffers()
3250 xdp_rxq_info_unreg(&rxq->xdp_rxq); in fec_enet_free_buffers()
3251 page_pool_destroy(rxq->page_pool); in fec_enet_free_buffers()
3252 rxq->page_pool = NULL; in fec_enet_free_buffers()
3255 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_free_buffers()
3256 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
3257 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_free_buffers()
3258 kfree(txq->tx_bounce[i]); in fec_enet_free_buffers()
3259 txq->tx_bounce[i] = NULL; in fec_enet_free_buffers()
3261 if (!txq->tx_buf[i].buf_p) { in fec_enet_free_buffers()
3262 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; in fec_enet_free_buffers()
3266 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { in fec_enet_free_buffers()
3267 dev_kfree_skb(txq->tx_buf[i].buf_p); in fec_enet_free_buffers()
3268 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { in fec_enet_free_buffers()
3269 xdp_return_frame(txq->tx_buf[i].buf_p); in fec_enet_free_buffers()
3271 struct page *page = txq->tx_buf[i].buf_p; in fec_enet_free_buffers()
3273 page_pool_put_page(page->pp, page, 0, false); in fec_enet_free_buffers()
3276 txq->tx_buf[i].buf_p = NULL; in fec_enet_free_buffers()
3277 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; in fec_enet_free_buffers()
3288 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
3289 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { in fec_enet_free_queue()
3290 txq = fep->tx_queue[i]; in fec_enet_free_queue()
3291 dma_free_coherent(&fep->pdev->dev, in fec_enet_free_queue()
3292 txq->bd.ring_size * TSO_HEADER_SIZE, in fec_enet_free_queue()
3293 txq->tso_hdrs, in fec_enet_free_queue()
3294 txq->tso_hdrs_dma); in fec_enet_free_queue()
3297 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_free_queue()
3298 kfree(fep->rx_queue[i]); in fec_enet_free_queue()
3299 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
3300 kfree(fep->tx_queue[i]); in fec_enet_free_queue()
3310 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_alloc_queue()
3313 ret = -ENOMEM; in fec_enet_alloc_queue()
3317 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
3318 txq->bd.ring_size = TX_RING_SIZE; in fec_enet_alloc_queue()
3319 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; in fec_enet_alloc_queue()
3321 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; in fec_enet_alloc_queue()
3322 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; in fec_enet_alloc_queue()
3324 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, in fec_enet_alloc_queue()
3325 txq->bd.ring_size * TSO_HEADER_SIZE, in fec_enet_alloc_queue()
3326 &txq->tso_hdrs_dma, in fec_enet_alloc_queue()
3328 if (!txq->tso_hdrs) { in fec_enet_alloc_queue()
3329 ret = -ENOMEM; in fec_enet_alloc_queue()
3334 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_alloc_queue()
3335 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), in fec_enet_alloc_queue()
3337 if (!fep->rx_queue[i]) { in fec_enet_alloc_queue()
3338 ret = -ENOMEM; in fec_enet_alloc_queue()
3342 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; in fec_enet_alloc_queue()
3343 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; in fec_enet_alloc_queue()
3362 rxq = fep->rx_queue[queue]; in fec_enet_alloc_rxq_buffers()
3363 bdp = rxq->bd.base; in fec_enet_alloc_rxq_buffers()
3365 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); in fec_enet_alloc_rxq_buffers()
3371 for (i = 0; i < rxq->bd.ring_size; i++) { in fec_enet_alloc_rxq_buffers()
3372 page = page_pool_dev_alloc_pages(rxq->page_pool); in fec_enet_alloc_rxq_buffers()
3377 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); in fec_enet_alloc_rxq_buffers()
3379 rxq->rx_skb_info[i].page = page; in fec_enet_alloc_rxq_buffers()
3380 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; in fec_enet_alloc_rxq_buffers()
3381 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); in fec_enet_alloc_rxq_buffers()
3383 if (fep->bufdesc_ex) { in fec_enet_alloc_rxq_buffers()
3385 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); in fec_enet_alloc_rxq_buffers()
3388 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); in fec_enet_alloc_rxq_buffers()
3392 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); in fec_enet_alloc_rxq_buffers()
3393 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); in fec_enet_alloc_rxq_buffers()
3398 return -ENOMEM; in fec_enet_alloc_rxq_buffers()
3409 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
3410 bdp = txq->bd.base; in fec_enet_alloc_txq_buffers()
3411 for (i = 0; i < txq->bd.ring_size; i++) { in fec_enet_alloc_txq_buffers()
3412 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); in fec_enet_alloc_txq_buffers()
3413 if (!txq->tx_bounce[i]) in fec_enet_alloc_txq_buffers()
3416 bdp->cbd_sc = cpu_to_fec16(0); in fec_enet_alloc_txq_buffers()
3417 bdp->cbd_bufaddr = cpu_to_fec32(0); in fec_enet_alloc_txq_buffers()
3419 if (fep->bufdesc_ex) { in fec_enet_alloc_txq_buffers()
3421 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); in fec_enet_alloc_txq_buffers()
3424 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_alloc_txq_buffers()
3428 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); in fec_enet_alloc_txq_buffers()
3429 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); in fec_enet_alloc_txq_buffers()
3435 return -ENOMEM; in fec_enet_alloc_txq_buffers()
3443 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_alloc_buffers()
3445 return -ENOMEM; in fec_enet_alloc_buffers()
3447 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_alloc_buffers()
3449 return -ENOMEM; in fec_enet_alloc_buffers()
3460 ret = pm_runtime_resume_and_get(&fep->pdev->dev); in fec_enet_open()
3464 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_enet_open()
3475 if (ndev->phydev && ndev->phydev->drv) in fec_enet_open()
3502 if (fep->quirks & FEC_QUIRK_ERR006687) in fec_enet_open()
3505 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) in fec_enet_open()
3506 cpu_latency_qos_add_request(&fep->pm_qos_req, 0); in fec_enet_open()
3508 napi_enable(&fep->napi); in fec_enet_open()
3509 phy_start(ndev->phydev); in fec_enet_open()
3512 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & in fec_enet_open()
3522 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_open()
3523 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_open()
3524 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_open()
3533 phy_stop(ndev->phydev); in fec_enet_close()
3536 napi_disable(&fep->napi); in fec_enet_close()
3541 phy_disconnect(ndev->phydev); in fec_enet_close()
3543 if (fep->quirks & FEC_QUIRK_ERR006687) in fec_enet_close()
3549 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) in fec_enet_close()
3550 cpu_latency_qos_remove_request(&fep->pm_qos_req); in fec_enet_close()
3552 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_close()
3553 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_close()
3554 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_close()
3581 if (ndev->flags & IFF_PROMISC) { in set_multicast_list()
3582 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3584 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3588 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3590 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3592 if (ndev->flags & IFF_ALLMULTI) { in set_multicast_list()
3596 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3597 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3605 crc = ether_crc_le(ndev->addr_len, ha->addr); in set_multicast_list()
3610 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; in set_multicast_list()
3613 hash_high |= 1 << (hash - 32); in set_multicast_list()
3618 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3619 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
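To illustrate the hash computation in set_multicast_list() above: the destination MAC runs through the little-endian Ethernet CRC, the top FEC_HASH_BITS (six) bits of the CRC pick one of 64 bins, and bins 32..63 set a bit destined for the HIGH register while 0..31 go to LOW. The CRC routine below is a plain reflected CRC-32 (polynomial 0xEDB88320, seed 0xFFFFFFFF, no final inversion), assumed here to behave like ether_crc_le(); treat the whole thing as a sketch, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define FEC_HASH_BITS 6 /* 64 hash bins */

/* Assumed ether_crc_le() equivalent: reflected CRC-32 over the address bytes. */
static uint32_t crc32_le_bytes(const uint8_t *data, int len)
{
	uint32_t crc = 0xFFFFFFFFu;

	for (int i = 0; i < len; i++) {
		crc ^= data[i];
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xEDB88320u : crc >> 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* example group address */
	uint32_t hash_high = 0, hash_low = 0;

	uint32_t crc = crc32_le_bytes(mcast, 6);
	unsigned int hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; /* top six CRC bits pick the bin */

	if (hash > 31)
		hash_high |= 1u << (hash - 32); /* bins 32..63 -> FEC_GRP_HASH_TABLE_HIGH */
	else
		hash_low |= 1u << hash;         /* bins 0..31  -> FEC_GRP_HASH_TABLE_LOW */

	printf("crc=0x%08x hash=%u high=0x%08x low=0x%08x\n",
	       (unsigned)crc, hash, (unsigned)hash_high, (unsigned)hash_low);
	return 0;
}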
3630 if (!is_valid_ether_addr(addr->sa_data)) in fec_set_mac_address()
3631 return -EADDRNOTAVAIL; in fec_set_mac_address()
3632 eth_hw_addr_set(ndev, addr->sa_data); in fec_set_mac_address()
3643 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | in fec_set_mac_address()
3644 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), in fec_set_mac_address()
3645 fep->hwp + FEC_ADDR_LOW); in fec_set_mac_address()
3646 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), in fec_set_mac_address()
3647 fep->hwp + FEC_ADDR_HIGH); in fec_set_mac_address()
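A small standalone sketch of the byte packing those two register writes perform in fec_set_mac_address(): the first four address bytes go into the low register with byte 0 in the top bits, and the last two bytes fill the upper half of the high register. The MAC value is made up; the register names are reused only as labels.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative station address; the driver takes it from DT, fuses or userspace. */
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };

	uint32_t addr_low  = mac[3] | (mac[2] << 8) | (mac[1] << 16) | ((uint32_t)mac[0] << 24);
	uint32_t addr_high = (mac[5] << 16) | ((uint32_t)mac[4] << 24);

	printf("FEC_ADDR_LOW  = 0x%08x\n", (unsigned)addr_low);  /* 0x00049f01 */
	printf("FEC_ADDR_HIGH = 0x%08x\n", (unsigned)addr_high); /* 0x02030000 */
	return 0;
}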
3653 * fec_poll_controller - FEC Poll controller function
3665 if (fep->irq[i] > 0) { in fec_poll_controller()
3666 disable_irq(fep->irq[i]); in fec_poll_controller()
3667 fec_enet_interrupt(fep->irq[i], dev); in fec_poll_controller()
3668 enable_irq(fep->irq[i]); in fec_poll_controller()
3678 netdev_features_t changed = features ^ netdev->features; in fec_enet_set_netdev_features()
3680 netdev->features = features; in fec_enet_set_netdev_features()
3685 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3687 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3695 netdev_features_t changed = features ^ netdev->features; in fec_set_features()
3698 napi_disable(&fep->napi); in fec_set_features()
3705 napi_enable(&fep->napi); in fec_set_features()
3718 if (skb->protocol == htons(ETH_P_ALL)) { in fec_enet_get_raw_vlan_tci()
3719 vhdr = (struct vlan_ethhdr *)(skb->data); in fec_enet_get_raw_vlan_tci()
3720 vlan_TCI = ntohs(vhdr->h_vlan_TCI); in fec_enet_get_raw_vlan_tci()
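Since fec_enet_get_raw_vlan_tci() returns the raw TCI word read above, here is a tiny decode of its 802.1Q fields; the TCI value is invented for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tci = 0xa00c;                /* example TCI as returned by ntohs() */
	unsigned int pcp = (tci >> 13) & 0x7; /* priority code point */
	unsigned int dei = (tci >> 12) & 0x1; /* drop eligible indicator */
	unsigned int vid = tci & 0x0fff;      /* VLAN ID */

	printf("pcp=%u dei=%u vid=%u\n", pcp, dei, vid); /* pcp=5 dei=0 vid=12 */
	return 0;
}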
3732 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) in fec_enet_select_queue()
3748 switch (bpf->command) { in fec_enet_bpf()
3754 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_bpf()
3755 return -EOPNOTSUPP; in fec_enet_bpf()
3757 if (!bpf->prog) in fec_enet_bpf()
3761 napi_disable(&fep->napi); in fec_enet_bpf()
3765 old_prog = xchg(&fep->xdp_prog, bpf->prog); in fec_enet_bpf()
3772 napi_enable(&fep->napi); in fec_enet_bpf()
3776 if (bpf->prog) in fec_enet_bpf()
3782 return -EOPNOTSUPP; in fec_enet_bpf()
3785 return -EOPNOTSUPP; in fec_enet_bpf()
3795 return (index % fep->num_tx_queues); in fec_enet_xdp_get_tx_queue()
3811 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n"); in fec_enet_txq_xmit_frame()
3812 return -EBUSY; in fec_enet_txq_xmit_frame()
3816 bdp = txq->bd.cur; in fec_enet_txq_xmit_frame()
3817 status = fec16_to_cpu(bdp->cbd_sc); in fec_enet_txq_xmit_frame()
3820 index = fec_enet_get_bd_index(bdp, &txq->bd); in fec_enet_txq_xmit_frame()
3825 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data, in fec_enet_txq_xmit_frame()
3826 xdpf->len, DMA_TO_DEVICE); in fec_enet_txq_xmit_frame()
3827 if (dma_mapping_error(&fep->pdev->dev, dma_addr)) in fec_enet_txq_xmit_frame()
3828 return -ENOMEM; in fec_enet_txq_xmit_frame()
3830 frame_len = xdpf->len; in fec_enet_txq_xmit_frame()
3831 txq->tx_buf[index].buf_p = xdpf; in fec_enet_txq_xmit_frame()
3832 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; in fec_enet_txq_xmit_frame()
3837 page = virt_to_page(xdpb->data); in fec_enet_txq_xmit_frame()
3839 (xdpb->data - xdpb->data_hard_start); in fec_enet_txq_xmit_frame()
3840 dma_sync_single_for_device(&fep->pdev->dev, dma_addr, in fec_enet_txq_xmit_frame()
3842 frame_len = xdpb->data_end - xdpb->data; in fec_enet_txq_xmit_frame()
3843 txq->tx_buf[index].buf_p = page; in fec_enet_txq_xmit_frame()
3844 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX; in fec_enet_txq_xmit_frame()
3848 if (fep->bufdesc_ex) in fec_enet_txq_xmit_frame()
3851 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); in fec_enet_txq_xmit_frame()
3852 bdp->cbd_datlen = cpu_to_fec16(frame_len); in fec_enet_txq_xmit_frame()
3854 if (fep->bufdesc_ex) { in fec_enet_txq_xmit_frame()
3857 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_xmit_frame()
3858 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); in fec_enet_txq_xmit_frame()
3860 ebdp->cbd_bdu = 0; in fec_enet_txq_xmit_frame()
3861 ebdp->cbd_esc = cpu_to_fec32(estatus); in fec_enet_txq_xmit_frame()
3873 bdp->cbd_sc = cpu_to_fec16(status); in fec_enet_txq_xmit_frame()
3876 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); in fec_enet_txq_xmit_frame()
3878 /* Make sure the update to bdp are performed before txq->bd.cur. */ in fec_enet_txq_xmit_frame()
3881 txq->bd.cur = bdp; in fec_enet_txq_xmit_frame()
3884 writel(0, txq->bd.reg_desc_active); in fec_enet_txq_xmit_frame()
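The tail of fec_enet_txq_xmit_frame() above follows the usual fill-then-publish pattern: buffer address, length and extended status are written first, a DMA write barrier orders those stores, and only then is the status word stored and the queue doorbell poked. A user-space analogue using a C11 release fence, with invented names and bit values, might look like this sketch.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TX_READY 0x8000 /* illustrative ownership bit */

struct demo_txdesc {
	uint32_t bufaddr;
	uint16_t datlen;
	_Atomic uint16_t sc; /* status word; the ready bit hands the descriptor to the MAC */
};

/* Hypothetical publish step mirroring the barrier-before-ready ordering. */
static void publish_tx_desc(struct demo_txdesc *bd, uint32_t dma_addr, uint16_t len)
{
	bd->bufaddr = dma_addr;
	bd->datlen = len;

	/* Order the payload stores before the ownership handover (the dma_wmb() analogue). */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&bd->sc, TX_READY, memory_order_relaxed);

	/* In the driver the final step is the writel() to the queue's reg_desc_active doorbell. */
}

int main(void)
{
	struct demo_txdesc bd = { 0 };

	publish_tx_desc(&bd, 0x20000000u, 64);
	printf("sc=0x%04x len=%u addr=0x%08x\n",
	       (unsigned)atomic_load(&bd.sc), (unsigned)bd.datlen, (unsigned)bd.bufaddr);
	return 0;
}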
3898 txq = fep->tx_queue[queue]; in fec_enet_xdp_tx_xmit()
3899 nq = netdev_get_tx_queue(fep->netdev, queue); in fec_enet_xdp_tx_xmit()
3926 txq = fep->tx_queue[queue]; in fec_enet_xdp_xmit()
3927 nq = netdev_get_tx_queue(fep->netdev, queue); in fec_enet_xdp_xmit()
3950 return -EINVAL; in fec_hwtstamp_get()
3952 if (!fep->bufdesc_ex) in fec_hwtstamp_get()
3953 return -EOPNOTSUPP; in fec_hwtstamp_get()
3967 return -EINVAL; in fec_hwtstamp_set()
3969 if (!fep->bufdesc_ex) in fec_hwtstamp_set()
3970 return -EOPNOTSUPP; in fec_hwtstamp_set()
4014 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : in fec_enet_init()
4021 fep->rx_align = 0xf; in fec_enet_init()
4022 fep->tx_align = 0xf; in fec_enet_init()
4024 fep->rx_align = 0x3; in fec_enet_init()
4025 fep->tx_align = 0x3; in fec_enet_init()
4027 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; in fec_enet_init()
4028 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; in fec_enet_init()
4029 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; in fec_enet_init()
4030 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT; in fec_enet_init()
4033 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); in fec_enet_init()
4035 dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); in fec_enet_init()
4043 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; in fec_enet_init()
4046 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, in fec_enet_init()
4049 ret = -ENOMEM; in fec_enet_init()
4059 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_init()
4060 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; in fec_enet_init()
4061 unsigned size = dsize * rxq->bd.ring_size; in fec_enet_init()
4063 rxq->bd.qid = i; in fec_enet_init()
4064 rxq->bd.base = cbd_base; in fec_enet_init()
4065 rxq->bd.cur = cbd_base; in fec_enet_init()
4066 rxq->bd.dma = bd_dma; in fec_enet_init()
4067 rxq->bd.dsize = dsize; in fec_enet_init()
4068 rxq->bd.dsize_log2 = dsize_log2; in fec_enet_init()
4069 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; in fec_enet_init()
4072 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); in fec_enet_init()
4075 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_init()
4076 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; in fec_enet_init()
4077 unsigned size = dsize * txq->bd.ring_size; in fec_enet_init()
4079 txq->bd.qid = i; in fec_enet_init()
4080 txq->bd.base = cbd_base; in fec_enet_init()
4081 txq->bd.cur = cbd_base; in fec_enet_init()
4082 txq->bd.dma = bd_dma; in fec_enet_init()
4083 txq->bd.dsize = dsize; in fec_enet_init()
4084 txq->bd.dsize_log2 = dsize_log2; in fec_enet_init()
4085 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; in fec_enet_init()
4088 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); in fec_enet_init()
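The two loops above slice one coherent DMA allocation into per-queue rings: each queue's base starts where the previous ring ended, and its last descriptor sits (ring_size - 1) descriptors later. A quick arithmetic sketch of that layout, with example descriptor and ring sizes (32 bytes assumes extended descriptors; all counts are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int dsize = 32;                 /* assumed extended descriptor size */
	unsigned int rx_ring = 512, tx_ring = 512;
	unsigned int num_rx = 1, num_tx = 1;

	unsigned int total = num_rx * rx_ring + num_tx * tx_ring;
	printf("coherent block: %u descriptors, %u bytes\n", total, total * dsize);

	/* Walk the queues the way the init loops do: the base advances ring by ring. */
	unsigned int offset = 0;
	for (unsigned int q = 0; q < num_rx + num_tx; q++) {
		unsigned int ring = (q < num_rx) ? rx_ring : tx_ring;

		printf("queue %u: base offset %u, last offset %u\n",
		       q, offset, offset + (ring - 1) * dsize);
		offset += ring * dsize;
	}
	return 0;
}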
4093 ndev->watchdog_timeo = TX_TIMEOUT; in fec_enet_init()
4094 ndev->netdev_ops = &fec_netdev_ops; in fec_enet_init()
4095 ndev->ethtool_ops = &fec_enet_ethtool_ops; in fec_enet_init()
4097 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); in fec_enet_init()
4098 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi); in fec_enet_init()
4100 if (fep->quirks & FEC_QUIRK_HAS_VLAN) in fec_enet_init()
4102 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; in fec_enet_init()
4104 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { in fec_enet_init()
4108 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM in fec_enet_init()
4110 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_init()
4113 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { in fec_enet_init()
4114 fep->tx_align = 0; in fec_enet_init()
4115 fep->rx_align = 0x3f; in fec_enet_init()
4118 ndev->hw_features = ndev->features; in fec_enet_init()
4120 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) in fec_enet_init()
4121 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | in fec_enet_init()
4126 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) in fec_enet_init()
4143 struct device_node *np = pdev->dev.of_node; in fec_reset_phy()
4149 err = of_property_read_u32(np, "phy-reset-duration", &msec); in fec_reset_phy()
4154 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); in fec_reset_phy()
4157 return -EINVAL; in fec_reset_phy()
4159 phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset", in fec_reset_phy()
4162 return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset), in fec_reset_phy()
4163 "failed to get phy-reset-gpios\n"); in fec_reset_phy()
4200 struct device_node *np = pdev->dev.of_node; in fec_enet_get_queue_num()
4208 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); in fec_enet_get_queue_num()
4210 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); in fec_enet_get_queue_num()
4213 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", in fec_enet_get_queue_num()
4220 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", in fec_enet_get_queue_num()
4246 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) in fec_enet_get_wakeup_irq()
4247 fep->wake_irq = fep->irq[2]; in fec_enet_get_wakeup_irq()
4249 fep->wake_irq = fep->irq[0]; in fec_enet_get_wakeup_irq()
4259 gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); in fec_enet_init_stop_mode()
4263 ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, in fec_enet_init_stop_mode()
4266 dev_dbg(&fep->pdev->dev, "no stop mode property\n"); in fec_enet_init_stop_mode()
4270 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); in fec_enet_init_stop_mode()
4271 if (IS_ERR(fep->stop_gpr.gpr)) { in fec_enet_init_stop_mode()
4272 dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); in fec_enet_init_stop_mode()
4273 ret = PTR_ERR(fep->stop_gpr.gpr); in fec_enet_init_stop_mode()
4274 fep->stop_gpr.gpr = NULL; in fec_enet_init_stop_mode()
4278 fep->stop_gpr.reg = out_val[1]; in fec_enet_init_stop_mode()
4279 fep->stop_gpr.bit = out_val[2]; in fec_enet_init_stop_mode()
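The "fsl,stop-mode" property parsed above is read as three cells: a phandle to the GPR syscon plus a register offset and a bit index, which land in stop_gpr.reg and stop_gpr.bit. A sketch of how those two cells become a register update (the offset and bit below are invented for the example):

#include <stdio.h>

int main(void)
{
	/* Pretend the property decoded to <&gpr 0x34 27>: register offset 0x34, bit 27. */
	unsigned int reg_offset = 0x34;
	unsigned int bit = 27;
	unsigned int gpr_val = 0;               /* stand-in for the current GPR contents */

	gpr_val |= 1u << bit;                   /* assert the stop request on suspend ... */
	printf("GPR[0x%02x] = 0x%08x\n", reg_offset, gpr_val);

	gpr_val &= ~(1u << bit);                /* ... and clear it again on resume */
	printf("GPR[0x%02x] = 0x%08x\n", reg_offset, gpr_val);
	return 0;
}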
4297 struct device_node *np = pdev->dev.of_node, *phy_node; in fec_probe()
4310 return -ENOMEM; in fec_probe()
4312 SET_NETDEV_DEV(ndev, &pdev->dev); in fec_probe()
4317 of_id = of_match_device(fec_dt_ids, &pdev->dev); in fec_probe()
4319 pdev->id_entry = of_id->data; in fec_probe()
4320 dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data; in fec_probe()
4322 fep->quirks = dev_info->quirks; in fec_probe()
4324 fep->netdev = ndev; in fec_probe()
4325 fep->num_rx_queues = num_rx_qs; in fec_probe()
4326 fep->num_tx_queues = num_tx_qs; in fec_probe()
4330 if (fep->quirks & FEC_QUIRK_HAS_GBIT) in fec_probe()
4331 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; in fec_probe()
4335 pinctrl_pm_select_default_state(&pdev->dev); in fec_probe()
4337 fep->hwp = devm_platform_ioremap_resource(pdev, 0); in fec_probe()
4338 if (IS_ERR(fep->hwp)) { in fec_probe()
4339 ret = PTR_ERR(fep->hwp); in fec_probe()
4343 fep->pdev = pdev; in fec_probe()
4344 fep->dev_id = dev_id++; in fec_probe()
4350 !of_property_read_bool(np, "fsl,err006687-workaround-present")) in fec_probe()
4351 fep->quirks |= FEC_QUIRK_ERR006687; in fec_probe()
4357 if (of_property_read_bool(np, "fsl,magic-packet")) in fec_probe()
4358 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; in fec_probe()
4364 phy_node = of_parse_phandle(np, "phy-handle", 0); in fec_probe()
4368 dev_err(&pdev->dev, in fec_probe()
4369 "broken fixed-link specification\n"); in fec_probe()
4374 fep->phy_node = phy_node; in fec_probe()
4376 ret = of_get_phy_mode(pdev->dev.of_node, &interface); in fec_probe()
4378 pdata = dev_get_platdata(&pdev->dev); in fec_probe()
4380 fep->phy_interface = pdata->phy; in fec_probe()
4382 fep->phy_interface = PHY_INTERFACE_MODE_MII; in fec_probe()
4384 fep->phy_interface = interface; in fec_probe()
4391 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); in fec_probe()
4392 if (IS_ERR(fep->clk_ipg)) { in fec_probe()
4393 ret = PTR_ERR(fep->clk_ipg); in fec_probe()
4397 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); in fec_probe()
4398 if (IS_ERR(fep->clk_ahb)) { in fec_probe()
4399 ret = PTR_ERR(fep->clk_ahb); in fec_probe()
4403 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); in fec_probe()
4406 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); in fec_probe()
4407 if (IS_ERR(fep->clk_enet_out)) { in fec_probe()
4408 ret = PTR_ERR(fep->clk_enet_out); in fec_probe()
4412 fep->ptp_clk_on = false; in fec_probe()
4413 mutex_init(&fep->ptp_clk_mutex); in fec_probe()
4416 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); in fec_probe()
4417 if (IS_ERR(fep->clk_ref)) { in fec_probe()
4418 ret = PTR_ERR(fep->clk_ref); in fec_probe()
4421 fep->clk_ref_rate = clk_get_rate(fep->clk_ref); in fec_probe()
4424 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { in fec_probe()
4425 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); in fec_probe()
4426 if (IS_ERR(fep->clk_2x_txclk)) in fec_probe()
4427 fep->clk_2x_txclk = NULL; in fec_probe()
4430 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; in fec_probe()
4431 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); in fec_probe()
4432 if (IS_ERR(fep->clk_ptp)) { in fec_probe()
4433 fep->clk_ptp = NULL; in fec_probe()
4434 fep->bufdesc_ex = false; in fec_probe()
4441 ret = clk_prepare_enable(fep->clk_ipg); in fec_probe()
4444 ret = clk_prepare_enable(fep->clk_ahb); in fec_probe()
4448 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); in fec_probe()
4449 if (!IS_ERR(fep->reg_phy)) { in fec_probe()
4450 ret = regulator_enable(fep->reg_phy); in fec_probe()
4452 dev_err(&pdev->dev, in fec_probe()
4457 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { in fec_probe()
4458 ret = -EPROBE_DEFER; in fec_probe()
4461 fep->reg_phy = NULL; in fec_probe()
4464 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); in fec_probe()
4465 pm_runtime_use_autosuspend(&pdev->dev); in fec_probe()
4466 pm_runtime_get_noresume(&pdev->dev); in fec_probe()
4467 pm_runtime_set_active(&pdev->dev); in fec_probe()
4468 pm_runtime_enable(&pdev->dev); in fec_probe()
4475 if (fep->bufdesc_ex) in fec_probe()
4491 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, in fec_probe()
4492 0, pdev->name, ndev); in fec_probe()
4496 fep->irq[i] = irq; in fec_probe()
4509 pinctrl_pm_select_sleep_state(&pdev->dev); in fec_probe()
4511 ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN; in fec_probe()
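The max_mtu assignment above is plain arithmetic: the controller's maximum frame buffer minus the 14-byte Ethernet header and the 4-byte FCS. A one-liner check, assuming a 1984-byte buffer limit for this sketch:

#include <stdio.h>

int main(void)
{
	const int pkt_maxbuf = 1984; /* assumed maximum frame buffer size */
	const int eth_hlen = 14;     /* ETH_HLEN */
	const int eth_fcs = 4;       /* ETH_FCS_LEN */

	printf("max_mtu = %d\n", pkt_maxbuf - eth_hlen - eth_fcs); /* 1966 */
	return 0;
}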
4517 device_init_wakeup(&ndev->dev, fep->wol_flag & in fec_probe()
4520 if (fep->bufdesc_ex && fep->ptp_clock) in fec_probe()
4521 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); in fec_probe()
4523 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); in fec_probe()
4525 pm_runtime_mark_last_busy(&pdev->dev); in fec_probe()
4526 pm_runtime_put_autosuspend(&pdev->dev); in fec_probe()
4537 pm_runtime_put_noidle(&pdev->dev); in fec_probe()
4538 pm_runtime_disable(&pdev->dev); in fec_probe()
4539 if (fep->reg_phy) in fec_probe()
4540 regulator_disable(fep->reg_phy); in fec_probe()
4542 clk_disable_unprepare(fep->clk_ahb); in fec_probe()
4544 clk_disable_unprepare(fep->clk_ipg); in fec_probe()
4555 dev_id--; in fec_probe()
4567 struct device_node *np = pdev->dev.of_node; in fec_drv_remove()
4570 ret = pm_runtime_get_sync(&pdev->dev); in fec_drv_remove()
4572 dev_err(&pdev->dev, in fec_drv_remove()
4576 cancel_work_sync(&fep->tx_timeout_work); in fec_drv_remove()
4580 if (fep->reg_phy) in fec_drv_remove()
4581 regulator_disable(fep->reg_phy); in fec_drv_remove()
4585 of_node_put(fep->phy_node); in fec_drv_remove()
4591 clk_disable_unprepare(fep->clk_ahb); in fec_drv_remove()
4592 clk_disable_unprepare(fep->clk_ipg); in fec_drv_remove()
4594 pm_runtime_put_noidle(&pdev->dev); in fec_drv_remove()
4595 pm_runtime_disable(&pdev->dev); in fec_drv_remove()
4608 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) in fec_suspend()
4609 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; in fec_suspend()
4610 phy_stop(ndev->phydev); in fec_suspend()
4611 napi_disable(&fep->napi); in fec_suspend()
4616 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { in fec_suspend()
4618 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_suspend()
4621 if (fep->wake_irq > 0) { in fec_suspend()
4622 disable_irq(fep->wake_irq); in fec_suspend()
4623 enable_irq_wake(fep->wake_irq); in fec_suspend()
4630 fep->rpm_active = !pm_runtime_status_suspended(dev); in fec_suspend()
4631 if (fep->rpm_active) { in fec_suspend()
4641 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) in fec_suspend()
4642 regulator_disable(fep->reg_phy); in fec_suspend()
4647 if (fep->clk_enet_out || fep->reg_phy) in fec_suspend()
4648 fep->link = 0; in fec_suspend()
4660 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { in fec_resume()
4661 ret = regulator_enable(fep->reg_phy); in fec_resume()
4668 if (fep->rpm_active) in fec_resume()
4676 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { in fec_resume()
4678 if (fep->wake_irq) { in fec_resume()
4679 disable_irq_wake(fep->wake_irq); in fec_resume()
4680 enable_irq(fep->wake_irq); in fec_resume()
4683 val = readl(fep->hwp + FEC_ECNTRL); in fec_resume()
4685 writel(val, fep->hwp + FEC_ECNTRL); in fec_resume()
4686 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; in fec_resume()
4688 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_resume()
4694 napi_enable(&fep->napi); in fec_resume()
4695 phy_init_hw(ndev->phydev); in fec_resume()
4696 phy_start(ndev->phydev); in fec_resume()
4703 if (fep->reg_phy) in fec_resume()
4704 regulator_disable(fep->reg_phy); in fec_resume()
4713 clk_disable_unprepare(fep->clk_ahb); in fec_runtime_suspend()
4714 clk_disable_unprepare(fep->clk_ipg); in fec_runtime_suspend()
4725 ret = clk_prepare_enable(fep->clk_ahb); in fec_runtime_resume()
4728 ret = clk_prepare_enable(fep->clk_ipg); in fec_runtime_resume()
4735 clk_disable_unprepare(fep->clk_ahb); in fec_runtime_resume()