Lines Matching "mtl-rx-config"
9 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
129 #include "xgbe-common.h"
176 for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) { in xgbe_free_channels()
177 if (!pdata->channel[i]) in xgbe_free_channels()
180 kfree(pdata->channel[i]->rx_ring); in xgbe_free_channels()
181 kfree(pdata->channel[i]->tx_ring); in xgbe_free_channels()
182 kfree(pdata->channel[i]); in xgbe_free_channels()
184 pdata->channel[i] = NULL; in xgbe_free_channels()
187 pdata->channel_count = 0; in xgbe_free_channels()
198 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); in xgbe_alloc_channels()
201 cpu = cpumask_local_spread(i, dev_to_node(pdata->dev)); in xgbe_alloc_channels()
209 pdata->channel[i] = channel; in xgbe_alloc_channels()
211 snprintf(channel->name, sizeof(channel->name), "channel-%u", i); in xgbe_alloc_channels()
212 channel->pdata = pdata; in xgbe_alloc_channels()
213 channel->queue_index = i; in xgbe_alloc_channels()
214 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + in xgbe_alloc_channels()
216 channel->node = node; in xgbe_alloc_channels()
217 cpumask_set_cpu(cpu, &channel->affinity_mask); in xgbe_alloc_channels()
219 if (pdata->per_channel_irq) in xgbe_alloc_channels()
220 channel->dma_irq = pdata->channel_irq[i]; in xgbe_alloc_channels()
222 if (i < pdata->tx_ring_count) { in xgbe_alloc_channels()
227 spin_lock_init(&ring->lock); in xgbe_alloc_channels()
228 ring->node = node; in xgbe_alloc_channels()
230 channel->tx_ring = ring; in xgbe_alloc_channels()
233 if (i < pdata->rx_ring_count) { in xgbe_alloc_channels()
238 spin_lock_init(&ring->lock); in xgbe_alloc_channels()
239 ring->node = node; in xgbe_alloc_channels()
241 channel->rx_ring = ring; in xgbe_alloc_channels()
244 netif_dbg(pdata, drv, pdata->netdev, in xgbe_alloc_channels()
245 "%s: cpu=%u, node=%d\n", channel->name, cpu, node); in xgbe_alloc_channels()
247 netif_dbg(pdata, drv, pdata->netdev, in xgbe_alloc_channels()
248 "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n", in xgbe_alloc_channels()
249 channel->name, channel->dma_regs, channel->dma_irq, in xgbe_alloc_channels()
250 channel->tx_ring, channel->rx_ring); in xgbe_alloc_channels()
253 pdata->channel_count = count; in xgbe_alloc_channels()
260 return -ENOMEM; in xgbe_alloc_channels()
265 return (ring->rdesc_count - (ring->cur - ring->dirty)); in xgbe_tx_avail_desc()
270 return (ring->cur - ring->dirty); in xgbe_rx_dirty_desc()
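
Both helpers above depend on ring->cur and ring->dirty being free-running unsigned counters that are masked only when an entry is actually dereferenced; unsigned subtraction then yields the in-flight count correctly even across wraparound. A minimal standalone sketch of the idiom (struct and names are illustrative, not the driver's):

struct ring_idx {
	unsigned int rdesc_count;	/* ring size */
	unsigned int cur;		/* producer: next entry to use */
	unsigned int dirty;		/* consumer: next entry to clean */
};

/* Entries currently outstanding (modular arithmetic keeps this
 * correct even after 'cur' wraps past UINT_MAX). */
static unsigned int ring_dirty(const struct ring_idx *r)
{
	return r->cur - r->dirty;
}

/* Entries still available to the driver. */
static unsigned int ring_avail(const struct ring_idx *r)
{
	return r->rdesc_count - ring_dirty(r);
}
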
276 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_maybe_stop_tx_queue()
279 netif_info(pdata, drv, pdata->netdev, in xgbe_maybe_stop_tx_queue()
281 netif_stop_subqueue(pdata->netdev, channel->queue_index); in xgbe_maybe_stop_tx_queue()
282 ring->tx.queue_stopped = 1; in xgbe_maybe_stop_tx_queue()
287 if (ring->tx.xmit_more) in xgbe_maybe_stop_tx_queue()
288 pdata->hw_if.tx_start_xmit(channel, ring); in xgbe_maybe_stop_tx_queue()
303 rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & in xgbe_calc_rx_buf_size()
304 ~(XGBE_RX_BUF_ALIGN - 1); in xgbe_calc_rx_buf_size()
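
The buffer-size computation above is the classic power-of-two round-up: add (align - 1), then mask off the low bits. A quick illustration, assuming XGBE_RX_BUF_ALIGN is 64 (the value here is an assumption):

/* Round 'size' up to a multiple of 'align' (align: power of two). */
#define ALIGN_UP(size, align)	(((size) + (align) - 1) & ~((align) - 1))

/* With an assumed 64-byte alignment:
 *   ALIGN_UP(1500, 64) == 1536
 *   ALIGN_UP(1536, 64) == 1536   (already aligned, unchanged)
 */
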
312 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_enable_rx_tx_int()
315 if (channel->tx_ring && channel->rx_ring) in xgbe_enable_rx_tx_int()
317 else if (channel->tx_ring) in xgbe_enable_rx_tx_int()
319 else if (channel->rx_ring) in xgbe_enable_rx_tx_int()
324 hw_if->enable_int(channel, int_id); in xgbe_enable_rx_tx_int()
331 for (i = 0; i < pdata->channel_count; i++) in xgbe_enable_rx_tx_ints()
332 xgbe_enable_rx_tx_int(pdata, pdata->channel[i]); in xgbe_enable_rx_tx_ints()
338 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_disable_rx_tx_int()
341 if (channel->tx_ring && channel->rx_ring) in xgbe_disable_rx_tx_int()
343 else if (channel->tx_ring) in xgbe_disable_rx_tx_int()
345 else if (channel->rx_ring) in xgbe_disable_rx_tx_int()
350 hw_if->disable_int(channel, int_id); in xgbe_disable_rx_tx_int()
357 for (i = 0; i < pdata->channel_count; i++) in xgbe_disable_rx_tx_ints()
358 xgbe_disable_rx_tx_int(pdata, pdata->channel[i]); in xgbe_disable_rx_tx_ints()
372 dev_warn_once(pdata->dev, in xgbe_ecc_sec()
377 dev_warn_once(pdata->dev, in xgbe_ecc_sec()
397 netdev_alert(pdata->netdev, in xgbe_ecc_ded()
415 netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr); in xgbe_ecc_isr_task()
418 stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period, in xgbe_ecc_isr_task()
419 &pdata->tx_ded_count, "TX fifo"); in xgbe_ecc_isr_task()
423 stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period, in xgbe_ecc_isr_task()
424 &pdata->rx_ded_count, "RX fifo"); in xgbe_ecc_isr_task()
428 stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period, in xgbe_ecc_isr_task()
429 &pdata->desc_ded_count, in xgbe_ecc_isr_task()
434 pdata->hw_if.disable_ecc_ded(pdata); in xgbe_ecc_isr_task()
435 schedule_work(&pdata->stopdev_work); in xgbe_ecc_isr_task()
440 if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period, in xgbe_ecc_isr_task()
441 &pdata->tx_sec_count, "TX fifo")) in xgbe_ecc_isr_task()
442 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX); in xgbe_ecc_isr_task()
446 if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period, in xgbe_ecc_isr_task()
447 &pdata->rx_sec_count, "RX fifo")) in xgbe_ecc_isr_task()
448 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX); in xgbe_ecc_isr_task()
451 if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period, in xgbe_ecc_isr_task()
452 &pdata->desc_sec_count, "descriptor cache")) in xgbe_ecc_isr_task()
453 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC); in xgbe_ecc_isr_task()
460 if (pdata->vdata->irq_reissue_support) in xgbe_ecc_isr_task()
468 if (pdata->isr_as_tasklet) in xgbe_ecc_isr()
469 tasklet_schedule(&pdata->tasklet_ecc); in xgbe_ecc_isr()
471 xgbe_ecc_isr_task(&pdata->tasklet_ecc); in xgbe_ecc_isr()
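
xgbe_ecc_isr() (and xgbe_isr() below) either schedules a tasklet or runs the handler body inline, depending on isr_as_tasklet. A hedged sketch of that dispatch pattern with hypothetical names; what sets the flag (e.g. the interrupt type) is an assumption here:

#include <linux/interrupt.h>

struct my_dev {				/* hypothetical device */
	unsigned int isr_as_tasklet;
	struct tasklet_struct tasklet;
};

static void my_isr_task(struct tasklet_struct *t)
{
	/* heavy interrupt work runs here, in softirq context */
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *dev = data;

	if (dev->isr_as_tasklet)
		tasklet_schedule(&dev->tasklet);	/* defer */
	else
		my_isr_task(&dev->tasklet);		/* run inline */

	return IRQ_HANDLED;
}
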
479 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_isr_task()
485 /* The DMA interrupt status register also reports MAC and MTL in xgbe_isr_task()
487 * this register to be non-zero in xgbe_isr_task()
493 netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); in xgbe_isr_task()
495 for (i = 0; i < pdata->channel_count; i++) { in xgbe_isr_task()
499 channel = pdata->channel[i]; in xgbe_isr_task()
502 netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", in xgbe_isr_task()
509 if (!pdata->per_channel_irq && in xgbe_isr_task()
512 if (napi_schedule_prep(&pdata->napi)) { in xgbe_isr_task()
513 /* Disable Tx and Rx interrupts */ in xgbe_isr_task()
517 __napi_schedule(&pdata->napi); in xgbe_isr_task()
520 /* Don't clear Rx/Tx status if doing per channel DMA in xgbe_isr_task()
529 pdata->ext_stats.rx_buffer_unavailable++; in xgbe_isr_task()
533 schedule_work(&pdata->restart_work); in xgbe_isr_task()
542 netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n", in xgbe_isr_task()
546 hw_if->tx_mmc_int(pdata); in xgbe_isr_task()
549 hw_if->rx_mmc_int(pdata); in xgbe_isr_task()
554 netif_dbg(pdata, intr, pdata->netdev, in xgbe_isr_task()
559 pdata->tx_tstamp = in xgbe_isr_task()
560 hw_if->get_tx_tstamp(pdata); in xgbe_isr_task()
561 queue_work(pdata->dev_workqueue, in xgbe_isr_task()
562 &pdata->tx_tstamp_work); in xgbe_isr_task()
569 netif_dbg(pdata, intr, pdata->netdev, in xgbe_isr_task()
574 complete(&pdata->mdio_complete); in xgbe_isr_task()
580 if (pdata->dev_irq == pdata->an_irq) in xgbe_isr_task()
581 pdata->phy_if.an_isr(pdata); in xgbe_isr_task()
584 if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq)) in xgbe_isr_task()
585 xgbe_ecc_isr_task(&pdata->tasklet_ecc); in xgbe_isr_task()
588 if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq)) in xgbe_isr_task()
589 pdata->i2c_if.i2c_isr(pdata); in xgbe_isr_task()
592 if (pdata->vdata->irq_reissue_support) { in xgbe_isr_task()
596 if (!pdata->per_channel_irq) in xgbe_isr_task()
607 if (pdata->isr_as_tasklet) in xgbe_isr()
608 tasklet_schedule(&pdata->tasklet_dev); in xgbe_isr()
610 xgbe_isr_task(&pdata->tasklet_dev); in xgbe_isr()
618 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_dma_isr()
624 if (napi_schedule_prep(&channel->napi)) { in xgbe_dma_isr()
625 /* Disable Tx and Rx interrupts */ in xgbe_dma_isr()
626 if (pdata->channel_irq_mode) in xgbe_dma_isr()
629 disable_irq_nosync(channel->dma_irq); in xgbe_dma_isr()
632 __napi_schedule_irqoff(&channel->napi); in xgbe_dma_isr()
635 /* Clear Tx/Rx signals */ in xgbe_dma_isr()
647 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_tx_timer()
650 DBGPR("-->xgbe_tx_timer\n"); in xgbe_tx_timer()
652 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; in xgbe_tx_timer()
655 /* Disable Tx and Rx interrupts */ in xgbe_tx_timer()
656 if (pdata->per_channel_irq) in xgbe_tx_timer()
657 if (pdata->channel_irq_mode) in xgbe_tx_timer()
660 disable_irq_nosync(channel->dma_irq); in xgbe_tx_timer()
668 channel->tx_timer_active = 0; in xgbe_tx_timer()
670 DBGPR("<--xgbe_tx_timer\n"); in xgbe_tx_timer()
679 pdata->phy_if.phy_status(pdata); in xgbe_service()
686 queue_work(pdata->dev_workqueue, &pdata->service_work); in xgbe_service_timer()
688 mod_timer(&pdata->service_timer, jiffies + HZ); in xgbe_service_timer()
696 timer_setup(&pdata->service_timer, xgbe_service_timer, 0); in xgbe_init_timers()
698 for (i = 0; i < pdata->channel_count; i++) { in xgbe_init_timers()
699 channel = pdata->channel[i]; in xgbe_init_timers()
700 if (!channel->tx_ring) in xgbe_init_timers()
703 timer_setup(&channel->tx_timer, xgbe_tx_timer, 0); in xgbe_init_timers()
709 mod_timer(&pdata->service_timer, jiffies + HZ); in xgbe_start_timers()
717 del_timer_sync(&pdata->service_timer); in xgbe_stop_timers()
719 for (i = 0; i < pdata->channel_count; i++) { in xgbe_stop_timers()
720 channel = pdata->channel[i]; in xgbe_stop_timers()
721 if (!channel->tx_ring) in xgbe_stop_timers()
724 del_timer_sync(&channel->tx_timer); in xgbe_stop_timers()
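
The timer handling above follows the standard kernel lifecycle: timer_setup() once at init, mod_timer() to (re)arm, del_timer_sync() to stop and wait for a running callback. A compact sketch of a self-rearming one-second tick like the service timer:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list svc_timer;	/* hypothetical timer */

static void svc_fn(struct timer_list *t)
{
	/* ... periodic work ... */
	mod_timer(&svc_timer, jiffies + HZ);	/* re-arm ~1s out */
}

static void svc_start(void)
{
	timer_setup(&svc_timer, svc_fn, 0);
	mod_timer(&svc_timer, jiffies + HZ);	/* first tick */
}

static void svc_stop(void)
{
	del_timer_sync(&svc_timer);	/* stop; wait for callback */
}
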
731 struct xgbe_hw_features *hw_feat = &pdata->hw_feat; in xgbe_get_all_hw_features()
739 hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); in xgbe_get_all_hw_features()
742 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); in xgbe_get_all_hw_features()
743 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); in xgbe_get_all_hw_features()
744 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); in xgbe_get_all_hw_features()
745 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); in xgbe_get_all_hw_features()
746 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); in xgbe_get_all_hw_features()
747 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); in xgbe_get_all_hw_features()
748 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); in xgbe_get_all_hw_features()
749 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); in xgbe_get_all_hw_features()
750 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); in xgbe_get_all_hw_features()
751 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); in xgbe_get_all_hw_features()
752 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); in xgbe_get_all_hw_features()
753 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, in xgbe_get_all_hw_features()
755 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); in xgbe_get_all_hw_features()
756 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); in xgbe_get_all_hw_features()
757 hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN); in xgbe_get_all_hw_features()
760 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
762 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
764 hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD); in xgbe_get_all_hw_features()
765 hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); in xgbe_get_all_hw_features()
766 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); in xgbe_get_all_hw_features()
767 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); in xgbe_get_all_hw_features()
768 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); in xgbe_get_all_hw_features()
769 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); in xgbe_get_all_hw_features()
770 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); in xgbe_get_all_hw_features()
771 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); in xgbe_get_all_hw_features()
772 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
774 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
778 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); in xgbe_get_all_hw_features()
779 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); in xgbe_get_all_hw_features()
780 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); in xgbe_get_all_hw_features()
781 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); in xgbe_get_all_hw_features()
782 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); in xgbe_get_all_hw_features()
783 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM); in xgbe_get_all_hw_features()
786 switch (hw_feat->hash_table_size) { in xgbe_get_all_hw_features()
790 hw_feat->hash_table_size = 64; in xgbe_get_all_hw_features()
793 hw_feat->hash_table_size = 128; in xgbe_get_all_hw_features()
796 hw_feat->hash_table_size = 256; in xgbe_get_all_hw_features()
801 switch (hw_feat->dma_width) { in xgbe_get_all_hw_features()
803 hw_feat->dma_width = 32; in xgbe_get_all_hw_features()
806 hw_feat->dma_width = 40; in xgbe_get_all_hw_features()
809 hw_feat->dma_width = 48; in xgbe_get_all_hw_features()
812 hw_feat->dma_width = 32; in xgbe_get_all_hw_features()
818 hw_feat->rx_q_cnt++; in xgbe_get_all_hw_features()
819 hw_feat->tx_q_cnt++; in xgbe_get_all_hw_features()
820 hw_feat->rx_ch_cnt++; in xgbe_get_all_hw_features()
821 hw_feat->tx_ch_cnt++; in xgbe_get_all_hw_features()
822 hw_feat->tc_cnt++; in xgbe_get_all_hw_features()
825 hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); in xgbe_get_all_hw_features()
826 hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); in xgbe_get_all_hw_features()
829 dev_dbg(pdata->dev, "Hardware features:\n"); in xgbe_get_all_hw_features()
832 dev_dbg(pdata->dev, " 1GbE support : %s\n", in xgbe_get_all_hw_features()
833 hw_feat->gmii ? "yes" : "no"); in xgbe_get_all_hw_features()
834 dev_dbg(pdata->dev, " VLAN hash filter : %s\n", in xgbe_get_all_hw_features()
835 hw_feat->vlhash ? "yes" : "no"); in xgbe_get_all_hw_features()
836 dev_dbg(pdata->dev, " MDIO interface : %s\n", in xgbe_get_all_hw_features()
837 hw_feat->sma ? "yes" : "no"); in xgbe_get_all_hw_features()
838 dev_dbg(pdata->dev, " Wake-up packet support : %s\n", in xgbe_get_all_hw_features()
839 hw_feat->rwk ? "yes" : "no"); in xgbe_get_all_hw_features()
840 dev_dbg(pdata->dev, " Magic packet support : %s\n", in xgbe_get_all_hw_features()
841 hw_feat->mgk ? "yes" : "no"); in xgbe_get_all_hw_features()
842 dev_dbg(pdata->dev, " Management counters : %s\n", in xgbe_get_all_hw_features()
843 hw_feat->mmc ? "yes" : "no"); in xgbe_get_all_hw_features()
844 dev_dbg(pdata->dev, " ARP offload : %s\n", in xgbe_get_all_hw_features()
845 hw_feat->aoe ? "yes" : "no"); in xgbe_get_all_hw_features()
846 dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n", in xgbe_get_all_hw_features()
847 hw_feat->ts ? "yes" : "no"); in xgbe_get_all_hw_features()
848 dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n", in xgbe_get_all_hw_features()
849 hw_feat->eee ? "yes" : "no"); in xgbe_get_all_hw_features()
850 dev_dbg(pdata->dev, " TX checksum offload : %s\n", in xgbe_get_all_hw_features()
851 hw_feat->tx_coe ? "yes" : "no"); in xgbe_get_all_hw_features()
852 dev_dbg(pdata->dev, " RX checksum offload : %s\n", in xgbe_get_all_hw_features()
853 hw_feat->rx_coe ? "yes" : "no"); in xgbe_get_all_hw_features()
854 dev_dbg(pdata->dev, " Additional MAC addresses : %u\n", in xgbe_get_all_hw_features()
855 hw_feat->addn_mac); in xgbe_get_all_hw_features()
856 dev_dbg(pdata->dev, " Timestamp source : %s\n", in xgbe_get_all_hw_features()
857 (hw_feat->ts_src == 1) ? "internal" : in xgbe_get_all_hw_features()
858 (hw_feat->ts_src == 2) ? "external" : in xgbe_get_all_hw_features()
859 (hw_feat->ts_src == 3) ? "internal/external" : "n/a"); in xgbe_get_all_hw_features()
860 dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n", in xgbe_get_all_hw_features()
861 hw_feat->sa_vlan_ins ? "yes" : "no"); in xgbe_get_all_hw_features()
862 dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n", in xgbe_get_all_hw_features()
863 hw_feat->vxn ? "yes" : "no"); in xgbe_get_all_hw_features()
866 dev_dbg(pdata->dev, " RX fifo size : %u\n", in xgbe_get_all_hw_features()
867 hw_feat->rx_fifo_size); in xgbe_get_all_hw_features()
868 dev_dbg(pdata->dev, " TX fifo size : %u\n", in xgbe_get_all_hw_features()
869 hw_feat->tx_fifo_size); in xgbe_get_all_hw_features()
870 dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n", in xgbe_get_all_hw_features()
871 hw_feat->adv_ts_hi ? "yes" : "no"); in xgbe_get_all_hw_features()
872 dev_dbg(pdata->dev, " DMA width : %u\n", in xgbe_get_all_hw_features()
873 hw_feat->dma_width); in xgbe_get_all_hw_features()
874 dev_dbg(pdata->dev, " Data Center Bridging : %s\n", in xgbe_get_all_hw_features()
875 hw_feat->dcb ? "yes" : "no"); in xgbe_get_all_hw_features()
876 dev_dbg(pdata->dev, " Split header : %s\n", in xgbe_get_all_hw_features()
877 hw_feat->sph ? "yes" : "no"); in xgbe_get_all_hw_features()
878 dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n", in xgbe_get_all_hw_features()
879 hw_feat->tso ? "yes" : "no"); in xgbe_get_all_hw_features()
880 dev_dbg(pdata->dev, " Debug memory interface : %s\n", in xgbe_get_all_hw_features()
881 hw_feat->dma_debug ? "yes" : "no"); in xgbe_get_all_hw_features()
882 dev_dbg(pdata->dev, " Receive Side Scaling : %s\n", in xgbe_get_all_hw_features()
883 hw_feat->rss ? "yes" : "no"); in xgbe_get_all_hw_features()
884 dev_dbg(pdata->dev, " Traffic Class count : %u\n", in xgbe_get_all_hw_features()
885 hw_feat->tc_cnt); in xgbe_get_all_hw_features()
886 dev_dbg(pdata->dev, " Hash table size : %u\n", in xgbe_get_all_hw_features()
887 hw_feat->hash_table_size); in xgbe_get_all_hw_features()
888 dev_dbg(pdata->dev, " L3/L4 Filters : %u\n", in xgbe_get_all_hw_features()
889 hw_feat->l3l4_filter_num); in xgbe_get_all_hw_features()
892 dev_dbg(pdata->dev, " RX queue count : %u\n", in xgbe_get_all_hw_features()
893 hw_feat->rx_q_cnt); in xgbe_get_all_hw_features()
894 dev_dbg(pdata->dev, " TX queue count : %u\n", in xgbe_get_all_hw_features()
895 hw_feat->tx_q_cnt); in xgbe_get_all_hw_features()
896 dev_dbg(pdata->dev, " RX DMA channel count : %u\n", in xgbe_get_all_hw_features()
897 hw_feat->rx_ch_cnt); in xgbe_get_all_hw_features()
898 dev_dbg(pdata->dev, " TX DMA channel count : %u\n", in xgbe_get_all_hw_features()
899 hw_feat->tx_ch_cnt); in xgbe_get_all_hw_features()
900 dev_dbg(pdata->dev, " PPS outputs : %u\n", in xgbe_get_all_hw_features()
901 hw_feat->pps_out_num); in xgbe_get_all_hw_features()
902 dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n", in xgbe_get_all_hw_features()
903 hw_feat->aux_snap_num); in xgbe_get_all_hw_features()
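
Everything in xgbe_get_all_hw_features() is shift-and-mask bitfield extraction from the three hardware-feature registers, plus the 2^(n+7) decode for the FIFO size fields. A minimal sketch of the style with a locally defined macro (the positions and widths below are illustrative, not the real register layout):

/* Extract WIDTH bits starting at bit POS. */
#define GET_BITS(val, pos, width) \
	(((val) >> (pos)) & ((1U << (width)) - 1))

static void decode_example(unsigned int hwf)
{
	unsigned int tx_coe = GET_BITS(hwf, 14, 1);	/* capability flag */
	unsigned int addn_mac = GET_BITS(hwf, 18, 5);	/* 5-bit count */
	/* FIFO sizes are an exponent: n -> 2^(n+7) bytes (n=0 -> 128). */
	unsigned int fifo_bytes = 1U << (GET_BITS(hwf, 0, 5) + 7);

	(void)tx_coe; (void)addn_mac; (void)fifo_bytes;
}
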
912 pdata->vxlan_port = be16_to_cpu(ti->port); in xgbe_vxlan_set_port()
913 pdata->hw_if.enable_vxlan(pdata); in xgbe_vxlan_set_port()
923 pdata->hw_if.disable_vxlan(pdata); in xgbe_vxlan_unset_port()
924 pdata->vxlan_port = 0; in xgbe_vxlan_unset_port()
948 if (pdata->per_channel_irq) { in xgbe_napi_enable()
949 for (i = 0; i < pdata->channel_count; i++) { in xgbe_napi_enable()
950 channel = pdata->channel[i]; in xgbe_napi_enable()
952 netif_napi_add(pdata->netdev, &channel->napi, in xgbe_napi_enable()
955 napi_enable(&channel->napi); in xgbe_napi_enable()
959 netif_napi_add(pdata->netdev, &pdata->napi, in xgbe_napi_enable()
962 napi_enable(&pdata->napi); in xgbe_napi_enable()
971 if (pdata->per_channel_irq) { in xgbe_napi_disable()
972 for (i = 0; i < pdata->channel_count; i++) { in xgbe_napi_disable()
973 channel = pdata->channel[i]; in xgbe_napi_disable()
974 napi_disable(&channel->napi); in xgbe_napi_disable()
977 netif_napi_del(&channel->napi); in xgbe_napi_disable()
980 napi_disable(&pdata->napi); in xgbe_napi_disable()
983 netif_napi_del(&pdata->napi); in xgbe_napi_disable()
990 struct net_device *netdev = pdata->netdev; in xgbe_request_irqs()
994 tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task); in xgbe_request_irqs()
995 tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task); in xgbe_request_irqs()
997 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, in xgbe_request_irqs()
1001 pdata->dev_irq); in xgbe_request_irqs()
1005 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) { in xgbe_request_irqs()
1006 ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr, in xgbe_request_irqs()
1007 0, pdata->ecc_name, pdata); in xgbe_request_irqs()
1010 pdata->ecc_irq); in xgbe_request_irqs()
1015 if (!pdata->per_channel_irq) in xgbe_request_irqs()
1018 for (i = 0; i < pdata->channel_count; i++) { in xgbe_request_irqs()
1019 channel = pdata->channel[i]; in xgbe_request_irqs()
1020 snprintf(channel->dma_irq_name, in xgbe_request_irqs()
1021 sizeof(channel->dma_irq_name) - 1, in xgbe_request_irqs()
1022 "%s-TxRx-%u", netdev_name(netdev), in xgbe_request_irqs()
1023 channel->queue_index); in xgbe_request_irqs()
1025 ret = devm_request_irq(pdata->dev, channel->dma_irq, in xgbe_request_irqs()
1027 channel->dma_irq_name, channel); in xgbe_request_irqs()
1030 channel->dma_irq); in xgbe_request_irqs()
1034 irq_set_affinity_hint(channel->dma_irq, in xgbe_request_irqs()
1035 &channel->affinity_mask); in xgbe_request_irqs()
1042 for (i--; i < pdata->channel_count; i--) { /* unsigned 'i' wraps to UINT_MAX and ends the loop */ in xgbe_request_irqs()
1043 channel = pdata->channel[i]; in xgbe_request_irqs()
1045 irq_set_affinity_hint(channel->dma_irq, NULL); in xgbe_request_irqs()
1046 devm_free_irq(pdata->dev, channel->dma_irq, channel); in xgbe_request_irqs()
1049 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) in xgbe_request_irqs()
1050 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); in xgbe_request_irqs()
1053 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); in xgbe_request_irqs()
1063 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); in xgbe_free_irqs()
1065 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) in xgbe_free_irqs()
1066 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); in xgbe_free_irqs()
1068 if (!pdata->per_channel_irq) in xgbe_free_irqs()
1071 for (i = 0; i < pdata->channel_count; i++) { in xgbe_free_irqs()
1072 channel = pdata->channel[i]; in xgbe_free_irqs()
1074 irq_set_affinity_hint(channel->dma_irq, NULL); in xgbe_free_irqs()
1075 devm_free_irq(pdata->dev, channel->dma_irq, channel); in xgbe_free_irqs()
1081 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_init_tx_coalesce()
1083 DBGPR("-->xgbe_init_tx_coalesce\n"); in xgbe_init_tx_coalesce()
1085 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS; in xgbe_init_tx_coalesce()
1086 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES; in xgbe_init_tx_coalesce()
1088 hw_if->config_tx_coalesce(pdata); in xgbe_init_tx_coalesce()
1090 DBGPR("<--xgbe_init_tx_coalesce\n"); in xgbe_init_tx_coalesce()
1095 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_init_rx_coalesce()
1097 DBGPR("-->xgbe_init_rx_coalesce\n"); in xgbe_init_rx_coalesce()
1099 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); in xgbe_init_rx_coalesce()
1100 pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS; in xgbe_init_rx_coalesce()
1101 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; in xgbe_init_rx_coalesce()
1103 hw_if->config_rx_coalesce(pdata); in xgbe_init_rx_coalesce()
1105 DBGPR("<--xgbe_init_rx_coalesce\n"); in xgbe_init_rx_coalesce()
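
usec_to_riwt() converts the microsecond target into the Rx interrupt watchdog's register units before config_rx_coalesce() programs it. The exact unit is hardware specific; this sketch assumes the common scheme of a counter that ticks once per 256 system-clock cycles:

/* Sketch: usecs -> watchdog units, assuming one unit = 256 clocks. */
static unsigned int usec_to_riwt(unsigned long sysclk_hz, unsigned int usec)
{
	unsigned long long cycles;

	cycles = (unsigned long long)(sysclk_hz / 1000000) * usec;
	return (unsigned int)(cycles / 256);
}
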
1110 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_tx_data()
1115 DBGPR("-->xgbe_free_tx_data\n"); in xgbe_free_tx_data()
1117 for (i = 0; i < pdata->channel_count; i++) { in xgbe_free_tx_data()
1118 ring = pdata->channel[i]->tx_ring; in xgbe_free_tx_data()
1122 for (j = 0; j < ring->rdesc_count; j++) { in xgbe_free_tx_data()
1124 desc_if->unmap_rdata(pdata, rdata); in xgbe_free_tx_data()
1128 DBGPR("<--xgbe_free_tx_data\n"); in xgbe_free_tx_data()
1133 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_rx_data()
1138 DBGPR("-->xgbe_free_rx_data\n"); in xgbe_free_rx_data()
1140 for (i = 0; i < pdata->channel_count; i++) { in xgbe_free_rx_data()
1141 ring = pdata->channel[i]->rx_ring; in xgbe_free_rx_data()
1145 for (j = 0; j < ring->rdesc_count; j++) { in xgbe_free_rx_data()
1147 desc_if->unmap_rdata(pdata, rdata); in xgbe_free_rx_data()
1151 DBGPR("<--xgbe_free_rx_data\n"); in xgbe_free_rx_data()
1156 pdata->phy_link = -1; in xgbe_phy_reset()
1157 pdata->phy_speed = SPEED_UNKNOWN; in xgbe_phy_reset()
1159 return pdata->phy_if.phy_reset(pdata); in xgbe_phy_reset()
1165 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_powerdown()
1168 DBGPR("-->xgbe_powerdown\n"); in xgbe_powerdown()
1171 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) { in xgbe_powerdown()
1173 DBGPR("<--xgbe_powerdown\n"); in xgbe_powerdown()
1174 return -EINVAL; in xgbe_powerdown()
1177 spin_lock_irqsave(&pdata->lock, flags); in xgbe_powerdown()
1185 flush_workqueue(pdata->dev_workqueue); in xgbe_powerdown()
1187 hw_if->powerdown_tx(pdata); in xgbe_powerdown()
1188 hw_if->powerdown_rx(pdata); in xgbe_powerdown()
1192 pdata->power_down = 1; in xgbe_powerdown()
1194 spin_unlock_irqrestore(&pdata->lock, flags); in xgbe_powerdown()
1196 DBGPR("<--xgbe_powerdown\n"); in xgbe_powerdown()
1204 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_powerup()
1207 DBGPR("-->xgbe_powerup\n"); in xgbe_powerup()
1210 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) { in xgbe_powerup()
1212 DBGPR("<--xgbe_powerup\n"); in xgbe_powerup()
1213 return -EINVAL; in xgbe_powerup()
1216 spin_lock_irqsave(&pdata->lock, flags); in xgbe_powerup()
1218 pdata->power_down = 0; in xgbe_powerup()
1222 hw_if->powerup_tx(pdata); in xgbe_powerup()
1223 hw_if->powerup_rx(pdata); in xgbe_powerup()
1232 spin_unlock_irqrestore(&pdata->lock, flags); in xgbe_powerup()
1234 DBGPR("<--xgbe_powerup\n"); in xgbe_powerup()
1241 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_memory()
1244 desc_if->free_ring_resources(pdata); in xgbe_free_memory()
1252 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_alloc_memory()
1253 struct net_device *netdev = pdata->netdev; in xgbe_alloc_memory()
1256 if (pdata->new_tx_ring_count) { in xgbe_alloc_memory()
1257 pdata->tx_ring_count = pdata->new_tx_ring_count; in xgbe_alloc_memory()
1258 pdata->tx_q_count = pdata->tx_ring_count; in xgbe_alloc_memory()
1259 pdata->new_tx_ring_count = 0; in xgbe_alloc_memory()
1262 if (pdata->new_rx_ring_count) { in xgbe_alloc_memory()
1263 pdata->rx_ring_count = pdata->new_rx_ring_count; in xgbe_alloc_memory()
1264 pdata->new_rx_ring_count = 0; in xgbe_alloc_memory()
1267 /* Calculate the Rx buffer size before allocating rings */ in xgbe_alloc_memory()
1268 pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu); in xgbe_alloc_memory()
1276 ret = desc_if->alloc_ring_resources(pdata); in xgbe_alloc_memory()
1293 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_start()
1294 struct xgbe_phy_if *phy_if = &pdata->phy_if; in xgbe_start()
1295 struct net_device *netdev = pdata->netdev; in xgbe_start()
1300 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); in xgbe_start()
1306 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); in xgbe_start()
1308 netdev_err(netdev, "error setting real rx queue count\n"); in xgbe_start()
1314 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, in xgbe_start()
1315 i % pdata->rx_ring_count); in xgbe_start()
1317 ret = hw_if->init(pdata); in xgbe_start()
1327 ret = phy_if->phy_start(pdata); in xgbe_start()
1331 hw_if->enable_tx(pdata); in xgbe_start()
1332 hw_if->enable_rx(pdata); in xgbe_start()
1339 queue_work(pdata->dev_workqueue, &pdata->service_work); in xgbe_start()
1341 clear_bit(XGBE_STOPPED, &pdata->dev_state); in xgbe_start()
1351 hw_if->exit(pdata); in xgbe_start()
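
The RSS setup near the top of xgbe_start() round-robins hash buckets over the Rx rings with i % pdata->rx_ring_count, so traffic spreads evenly for any ring count. A standalone sketch of that fill (the table size is an assumption):

#define RSS_TABLE_SIZE	256	/* assumed indirection table size */

static void rss_fill(unsigned int *table, unsigned int rings)
{
	unsigned int i;

	for (i = 0; i < RSS_TABLE_SIZE; i++)
		table[i] = i % rings;	/* bucket i -> ring (i mod rings) */
}
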
1358 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_stop()
1359 struct xgbe_phy_if *phy_if = &pdata->phy_if; in xgbe_stop()
1361 struct net_device *netdev = pdata->netdev; in xgbe_stop()
1365 DBGPR("-->xgbe_stop\n"); in xgbe_stop()
1367 if (test_bit(XGBE_STOPPED, &pdata->dev_state)) in xgbe_stop()
1373 flush_workqueue(pdata->dev_workqueue); in xgbe_stop()
1377 hw_if->disable_tx(pdata); in xgbe_stop()
1378 hw_if->disable_rx(pdata); in xgbe_stop()
1380 phy_if->phy_stop(pdata); in xgbe_stop()
1386 hw_if->exit(pdata); in xgbe_stop()
1388 for (i = 0; i < pdata->channel_count; i++) { in xgbe_stop()
1389 channel = pdata->channel[i]; in xgbe_stop()
1390 if (!channel->tx_ring) in xgbe_stop()
1393 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_stop()
1397 set_bit(XGBE_STOPPED, &pdata->dev_state); in xgbe_stop()
1399 DBGPR("<--xgbe_stop\n"); in xgbe_stop()
1417 netdev_alert(pdata->netdev, "device stopped\n"); in xgbe_stopdev()
1423 if (!netif_running(pdata->netdev)) in xgbe_full_restart_dev()
1437 if (!netif_running(pdata->netdev)) in xgbe_restart_dev()
1470 spin_lock_irqsave(&pdata->tstamp_lock, flags); in xgbe_tx_tstamp()
1471 if (!pdata->tx_tstamp_skb) in xgbe_tx_tstamp()
1474 if (pdata->tx_tstamp) { in xgbe_tx_tstamp()
1475 nsec = timecounter_cyc2time(&pdata->tstamp_tc, in xgbe_tx_tstamp()
1476 pdata->tx_tstamp); in xgbe_tx_tstamp()
1480 skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps); in xgbe_tx_tstamp()
1483 dev_kfree_skb_any(pdata->tx_tstamp_skb); in xgbe_tx_tstamp()
1485 pdata->tx_tstamp_skb = NULL; in xgbe_tx_tstamp()
1488 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); in xgbe_tx_tstamp()
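
The timestamp path converts the raw cycle count from get_tx_tstamp() to nanoseconds with timecounter_cyc2time() and hands it to the stack via skb_tstamp_tx(). A hedged sketch of just that delivery step (the timecounter is assumed initialized elsewhere):

#include <linux/timecounter.h>
#include <linux/skbuff.h>

static void deliver_tx_tstamp(struct timecounter *tc,
			      struct sk_buff *skb, u64 raw)
{
	struct skb_shared_hwtstamps hwts;
	u64 nsec = timecounter_cyc2time(tc, raw);	/* cycles -> ns */

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = ns_to_ktime(nsec);
	skb_tstamp_tx(skb, &hwts);	/* clone to the owning socket */
}
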
1494 if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, in xgbe_get_hwtstamp_settings()
1495 sizeof(pdata->tstamp_config))) in xgbe_get_hwtstamp_settings()
1496 return -EFAULT; in xgbe_get_hwtstamp_settings()
1504 struct hwtstamp_config config; in xgbe_set_hwtstamp_settings() local
1507 if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) in xgbe_set_hwtstamp_settings()
1508 return -EFAULT; in xgbe_set_hwtstamp_settings()
1510 if (config.flags) in xgbe_set_hwtstamp_settings()
1511 return -EINVAL; in xgbe_set_hwtstamp_settings()
1515 switch (config.tx_type) { in xgbe_set_hwtstamp_settings()
1524 return -ERANGE; in xgbe_set_hwtstamp_settings()
1527 switch (config.rx_filter) { in xgbe_set_hwtstamp_settings()
1625 return -ERANGE; in xgbe_set_hwtstamp_settings()
1628 pdata->hw_if.config_tstamp(pdata, mac_tscr); in xgbe_set_hwtstamp_settings()
1630 memcpy(&pdata->tstamp_config, &config, sizeof(config)); in xgbe_set_hwtstamp_settings()
1641 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) { in xgbe_prep_tx_tstamp()
1642 spin_lock_irqsave(&pdata->tstamp_lock, flags); in xgbe_prep_tx_tstamp()
1643 if (pdata->tx_tstamp_skb) { in xgbe_prep_tx_tstamp()
1645 XGMAC_SET_BITS(packet->attributes, in xgbe_prep_tx_tstamp()
1648 pdata->tx_tstamp_skb = skb_get(skb); in xgbe_prep_tx_tstamp()
1649 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in xgbe_prep_tx_tstamp()
1651 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); in xgbe_prep_tx_tstamp()
1660 packet->vlan_ctag = skb_vlan_tag_get(skb); in xgbe_prep_vlan()
1667 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_prep_tso()
1675 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) { in xgbe_prep_tso()
1676 packet->header_len = skb_inner_transport_offset(skb) + in xgbe_prep_tso()
1678 packet->tcp_header_len = inner_tcp_hdrlen(skb); in xgbe_prep_tso()
1680 packet->header_len = skb_transport_offset(skb) + in xgbe_prep_tso()
1682 packet->tcp_header_len = tcp_hdrlen(skb); in xgbe_prep_tso()
1684 packet->tcp_payload_len = skb->len - packet->header_len; in xgbe_prep_tso()
1685 packet->mss = skb_shinfo(skb)->gso_size; in xgbe_prep_tso()
1687 DBGPR(" packet->header_len=%u\n", packet->header_len); in xgbe_prep_tso()
1688 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n", in xgbe_prep_tso()
1689 packet->tcp_header_len, packet->tcp_payload_len); in xgbe_prep_tso()
1690 DBGPR(" packet->mss=%u\n", packet->mss); in xgbe_prep_tso()
1695 packet->tx_packets = skb_shinfo(skb)->gso_segs; in xgbe_prep_tso()
1696 packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len; in xgbe_prep_tso()
1703 if (!skb->encapsulation) in xgbe_is_vxlan()
1706 if (skb->ip_summed != CHECKSUM_PARTIAL) in xgbe_is_vxlan()
1709 switch (skb->protocol) { in xgbe_is_vxlan()
1711 if (ip_hdr(skb)->protocol != IPPROTO_UDP) in xgbe_is_vxlan()
1716 if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP) in xgbe_is_vxlan()
1724 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || in xgbe_is_vxlan()
1725 skb->inner_protocol != htons(ETH_P_TEB) || in xgbe_is_vxlan()
1726 (skb_inner_mac_header(skb) - skb_transport_header(skb) != in xgbe_is_vxlan()
1735 if (skb->ip_summed != CHECKSUM_PARTIAL) in xgbe_is_tso()
1755 packet->skb = skb; in xgbe_packet_info()
1758 packet->rdesc_count = 0; in xgbe_packet_info()
1760 packet->tx_packets = 1; in xgbe_packet_info()
1761 packet->tx_bytes = skb->len; in xgbe_packet_info()
1765 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { in xgbe_packet_info()
1767 packet->rdesc_count++; in xgbe_packet_info()
1771 packet->rdesc_count++; in xgbe_packet_info()
1773 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1775 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1777 } else if (skb->ip_summed == CHECKSUM_PARTIAL) in xgbe_packet_info()
1778 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1782 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1787 if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) in xgbe_packet_info()
1791 packet->rdesc_count++; in xgbe_packet_info()
1794 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1798 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in xgbe_packet_info()
1799 (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) in xgbe_packet_info()
1800 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1804 packet->rdesc_count++; in xgbe_packet_info()
1805 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); in xgbe_packet_info()
1808 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in xgbe_packet_info()
1809 frag = &skb_shinfo(skb)->frags[i]; in xgbe_packet_info()
1811 packet->rdesc_count++; in xgbe_packet_info()
1812 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); in xgbe_packet_info()
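
xgbe_packet_info() sizes the request up front: one descriptor per XGBE_TX_MAX_BUF_SIZE chunk of the linear data, the same per page fragment, plus extras for TSO/VLAN context descriptors. A standalone sketch of the per-chunk count (the 16320-byte limit is an assumption for illustration):

#define MAX_BUF	16320U	/* assumed per-descriptor byte limit */

/* Descriptors needed to carry 'len' bytes, mirroring the driver's
 * "len -= min(len, MAX_BUF)" loop. */
static unsigned int descs_for_len(unsigned int len)
{
	unsigned int n = 0;

	while (len) {
		n++;
		len -= (len < MAX_BUF) ? len : MAX_BUF;
	}
	return n;
}
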
1823 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", in xgbe_open()
1826 snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc", in xgbe_open()
1829 snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c", in xgbe_open()
1833 pdata->dev_workqueue = in xgbe_open()
1835 if (!pdata->dev_workqueue) { in xgbe_open()
1837 return -ENOMEM; in xgbe_open()
1840 pdata->an_workqueue = in xgbe_open()
1841 create_singlethread_workqueue(pdata->an_name); in xgbe_open()
1842 if (!pdata->an_workqueue) { in xgbe_open()
1844 ret = -ENOMEM; in xgbe_open()
1854 ret = clk_prepare_enable(pdata->sysclk); in xgbe_open()
1860 ret = clk_prepare_enable(pdata->ptpclk); in xgbe_open()
1866 INIT_WORK(&pdata->service_work, xgbe_service); in xgbe_open()
1867 INIT_WORK(&pdata->restart_work, xgbe_restart); in xgbe_open()
1868 INIT_WORK(&pdata->stopdev_work, xgbe_stopdev); in xgbe_open()
1869 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); in xgbe_open()
1879 clear_bit(XGBE_DOWN, &pdata->dev_state); in xgbe_open()
1887 clk_disable_unprepare(pdata->ptpclk); in xgbe_open()
1890 clk_disable_unprepare(pdata->sysclk); in xgbe_open()
1893 destroy_workqueue(pdata->an_workqueue); in xgbe_open()
1896 destroy_workqueue(pdata->dev_workqueue); in xgbe_open()
1911 clk_disable_unprepare(pdata->ptpclk); in xgbe_close()
1912 clk_disable_unprepare(pdata->sysclk); in xgbe_close()
1914 flush_workqueue(pdata->an_workqueue); in xgbe_close()
1915 destroy_workqueue(pdata->an_workqueue); in xgbe_close()
1917 flush_workqueue(pdata->dev_workqueue); in xgbe_close()
1918 destroy_workqueue(pdata->dev_workqueue); in xgbe_close()
1920 set_bit(XGBE_DOWN, &pdata->dev_state); in xgbe_close()
1928 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_xmit()
1929 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_xmit()
1936 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); in xgbe_xmit()
1938 channel = pdata->channel[skb->queue_mapping]; in xgbe_xmit()
1939 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_xmit()
1940 ring = channel->tx_ring; in xgbe_xmit()
1941 packet = &ring->packet_data; in xgbe_xmit()
1945 if (skb->len == 0) { in xgbe_xmit()
1957 ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count); in xgbe_xmit()
1970 if (!desc_if->map_tx_skb(channel, skb)) { in xgbe_xmit()
1978 netdev_tx_sent_queue(txq, packet->tx_bytes); in xgbe_xmit()
1981 hw_if->dev_xmit(channel); in xgbe_xmit()
1998 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_rx_mode()
2000 DBGPR("-->xgbe_set_rx_mode\n"); in xgbe_set_rx_mode()
2002 hw_if->config_rx_mode(pdata); in xgbe_set_rx_mode()
2004 DBGPR("<--xgbe_set_rx_mode\n"); in xgbe_set_rx_mode()
2010 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_mac_address()
2013 DBGPR("-->xgbe_set_mac_address\n"); in xgbe_set_mac_address()
2015 if (!is_valid_ether_addr(saddr->sa_data)) in xgbe_set_mac_address()
2016 return -EADDRNOTAVAIL; in xgbe_set_mac_address()
2018 memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); in xgbe_set_mac_address()
2020 hw_if->set_mac_address(pdata, netdev->dev_addr); in xgbe_set_mac_address()
2022 DBGPR("<--xgbe_set_mac_address\n"); in xgbe_set_mac_address()
2042 ret = -EOPNOTSUPP; in xgbe_ioctl()
2053 DBGPR("-->xgbe_change_mtu\n"); in xgbe_change_mtu()
2059 pdata->rx_buf_size = ret; in xgbe_change_mtu()
2060 netdev->mtu = mtu; in xgbe_change_mtu()
2064 DBGPR("<--xgbe_change_mtu\n"); in xgbe_change_mtu()
2074 schedule_work(&pdata->restart_work); in xgbe_tx_timeout()
2081 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; in xgbe_get_stats64()
2083 DBGPR("-->%s\n", __func__); in xgbe_get_stats64()
2085 pdata->hw_if.read_mmc_stats(pdata); in xgbe_get_stats64()
2087 s->rx_packets = pstats->rxframecount_gb; in xgbe_get_stats64()
2088 s->rx_bytes = pstats->rxoctetcount_gb; in xgbe_get_stats64()
2089 s->rx_errors = pstats->rxframecount_gb - in xgbe_get_stats64()
2090 pstats->rxbroadcastframes_g - in xgbe_get_stats64()
2091 pstats->rxmulticastframes_g - in xgbe_get_stats64()
2092 pstats->rxunicastframes_g; in xgbe_get_stats64()
2093 s->multicast = pstats->rxmulticastframes_g; in xgbe_get_stats64()
2094 s->rx_length_errors = pstats->rxlengtherror; in xgbe_get_stats64()
2095 s->rx_crc_errors = pstats->rxcrcerror; in xgbe_get_stats64()
2096 s->rx_fifo_errors = pstats->rxfifooverflow; in xgbe_get_stats64()
2098 s->tx_packets = pstats->txframecount_gb; in xgbe_get_stats64()
2099 s->tx_bytes = pstats->txoctetcount_gb; in xgbe_get_stats64()
2100 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; in xgbe_get_stats64()
2101 s->tx_dropped = netdev->stats.tx_dropped; in xgbe_get_stats64()
2103 DBGPR("<--%s\n", __func__); in xgbe_get_stats64()
2110 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_vlan_rx_add_vid()
2112 DBGPR("-->%s\n", __func__); in xgbe_vlan_rx_add_vid()
2114 set_bit(vid, pdata->active_vlans); in xgbe_vlan_rx_add_vid()
2115 hw_if->update_vlan_hash_table(pdata); in xgbe_vlan_rx_add_vid()
2117 DBGPR("<--%s\n", __func__); in xgbe_vlan_rx_add_vid()
2126 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_vlan_rx_kill_vid()
2128 DBGPR("-->%s\n", __func__); in xgbe_vlan_rx_kill_vid()
2130 clear_bit(vid, pdata->active_vlans); in xgbe_vlan_rx_kill_vid()
2131 hw_if->update_vlan_hash_table(pdata); in xgbe_vlan_rx_kill_vid()
2133 DBGPR("<--%s\n", __func__); in xgbe_vlan_rx_kill_vid()
2145 DBGPR("-->xgbe_poll_controller\n"); in xgbe_poll_controller()
2147 if (pdata->per_channel_irq) { in xgbe_poll_controller()
2148 for (i = 0; i < pdata->channel_count; i++) { in xgbe_poll_controller()
2149 channel = pdata->channel[i]; in xgbe_poll_controller()
2150 xgbe_dma_isr(channel->dma_irq, channel); in xgbe_poll_controller()
2153 disable_irq(pdata->dev_irq); in xgbe_poll_controller()
2154 xgbe_isr(pdata->dev_irq, pdata); in xgbe_poll_controller()
2155 enable_irq(pdata->dev_irq); in xgbe_poll_controller()
2158 DBGPR("<--xgbe_poll_controller\n"); in xgbe_poll_controller()
2170 return -EOPNOTSUPP; in xgbe_setup_tc()
2172 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in xgbe_setup_tc()
2173 tc = mqprio->num_tc; in xgbe_setup_tc()
2175 if (tc > pdata->hw_feat.tc_cnt) in xgbe_setup_tc()
2176 return -EINVAL; in xgbe_setup_tc()
2178 pdata->num_tcs = tc; in xgbe_setup_tc()
2179 pdata->hw_if.config_tc(pdata); in xgbe_setup_tc()
2192 if (!pdata->hw_feat.vxn) in xgbe_fix_features()
2206 "forcing both tx and rx udp tunnel support\n"); in xgbe_fix_features()
2231 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_features()
2235 rxhash = pdata->netdev_features & NETIF_F_RXHASH; in xgbe_set_features()
2236 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; in xgbe_set_features()
2237 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; in xgbe_set_features()
2238 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; in xgbe_set_features()
2241 ret = hw_if->enable_rss(pdata); in xgbe_set_features()
2243 ret = hw_if->disable_rss(pdata); in xgbe_set_features()
2248 hw_if->enable_rx_csum(pdata); in xgbe_set_features()
2250 hw_if->disable_rx_csum(pdata); in xgbe_set_features()
2253 hw_if->enable_rx_vlan_stripping(pdata); in xgbe_set_features()
2255 hw_if->disable_rx_vlan_stripping(pdata); in xgbe_set_features()
2258 hw_if->enable_rx_vlan_filtering(pdata); in xgbe_set_features()
2260 hw_if->disable_rx_vlan_filtering(pdata); in xgbe_set_features()
2262 pdata->netdev_features = features; in xgbe_set_features()
2264 DBGPR("<--xgbe_set_features\n"); in xgbe_set_features()
2310 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_rx_refresh()
2311 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_rx_refresh()
2312 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_rx_refresh()
2313 struct xgbe_ring *ring = channel->rx_ring; in xgbe_rx_refresh()
2316 while (ring->dirty != ring->cur) { in xgbe_rx_refresh()
2317 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); in xgbe_rx_refresh()
2320 desc_if->unmap_rdata(pdata, rdata); in xgbe_rx_refresh()
2322 if (desc_if->map_rx_buffer(pdata, ring, rdata)) in xgbe_rx_refresh()
2325 hw_if->rx_desc_reset(pdata, rdata, ring->dirty); in xgbe_rx_refresh()
2327 ring->dirty++; in xgbe_rx_refresh()
2333 /* Update the Rx Tail Pointer Register with address of in xgbe_rx_refresh()
2335 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1); in xgbe_rx_refresh()
2337 lower_32_bits(rdata->rdesc_dma)); in xgbe_rx_refresh()
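
xgbe_rx_refresh() walks dirty up to cur, re-arming each entry, then writes the DMA address of the last refilled descriptor to the tail register so hardware sees the new buffers. A condensed sketch of that handshake with hypothetical helpers:

struct rx_ring { unsigned int cur, dirty; };		/* minimal stand-in */

int map_buf(struct rx_ring *r, unsigned int idx);	/* hypothetical */
void reset_desc(struct rx_ring *r, unsigned int idx);	/* hypothetical */
void write_tail(struct rx_ring *r, unsigned int idx);	/* hypothetical */

static void rx_refresh(struct rx_ring *r)
{
	while (r->dirty != r->cur) {
		if (map_buf(r, r->dirty))	/* alloc + DMA-map */
			break;			/* retry next poll */
		reset_desc(r, r->dirty);	/* return entry to HW */
		r->dirty++;
	}
	write_tail(r, r->dirty - 1);	/* publish last armed entry */
}
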
2348 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); in xgbe_create_skb()
2355 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, in xgbe_create_skb()
2356 rdata->rx.hdr.dma_off, in xgbe_create_skb()
2357 rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); in xgbe_create_skb()
2359 packet = page_address(rdata->rx.hdr.pa.pages) + in xgbe_create_skb()
2360 rdata->rx.hdr.pa.pages_offset; in xgbe_create_skb()
2371 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) in xgbe_rx_buf1_len()
2375 if (rdata->rx.hdr_len) in xgbe_rx_buf1_len()
2376 return rdata->rx.hdr_len; in xgbe_rx_buf1_len()
2381 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) in xgbe_rx_buf1_len()
2382 return rdata->rx.hdr.dma_len; in xgbe_rx_buf1_len()
2387 return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); in xgbe_rx_buf1_len()
2395 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) in xgbe_rx_buf2_len()
2396 return rdata->rx.buf.dma_len; in xgbe_rx_buf2_len()
2401 return rdata->rx.len - len; in xgbe_rx_buf2_len()
2406 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_tx_poll()
2407 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_tx_poll()
2408 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_tx_poll()
2409 struct xgbe_ring *ring = channel->tx_ring; in xgbe_tx_poll()
2412 struct net_device *netdev = pdata->netdev; in xgbe_tx_poll()
2418 DBGPR("-->xgbe_tx_poll\n"); in xgbe_tx_poll()
2424 cur = ring->cur; in xgbe_tx_poll()
2426 /* Be sure we get ring->cur before accessing descriptor data */ in xgbe_tx_poll()
2429 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_tx_poll()
2432 (ring->dirty != cur)) { in xgbe_tx_poll()
2433 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); in xgbe_tx_poll()
2434 rdesc = rdata->rdesc; in xgbe_tx_poll()
2436 if (!hw_if->tx_complete(rdesc)) in xgbe_tx_poll()
2444 xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0); in xgbe_tx_poll()
2446 if (hw_if->is_last_desc(rdesc)) { in xgbe_tx_poll()
2447 tx_packets += rdata->tx.packets; in xgbe_tx_poll()
2448 tx_bytes += rdata->tx.bytes; in xgbe_tx_poll()
2451 /* Free the SKB and reset the descriptor for re-use */ in xgbe_tx_poll()
2452 desc_if->unmap_rdata(pdata, rdata); in xgbe_tx_poll()
2453 hw_if->tx_desc_reset(rdata); in xgbe_tx_poll()
2456 ring->dirty++; in xgbe_tx_poll()
2464 if ((ring->tx.queue_stopped == 1) && in xgbe_tx_poll()
2466 ring->tx.queue_stopped = 0; in xgbe_tx_poll()
2470 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); in xgbe_tx_poll()
2477 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_rx_poll()
2478 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_rx_poll()
2479 struct xgbe_ring *ring = channel->rx_ring; in xgbe_rx_poll()
2482 struct net_device *netdev = pdata->netdev; in xgbe_rx_poll()
2491 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); in xgbe_rx_poll()
2493 /* Nothing to do if there isn't a Rx ring for this channel */ in xgbe_rx_poll()
2500 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; in xgbe_rx_poll()
2502 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_rx_poll()
2503 packet = &ring->packet_data; in xgbe_rx_poll()
2505 DBGPR(" cur = %d\n", ring->cur); in xgbe_rx_poll()
2508 if (!received && rdata->state_saved) { in xgbe_rx_poll()
2509 skb = rdata->state.skb; in xgbe_rx_poll()
2510 error = rdata->state.error; in xgbe_rx_poll()
2511 len = rdata->state.len; in xgbe_rx_poll()
2520 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_rx_poll()
2525 if (hw_if->dev_read(channel)) in xgbe_rx_poll()
2529 ring->cur++; in xgbe_rx_poll()
2531 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_rx_poll()
2533 context_next = XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2536 context = XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2544 if (error || packet->errors) { in xgbe_rx_poll()
2545 if (packet->errors) in xgbe_rx_poll()
2569 dma_sync_single_range_for_cpu(pdata->dev, in xgbe_rx_poll()
2570 rdata->rx.buf.dma_base, in xgbe_rx_poll()
2571 rdata->rx.buf.dma_off, in xgbe_rx_poll()
2572 rdata->rx.buf.dma_len, in xgbe_rx_poll()
2575 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in xgbe_rx_poll()
2576 rdata->rx.buf.pa.pages, in xgbe_rx_poll()
2577 rdata->rx.buf.pa.pages_offset, in xgbe_rx_poll()
2579 rdata->rx.buf.dma_len); in xgbe_rx_poll()
2580 rdata->rx.buf.pa.pages = NULL; in xgbe_rx_poll()
2592 max_len = netdev->mtu + ETH_HLEN; in xgbe_rx_poll()
2593 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && in xgbe_rx_poll()
2594 (skb->protocol == htons(ETH_P_8021Q))) in xgbe_rx_poll()
2597 if (skb->len > max_len) { in xgbe_rx_poll()
2608 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2610 skb->ip_summed = CHECKSUM_UNNECESSARY; in xgbe_rx_poll()
2612 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2614 skb->encapsulation = 1; in xgbe_rx_poll()
2616 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2618 skb->csum_level = 1; in xgbe_rx_poll()
2621 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2624 packet->vlan_ctag); in xgbe_rx_poll()
2626 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2630 nsec = timecounter_cyc2time(&pdata->tstamp_tc, in xgbe_rx_poll()
2631 packet->rx_tstamp); in xgbe_rx_poll()
2633 hwtstamps->hwtstamp = ns_to_ktime(nsec); in xgbe_rx_poll()
2636 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2638 skb_set_hash(skb, packet->rss_hash, in xgbe_rx_poll()
2639 packet->rss_hash_type); in xgbe_rx_poll()
2641 skb->dev = netdev; in xgbe_rx_poll()
2642 skb->protocol = eth_type_trans(skb, netdev); in xgbe_rx_poll()
2643 skb_record_rx_queue(skb, channel->queue_index); in xgbe_rx_poll()
2653 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_rx_poll()
2654 rdata->state_saved = 1; in xgbe_rx_poll()
2655 rdata->state.skb = skb; in xgbe_rx_poll()
2656 rdata->state.len = len; in xgbe_rx_poll()
2657 rdata->state.error = error; in xgbe_rx_poll()
2660 DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count); in xgbe_rx_poll()
2669 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_one_poll()
2672 DBGPR("-->xgbe_one_poll: budget=%d\n", budget); in xgbe_one_poll()
2677 /* Process Rx ring next */ in xgbe_one_poll()
2682 /* Enable Tx and Rx interrupts */ in xgbe_one_poll()
2683 if (pdata->channel_irq_mode) in xgbe_one_poll()
2686 enable_irq(channel->dma_irq); in xgbe_one_poll()
2689 DBGPR("<--xgbe_one_poll: received = %d\n", processed); in xgbe_one_poll()
2703 DBGPR("-->xgbe_all_poll: budget=%d\n", budget); in xgbe_all_poll()
2706 ring_budget = budget / pdata->rx_ring_count; in xgbe_all_poll()
2710 for (i = 0; i < pdata->channel_count; i++) { in xgbe_all_poll()
2711 channel = pdata->channel[i]; in xgbe_all_poll()
2716 /* Process Rx ring next */ in xgbe_all_poll()
2717 if (ring_budget > (budget - processed)) in xgbe_all_poll()
2718 ring_budget = budget - processed; in xgbe_all_poll()
2725 /* Enable Tx and Rx interrupts */ in xgbe_all_poll()
2729 DBGPR("<--xgbe_all_poll: received = %d\n", processed); in xgbe_all_poll()
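
xgbe_all_poll() shares one NAPI budget across every channel: each ring gets budget / rx_ring_count, clamped so the running total never exceeds the budget. A tiny sketch of the split:

/* Divide a NAPI budget across 'nrings' rx rings. */
static int poll_all(int budget, unsigned int nrings,
		    int (*poll_one)(unsigned int ring, int budget))
{
	int ring_budget = budget / nrings;
	int processed = 0;
	unsigned int i;

	for (i = 0; i < nrings; i++) {
		if (ring_budget > budget - processed)
			ring_budget = budget - processed;	/* clamp */
		processed += poll_one(i, ring_budget);
	}
	return processed;
}
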
2740 while (count--) { in xgbe_dump_tx_desc()
2742 rdesc = rdata->rdesc; in xgbe_dump_tx_desc()
2743 netdev_dbg(pdata->netdev, in xgbe_dump_tx_desc()
2746 le32_to_cpu(rdesc->desc0), in xgbe_dump_tx_desc()
2747 le32_to_cpu(rdesc->desc1), in xgbe_dump_tx_desc()
2748 le32_to_cpu(rdesc->desc2), in xgbe_dump_tx_desc()
2749 le32_to_cpu(rdesc->desc3)); in xgbe_dump_tx_desc()
2761 rdesc = rdata->rdesc; in xgbe_dump_rx_desc()
2762 netdev_dbg(pdata->netdev, in xgbe_dump_rx_desc()
2763 "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", in xgbe_dump_rx_desc()
2764 idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), in xgbe_dump_rx_desc()
2765 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); in xgbe_dump_rx_desc()
2770 struct ethhdr *eth = (struct ethhdr *)skb->data; in xgbe_print_pkt()
2777 (tx_rx ? "TX" : "RX"), skb->len); in xgbe_print_pkt()
2779 netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); in xgbe_print_pkt()
2780 netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); in xgbe_print_pkt()
2781 netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); in xgbe_print_pkt()
2783 for (i = 0; i < skb->len; i += 32) { in xgbe_print_pkt()
2784 unsigned int len = min(skb->len - i, 32U); in xgbe_print_pkt()
2786 hex_dump_to_buffer(&skb->data[i], len, 32, 1, in xgbe_print_pkt()
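
The dump above walks the skb 32 bytes at a time through hex_dump_to_buffer(); the listing cuts off mid-call, so here is a hedged sketch of how such a loop typically completes (the scratch-buffer size and the print line are assumptions, not the driver's exact code):

#include <linux/printk.h>

static void dump_buf(const unsigned char *data, unsigned int total)
{
	char linebuf[128];	/* assumed scratch size */
	unsigned int i;

	for (i = 0; i < total; i += 32) {
		unsigned int len = total - i < 32 ? total - i : 32;

		hex_dump_to_buffer(&data[i], len, 32, 1,
				   linebuf, sizeof(linebuf), false);
		pr_debug("%#06x: %s\n", i, linebuf);
	}
}
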