Lines Matching +full:tcs +full:- +full:wait

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
61 "Copyright (c) 1999-2016 Intel Corporation.";
76 /* ixgbe_pci_tbl - PCI Device ID Table
151 …"Maximum number of virtual functions to allocate per physical function - default is zero and maxim…
157 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
160 static int debug = -1;
177 return dev && (dev->netdev_ops == &ixgbe_netdev_ops); in netif_is_ixgbe()
186 parent_bus = adapter->pdev->bus->parent; in ixgbe_read_pci_cfg_word_parent()
188 return -1; in ixgbe_read_pci_cfg_word_parent()
190 parent_dev = parent_bus->self; in ixgbe_read_pci_cfg_word_parent()
192 return -1; in ixgbe_read_pci_cfg_word_parent()
195 return -1; in ixgbe_read_pci_cfg_word_parent()
199 ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) in ixgbe_read_pci_cfg_word_parent()
200 return -1; in ixgbe_read_pci_cfg_word_parent()
206 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_get_parent_bus_info()
210 hw->bus.type = ixgbe_bus_type_pci_express; in ixgbe_get_parent_bus_info()
221 hw->bus.width = ixgbe_convert_bus_width(link_status); in ixgbe_get_parent_bus_info()
222 hw->bus.speed = ixgbe_convert_bus_speed(link_status); in ixgbe_get_parent_bus_info()
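
The two convert helpers above decode the PCIe link-status word just read from the parent bridge. A minimal userspace sketch of that decoding, using the field layout from the PCIe spec (the mask/shift values are spec constants; the sample LNKSTA value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t lnksta = 0x0042;                 /* made-up sample LNKSTA */
	unsigned speed = lnksta & 0x000f;         /* PCI_EXP_LNKSTA_CLS: speed */
	unsigned width = (lnksta & 0x03f0) >> 4;  /* PCI_EXP_LNKSTA_NLW: lanes */

	/* speed code 1 = 2.5 GT/s, 2 = 5.0 GT/s, 3 = 8.0 GT/s, ... */
	printf("speed code %u, x%u link\n", speed, width);
	return 0;
}
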
228 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
231 * This function is used by probe to determine whether a device's PCI-Express
238 switch (hw->device_id) { in ixgbe_pcie_from_parent()
250 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_minimum_link()
257 if (hw->bus.type == ixgbe_bus_type_internal) in ixgbe_check_minimum_link()
261 if (ixgbe_pcie_from_parent(&adapter->hw)) in ixgbe_check_minimum_link()
262 pdev = adapter->pdev->bus->parent->self; in ixgbe_check_minimum_link()
264 pdev = adapter->pdev; in ixgbe_check_minimum_link()
271 if (!test_bit(__IXGBE_DOWN, &adapter->state) && in ixgbe_service_event_schedule()
272 !test_bit(__IXGBE_REMOVING, &adapter->state) && in ixgbe_service_event_schedule()
273 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) in ixgbe_service_event_schedule()
274 queue_work(ixgbe_wq, &adapter->service_task); in ixgbe_service_event_schedule()
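
ixgbe_service_event_schedule() relies on test_and_set_bit() so the service task is queued at most once however many events race. A self-contained C11 sketch of the same idempotent-scheduling idea; schedule_service() and the flag are illustrative stand-ins, not driver symbols:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag service_sched = ATOMIC_FLAG_INIT;

static void schedule_service(void)
{
	/* only the caller that flips the flag actually queues the work */
	if (!atomic_flag_test_and_set(&service_sched))
		puts("service task queued");
	else
		puts("already pending, nothing to do");
}

int main(void)
{
	schedule_service();                 /* queues */
	schedule_service();                 /* skipped */
	atomic_flag_clear(&service_sched);  /* service_event_complete analogue */
	schedule_service();                 /* queues again */
	return 0;
}
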
279 struct ixgbe_adapter *adapter = hw->back; in ixgbe_remove_adapter()
281 if (!hw->hw_addr) in ixgbe_remove_adapter()
283 hw->hw_addr = NULL; in ixgbe_remove_adapter()
285 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) in ixgbe_remove_adapter()
295 reg_addr = READ_ONCE(hw->hw_addr); in ixgbe_check_remove()
318 * ixgbe_read_reg - Read from device register
332 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); in ixgbe_read_reg()
337 if (unlikely(hw->phy.nw_mng_if_sel & in ixgbe_read_reg()
353 adapter = hw->back; in ixgbe_read_reg()
378 struct ixgbe_adapter *adapter = hw->back; in ixgbe_read_pci_cfg_word()
381 if (ixgbe_removed(hw->hw_addr)) in ixgbe_read_pci_cfg_word()
383 pci_read_config_word(adapter->pdev, reg, &value); in ixgbe_read_pci_cfg_word()
385 ixgbe_check_cfg_remove(hw, adapter->pdev)) in ixgbe_read_pci_cfg_word()
393 struct ixgbe_adapter *adapter = hw->back; in ixgbe_read_pci_cfg_dword()
396 if (ixgbe_removed(hw->hw_addr)) in ixgbe_read_pci_cfg_dword()
398 pci_read_config_dword(adapter->pdev, reg, &value); in ixgbe_read_pci_cfg_dword()
400 ixgbe_check_cfg_remove(hw, adapter->pdev)) in ixgbe_read_pci_cfg_dword()
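
A surprise-removed PCI device reads back as all-ones, which is how the config-space helpers above detect that the adapter is gone. A tiny sketch of that check; the 0xFFFF/0xFFFFFFFF sentinels mirror what the driver's IXGBE_FAILED_READ_CFG_* defines are generally understood to be:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a read of all-ones from config space usually means the device is gone */
static bool cfg_word_indicates_removal(uint16_t v)  { return v == 0xffff; }
static bool cfg_dword_indicates_removal(uint32_t v) { return v == 0xffffffffu; }

int main(void)
{
	printf("%d %d\n", cfg_word_indicates_removal(0xffff),
	       cfg_dword_indicates_removal(0x00100000));
	return 0;
}
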
408 struct ixgbe_adapter *adapter = hw->back; in ixgbe_write_pci_cfg_word()
410 if (ixgbe_removed(hw->hw_addr)) in ixgbe_write_pci_cfg_word()
412 pci_write_config_word(adapter->pdev, reg, value); in ixgbe_write_pci_cfg_word()
417 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); in ixgbe_service_event_complete()
421 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); in ixgbe_service_event_complete()
463 * ixgbe_regdump - register printout routine
471 switch (reginfo->ofs) { in ixgbe_regdump()
529 pr_info("%-15s %08x\n", in ixgbe_regdump()
530 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); in ixgbe_regdump()
540 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7); in ixgbe_regdump()
543 pr_err("%-15s%s\n", rname, buf); in ixgbe_regdump()
552 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; in ixgbe_print_buffer()
554 n, ring->next_to_use, ring->next_to_clean, in ixgbe_print_buffer()
557 tx_buffer->next_to_watch, in ixgbe_print_buffer()
558 (u64)tx_buffer->time_stamp); in ixgbe_print_buffer()
562 * ixgbe_dump - Print registers, tx-rings and rx-rings
566 struct net_device *netdev = adapter->netdev; in ixgbe_dump()
567 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_dump()
584 dev_info(&adapter->pdev->dev, "Net device Info\n"); in ixgbe_dump()
587 pr_info("%-15s %016lX %016lX\n", in ixgbe_dump()
588 netdev->name, in ixgbe_dump()
589 netdev->state, in ixgbe_dump()
594 dev_info(&adapter->pdev->dev, "Register Dump\n"); in ixgbe_dump()
597 reginfo->name; reginfo++) { in ixgbe_dump()
605 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); in ixgbe_dump()
607 "Queue [NTU] [NTC] [bi(ntc)->dma ]", in ixgbe_dump()
609 for (n = 0; n < adapter->num_tx_queues; n++) { in ixgbe_dump()
610 ring = adapter->tx_ring[n]; in ixgbe_dump()
614 for (n = 0; n < adapter->num_xdp_queues; n++) { in ixgbe_dump()
615 ring = adapter->xdp_ring[n]; in ixgbe_dump()
623 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); in ixgbe_dump()
628 * +--------------------------------------------------------------+ in ixgbe_dump()
630 * +--------------------------------------------------------------+ in ixgbe_dump()
632 * +--------------------------------------------------------------+ in ixgbe_dump()
635 * 82598 Advanced Transmit Descriptor (Write-Back Format) in ixgbe_dump()
636 * +--------------------------------------------------------------+ in ixgbe_dump()
638 * +--------------------------------------------------------------+ in ixgbe_dump()
640 * +--------------------------------------------------------------+ in ixgbe_dump()
644 * +--------------------------------------------------------------+ in ixgbe_dump()
646 * +--------------------------------------------------------------+ in ixgbe_dump()
648 * +--------------------------------------------------------------+ in ixgbe_dump()
651 * 82599+ Advanced Transmit Descriptor (Write-Back Format) in ixgbe_dump()
652 * +--------------------------------------------------------------+ in ixgbe_dump()
654 * +--------------------------------------------------------------+ in ixgbe_dump()
656 * +--------------------------------------------------------------+ in ixgbe_dump()
660 for (n = 0; n < adapter->num_tx_queues; n++) { in ixgbe_dump()
661 ring = adapter->tx_ring[n]; in ixgbe_dump()
662 pr_info("------------------------------------\n"); in ixgbe_dump()
663 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); in ixgbe_dump()
664 pr_info("------------------------------------\n"); in ixgbe_dump()
667 "[PlPOIdStDDt Ln] [bi->dma ] ", in ixgbe_dump()
668 "leng", "ntw", "timestamp", "bi->skb"); in ixgbe_dump()
670 for (i = 0; ring->desc && (i < ring->count); i++) { in ixgbe_dump()
672 tx_buffer = &ring->tx_buffer_info[i]; in ixgbe_dump()
677 if (i == ring->next_to_use && in ixgbe_dump()
678 i == ring->next_to_clean) in ixgbe_dump()
680 else if (i == ring->next_to_use) in ixgbe_dump()
682 else if (i == ring->next_to_clean) in ixgbe_dump()
688 le64_to_cpu((__force __le64)u0->a), in ixgbe_dump()
689 le64_to_cpu((__force __le64)u0->b), in ixgbe_dump()
692 tx_buffer->next_to_watch, in ixgbe_dump()
693 (u64)tx_buffer->time_stamp, in ixgbe_dump()
694 tx_buffer->skb, in ixgbe_dump()
698 tx_buffer->skb) in ixgbe_dump()
701 tx_buffer->skb->data, in ixgbe_dump()
710 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); in ixgbe_dump()
712 for (n = 0; n < adapter->num_rx_queues; n++) { in ixgbe_dump()
713 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
715 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
722 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); in ixgbe_dump()
728 * +-----------------------------------------------------+ in ixgbe_dump()
730 * +----------------------------------------------+------+ in ixgbe_dump()
732 * +-----------------------------------------------------+ in ixgbe_dump()
735 * 82598 Advanced Receive Descriptor (Write-Back) Format in ixgbe_dump()
738 * +------------------------------------------------------+ in ixgbe_dump()
742 * +------------------------------------------------------+ in ixgbe_dump()
744 * +------------------------------------------------------+ in ixgbe_dump()
749 * +-----------------------------------------------------+ in ixgbe_dump()
751 * +----------------------------------------------+------+ in ixgbe_dump()
753 * +-----------------------------------------------------+ in ixgbe_dump()
756 * 82599+ Advanced Receive Descriptor (Write-Back) Format in ixgbe_dump()
759 * +------------------------------------------------------+ in ixgbe_dump()
760 * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | in ixgbe_dump()
763 * +------------------------------------------------------+ in ixgbe_dump()
765 * +------------------------------------------------------+ in ixgbe_dump()
769 for (n = 0; n < adapter->num_rx_queues; n++) { in ixgbe_dump()
770 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
771 pr_info("------------------------------------\n"); in ixgbe_dump()
772 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
773 pr_info("------------------------------------\n"); in ixgbe_dump()
776 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", in ixgbe_dump()
777 "<-- Adv Rx Read format"); in ixgbe_dump()
780 "[vl er S cks ln] ---------------- [bi->skb ] ", in ixgbe_dump()
781 "<-- Adv Rx Write-Back format"); in ixgbe_dump()
783 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
786 if (i == rx_ring->next_to_use) in ixgbe_dump()
788 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
793 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
796 if (rx_desc->wb.upper.length) { in ixgbe_dump()
798 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", in ixgbe_dump()
800 le64_to_cpu((__force __le64)u0->a), in ixgbe_dump()
801 le64_to_cpu((__force __le64)u0->b), in ixgbe_dump()
802 rx_buffer_info->skb, in ixgbe_dump()
807 le64_to_cpu((__force __le64)u0->a), in ixgbe_dump()
808 le64_to_cpu((__force __le64)u0->b), in ixgbe_dump()
809 (u64)rx_buffer_info->dma, in ixgbe_dump()
810 rx_buffer_info->skb, in ixgbe_dump()
814 rx_buffer_info->dma) { in ixgbe_dump()
817 page_address(rx_buffer_info->page) + in ixgbe_dump()
818 rx_buffer_info->page_offset, in ixgbe_dump()
831 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); in ixgbe_release_hw_control()
832 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, in ixgbe_release_hw_control()
841 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); in ixgbe_get_hw_control()
842 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, in ixgbe_get_hw_control()
847 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
849 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
858 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_ivar()
859 switch (hw->mac.type) { in ixgbe_set_ivar()
862 if (direction == -1) in ixgbe_set_ivar()
875 if (direction == -1) { in ixgbe_set_ivar()
879 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); in ixgbe_set_ivar()
882 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); in ixgbe_set_ivar()
904 switch (adapter->hw.mac.type) { in ixgbe_irq_rearm_queues()
907 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); in ixgbe_irq_rearm_queues()
915 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); in ixgbe_irq_rearm_queues()
917 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); in ixgbe_irq_rearm_queues()
926 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_xoff_rx_lfc()
927 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_xoff_rx_lfc()
931 if ((hw->fc.current_mode != ixgbe_fc_full) && in ixgbe_update_xoff_rx_lfc()
932 (hw->fc.current_mode != ixgbe_fc_rx_pause)) in ixgbe_update_xoff_rx_lfc()
935 switch (hw->mac.type) { in ixgbe_update_xoff_rx_lfc()
942 hwstats->lxoffrxc += data; in ixgbe_update_xoff_rx_lfc()
948 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_update_xoff_rx_lfc()
950 &adapter->tx_ring[i]->state); in ixgbe_update_xoff_rx_lfc()
952 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_update_xoff_rx_lfc()
954 &adapter->xdp_ring[i]->state); in ixgbe_update_xoff_rx_lfc()
959 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_xoff_received()
960 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_xoff_received()
964 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_update_xoff_received()
966 if (adapter->ixgbe_ieee_pfc) in ixgbe_update_xoff_received()
967 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_update_xoff_received()
969 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { in ixgbe_update_xoff_received()
978 switch (hw->mac.type) { in ixgbe_update_xoff_received()
985 hwstats->pxoffrxc[i] += pxoffrxc; in ixgbe_update_xoff_received()
987 tc = netdev_get_prio_tc_map(adapter->netdev, i); in ixgbe_update_xoff_received()
992 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_update_xoff_received()
993 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_xoff_received()
995 tc = tx_ring->dcb_tc; in ixgbe_update_xoff_received()
997 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_update_xoff_received()
1000 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_update_xoff_received()
1001 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; in ixgbe_update_xoff_received()
1003 tc = xdp_ring->dcb_tc; in ixgbe_update_xoff_received()
1005 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); in ixgbe_update_xoff_received()
1011 return ring->stats.packets; in ixgbe_get_tx_completed()
1018 head = ring->next_to_clean; in ixgbe_get_tx_pending()
1019 tail = ring->next_to_use; in ixgbe_get_tx_pending()
1021 return ((head <= tail) ? tail : tail + ring->count) - head; in ixgbe_get_tx_pending()
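
The one-liner above computes how many descriptors are outstanding on a ring whose indices wrap at ring->count. A standalone check of the arithmetic for both the wrapped and unwrapped cases (values are illustrative):

#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	return ((head <= tail) ? tail : tail + count) - head;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 200, 512));  /* 190: no wrap */
	printf("%u\n", tx_pending(500, 10, 512));  /* 22: tail wrapped */
	printf("%u\n", tx_pending(7, 7, 512));     /* 0: ring idle */
	return 0;
}
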
1027 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbe_check_tx_hang()
1047 &tx_ring->state); in ixgbe_check_tx_hang()
1049 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbe_check_tx_hang()
1051 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_check_tx_hang()
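
Distilled, the hang check above only reports a hang when the completion counter stops moving for two checks in a row while descriptors are still pending; any progress disarms it. A hedged sketch of that state machine with simplified field names (the driver keeps the armed flag in ring->state instead):

#include <stdbool.h>
#include <stdio.h>

struct hang_state { unsigned int tx_done_old; bool armed; };

static bool check_tx_hang(struct hang_state *s, unsigned int tx_done,
			  unsigned int pending)
{
	if (tx_done != s->tx_done_old || !pending) {
		/* progress was made (or nothing queued): disarm, resample */
		s->tx_done_old = tx_done;
		s->armed = false;
		return false;
	}
	/* no progress with work outstanding: first pass arms, second trips */
	if (!s->armed) {
		s->armed = true;
		return false;
	}
	return true;
}

int main(void)
{
	struct hang_state s = { 0, false };

	printf("%d\n", check_tx_hang(&s, 5, 3)); /* progress: 0 */
	printf("%d\n", check_tx_hang(&s, 5, 3)); /* arms:     0 */
	printf("%d\n", check_tx_hang(&s, 5, 3)); /* hang:     1 */
	return 0;
}
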
1057 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
1064 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_tx_timeout_reset()
1065 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_tx_timeout_reset()
1072 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
1081 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_tx_maxrate()
1105 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
1113 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_tx_irq()
1117 unsigned int budget = q_vector->tx.work_limit; in ixgbe_clean_tx_irq()
1118 unsigned int i = tx_ring->next_to_clean; in ixgbe_clean_tx_irq()
1120 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_clean_tx_irq()
1123 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_irq()
1125 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1128 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in ixgbe_clean_tx_irq()
1138 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbe_clean_tx_irq()
1142 tx_buffer->next_to_watch = NULL; in ixgbe_clean_tx_irq()
1145 total_bytes += tx_buffer->bytecount; in ixgbe_clean_tx_irq()
1146 total_packets += tx_buffer->gso_segs; in ixgbe_clean_tx_irq()
1147 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) in ixgbe_clean_tx_irq()
1152 xdp_return_frame(tx_buffer->xdpf); in ixgbe_clean_tx_irq()
1154 napi_consume_skb(tx_buffer->skb, napi_budget); in ixgbe_clean_tx_irq()
1157 dma_unmap_single(tx_ring->dev, in ixgbe_clean_tx_irq()
1171 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1172 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1178 dma_unmap_page(tx_ring->dev, in ixgbe_clean_tx_irq()
1191 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1192 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1200 budget--; in ixgbe_clean_tx_irq()
1203 i += tx_ring->count; in ixgbe_clean_tx_irq()
1204 tx_ring->next_to_clean = i; in ixgbe_clean_tx_irq()
1205 u64_stats_update_begin(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1206 tx_ring->stats.bytes += total_bytes; in ixgbe_clean_tx_irq()
1207 tx_ring->stats.packets += total_packets; in ixgbe_clean_tx_irq()
1208 u64_stats_update_end(&tx_ring->syncp); in ixgbe_clean_tx_irq()
1209 q_vector->tx.total_bytes += total_bytes; in ixgbe_clean_tx_irq()
1210 q_vector->tx.total_packets += total_packets; in ixgbe_clean_tx_irq()
1211 adapter->tx_ipsec += total_ipsec; in ixgbe_clean_tx_irq()
1215 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_clean_tx_irq()
1225 tx_ring->queue_index, in ixgbe_clean_tx_irq()
1226 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1227 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), in ixgbe_clean_tx_irq()
1228 tx_ring->next_to_use, i, in ixgbe_clean_tx_irq()
1229 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbe_clean_tx_irq()
1232 netif_stop_subqueue(tx_ring->netdev, in ixgbe_clean_tx_irq()
1233 tx_ring->queue_index); in ixgbe_clean_tx_irq()
1237 adapter->tx_timeout_count + 1, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1253 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbe_clean_tx_irq()
1259 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbe_clean_tx_irq()
1260 tx_ring->queue_index) in ixgbe_clean_tx_irq()
1261 && !test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_clean_tx_irq()
1262 netif_wake_subqueue(tx_ring->netdev, in ixgbe_clean_tx_irq()
1263 tx_ring->queue_index); in ixgbe_clean_tx_irq()
1264 ++tx_ring->tx_stats.restart_queue; in ixgbe_clean_tx_irq()
1276 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_tx_dca()
1280 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_update_tx_dca()
1281 txctrl = dca3_get_tag(tx_ring->dev, cpu); in ixgbe_update_tx_dca()
1283 switch (hw->mac.type) { in ixgbe_update_tx_dca()
1285 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); in ixgbe_update_tx_dca()
1289 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); in ixgbe_update_tx_dca()
1313 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_rx_dca()
1315 u8 reg_idx = rx_ring->reg_idx; in ixgbe_update_rx_dca()
1317 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_update_rx_dca()
1318 rxctrl = dca3_get_tag(rx_ring->dev, cpu); in ixgbe_update_rx_dca()
1320 switch (hw->mac.type) { in ixgbe_update_rx_dca()
1343 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_update_dca()
1347 if (q_vector->cpu == cpu) in ixgbe_update_dca()
1350 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_update_dca()
1353 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_update_dca()
1356 q_vector->cpu = cpu; in ixgbe_update_dca()
1366 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_setup_dca()
1367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in ixgbe_setup_dca()
1370 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in ixgbe_setup_dca()
1373 for (i = 0; i < adapter->num_q_vectors; i++) { in ixgbe_setup_dca()
1374 adapter->q_vector[i]->cpu = -1; in ixgbe_setup_dca()
1375 ixgbe_update_dca(adapter->q_vector[i]); in ixgbe_setup_dca()
1384 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) in __ixgbe_notify_dca()
1390 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in __ixgbe_notify_dca()
1393 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; in __ixgbe_notify_dca()
1394 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in __ixgbe_notify_dca()
1400 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { in __ixgbe_notify_dca()
1402 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; in __ixgbe_notify_dca()
1403 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in __ixgbe_notify_dca()
1426 if (!(ring->netdev->features & NETIF_F_RXHASH)) in ixgbe_rx_hash()
1429 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & in ixgbe_rx_hash()
1435 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), in ixgbe_rx_hash()
1442 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1451 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; in ixgbe_rx_is_fcoe()
1453 return test_bit(__IXGBE_RX_FCOE, &ring->state) && in ixgbe_rx_is_fcoe()
1461 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1470 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; in ixgbe_rx_checksum()
1476 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in ixgbe_rx_checksum()
1482 skb->encapsulation = 1; in ixgbe_rx_checksum()
1488 ring->rx_stats.csum_err++; in ixgbe_rx_checksum()
1501 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) in ixgbe_rx_checksum()
1504 ring->rx_stats.csum_err++; in ixgbe_rx_checksum()
1509 skb->ip_summed = CHECKSUM_UNNECESSARY; in ixgbe_rx_checksum()
1515 skb->ip_summed = CHECKSUM_NONE; in ixgbe_rx_checksum()
1519 skb->csum_level = 1; in ixgbe_rx_checksum()
1531 struct page *page = bi->page; in ixgbe_alloc_mapped_page()
1541 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1546 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbe_alloc_mapped_page()
1555 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbe_alloc_mapped_page()
1558 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1562 bi->dma = dma; in ixgbe_alloc_mapped_page()
1563 bi->page = page; in ixgbe_alloc_mapped_page()
1564 bi->page_offset = rx_ring->rx_offset; in ixgbe_alloc_mapped_page()
1565 page_ref_add(page, USHRT_MAX - 1); in ixgbe_alloc_mapped_page()
1566 bi->pagecnt_bias = USHRT_MAX; in ixgbe_alloc_mapped_page()
1567 rx_ring->rx_stats.alloc_rx_page++; in ixgbe_alloc_mapped_page()
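
The USHRT_MAX reference trick above lets the hot path hand out page fragments by decrementing a cheap driver-private bias instead of touching the atomic page refcount each time; only the difference between the two counters matters. A userspace model of the bookkeeping (struct and names are illustrative, not the driver's):

#include <stdio.h>
#include <limits.h>

struct rx_page {
	unsigned int page_refs;      /* stands in for the atomic page refcount */
	unsigned short pagecnt_bias; /* cheap counter, private to the driver */
};

int main(void)
{
	struct rx_page p = { .page_refs = 1 };

	p.page_refs += USHRT_MAX - 1;   /* page_ref_add(page, USHRT_MAX - 1) */
	p.pagecnt_bias = USHRT_MAX;

	p.pagecnt_bias--;               /* hand a fragment to the stack */
	p.pagecnt_bias--;               /* ...and another */

	/* references not covered by the bias: fragments the stack still holds */
	printf("outstanding users: %u\n", p.page_refs - p.pagecnt_bias);
	return 0;
}
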
1573 * ixgbe_alloc_rx_buffers - Replace used receive buffers
1581 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers()
1589 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers()
1590 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1599 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbe_alloc_rx_buffers()
1600 bi->page_offset, bufsz, in ixgbe_alloc_rx_buffers()
1605 * because each write-back erases this info. in ixgbe_alloc_rx_buffers()
1607 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbe_alloc_rx_buffers()
1614 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers()
1615 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1619 rx_desc->wb.upper.length = 0; in ixgbe_alloc_rx_buffers()
1621 cleaned_count--; in ixgbe_alloc_rx_buffers()
1624 i += rx_ring->count; in ixgbe_alloc_rx_buffers()
1626 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers()
1627 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers()
1630 rx_ring->next_to_alloc = i; in ixgbe_alloc_rx_buffers()
1634 * applicable for weak-ordered memory model archs, in ixgbe_alloc_rx_buffers()
1635 * such as IA-64). in ixgbe_alloc_rx_buffers()
1638 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers()
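
The wmb()+writel() pair above is a classic producer publish: every descriptor write must be visible before the hardware can observe the new tail. In portable C11 terms the same ordering is a release fence before the index store; a sketch of the analogue, modelling only the CPU side, not real MMIO:

#include <stdatomic.h>
#include <stdint.h>

struct desc { uint64_t pkt_addr; };

static struct desc ring[256];
static _Atomic uint16_t tail;

static void publish(uint16_t i, uint64_t dma)
{
	ring[i].pkt_addr = dma;                    /* fill the descriptor */
	atomic_thread_fence(memory_order_release); /* wmb() analogue */
	atomic_store_explicit(&tail, (uint16_t)((i + 1) % 256),
			      memory_order_relaxed); /* writel(tail) analogue */
}

int main(void)
{
	publish(0, 0x1000);
	return (int)atomic_load(&tail);
}
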
1648 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), in ixgbe_set_rsc_gso_size()
1649 IXGBE_CB(skb)->append_cnt); in ixgbe_set_rsc_gso_size()
1650 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in ixgbe_set_rsc_gso_size()
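
RSC (receive-side coalescing) merges several wire frames into one skb, so the driver reconstructs a gso_size by dividing the payload evenly across the append count. A quick check of the arithmetic with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int skb_len = 4410, hdr_len = 66, append_cnt = 3;

	/* 4344 bytes of payload across 3 coalesced frames -> 1448 per segment */
	printf("gso_size = %u\n", DIV_ROUND_UP(skb_len - hdr_len, append_cnt));
	return 0;
}
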
1657 if (!IXGBE_CB(skb)->append_cnt) in ixgbe_update_rsc_stats()
1660 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; in ixgbe_update_rsc_stats()
1661 rx_ring->rx_stats.rsc_flush++; in ixgbe_update_rsc_stats()
1666 IXGBE_CB(skb)->append_cnt = 0; in ixgbe_update_rsc_stats()
1670 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1683 struct net_device *dev = rx_ring->netdev; in ixgbe_process_skb_fields()
1684 u32 flags = rx_ring->q_vector->adapter->flags; in ixgbe_process_skb_fields()
1695 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in ixgbe_process_skb_fields()
1697 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in ixgbe_process_skb_fields()
1706 skb_record_rx_queue(skb, rx_ring->queue_index); in ixgbe_process_skb_fields()
1708 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, in ixgbe_process_skb_fields()
1711 skb->protocol = eth_type_trans(skb, dev); in ixgbe_process_skb_fields()
1717 napi_gro_receive(&q_vector->napi, skb); in ixgbe_rx_skb()
1721 * ixgbe_is_non_eop - process handling of non-EOP buffers
1729 * that this is in fact a non-EOP buffer.
1735 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_is_non_eop()
1738 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_is_non_eop()
1739 rx_ring->next_to_clean = ntc; in ixgbe_is_non_eop()
1745 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & in ixgbe_is_non_eop()
1752 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; in ixgbe_is_non_eop()
1755 ntc = le32_to_cpu(rx_desc->wb.upper.status_error); in ixgbe_is_non_eop()
1766 rx_ring->rx_buffer_info[ntc].skb = skb; in ixgbe_is_non_eop()
1767 rx_ring->rx_stats.non_eop_descs++; in ixgbe_is_non_eop()
1773 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1787 skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ixgbe_pull_tail()
1800 * 60 bytes if the skb->len is less than 60 for skb_pad. in ixgbe_pull_tail()
1802 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE); in ixgbe_pull_tail()
1810 skb->data_len -= pull_len; in ixgbe_pull_tail()
1811 skb->tail += pull_len; in ixgbe_pull_tail()
1815 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1828 unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; in ixgbe_dma_sync_frag()
1829 unsigned long offset = (unsigned long)(skb->data) & mask; in ixgbe_dma_sync_frag()
1831 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1832 IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1837 skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ixgbe_dma_sync_frag()
1839 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1840 IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1847 if (unlikely(IXGBE_CB(skb)->page_released)) { in ixgbe_dma_sync_frag()
1848 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1856 * ixgbe_cleanup_headers - Correct corrupted or empty headers
1881 struct net_device *netdev = rx_ring->netdev; in ixgbe_cleanup_headers()
1893 !(netdev->features & NETIF_F_RXALL)))) { in ixgbe_cleanup_headers()
1916 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1926 u16 nta = rx_ring->next_to_alloc; in ixgbe_reuse_rx_page()
1928 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbe_reuse_rx_page()
1932 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbe_reuse_rx_page()
1938 new_buff->dma = old_buff->dma; in ixgbe_reuse_rx_page()
1939 new_buff->page = old_buff->page; in ixgbe_reuse_rx_page()
1940 new_buff->page_offset = old_buff->page_offset; in ixgbe_reuse_rx_page()
1941 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in ixgbe_reuse_rx_page()
1947 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in ixgbe_can_reuse_rx_page()
1948 struct page *page = rx_buffer->page; in ixgbe_can_reuse_rx_page()
1950 /* avoid re-using remote and pfmemalloc pages */ in ixgbe_can_reuse_rx_page()
1956 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in ixgbe_can_reuse_rx_page()
1965 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) in ixgbe_can_reuse_rx_page()
1966 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) in ixgbe_can_reuse_rx_page()
1975 page_ref_add(page, USHRT_MAX - 1); in ixgbe_can_reuse_rx_page()
1976 rx_buffer->pagecnt_bias = USHRT_MAX; in ixgbe_can_reuse_rx_page()
1983 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
1989 * This function will add the data contained in rx_buffer->page to the skb.
2005 unsigned int truesize = rx_ring->rx_offset ? in ixgbe_add_rx_frag()
2006 SKB_DATA_ALIGN(rx_ring->rx_offset + size) : in ixgbe_add_rx_frag()
2009 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in ixgbe_add_rx_frag()
2010 rx_buffer->page_offset, size, truesize); in ixgbe_add_rx_frag()
2012 rx_buffer->page_offset ^= truesize; in ixgbe_add_rx_frag()
2014 rx_buffer->page_offset += truesize; in ixgbe_add_rx_frag()
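
The XOR/advance pair above is the page-reuse flip: with 2K buffers in a 4K page (truesize a power of two), XOR-ing the offset ping-pongs between the two page halves, while larger page geometries just advance linearly. The toggle in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int page_offset = 0, truesize = 2048; /* half of a 4K page */

	for (int i = 0; i < 4; i++) {
		printf("use buffer at offset %u\n", page_offset);
		page_offset ^= truesize;   /* 0 -> 2048 -> 0 -> 2048 */
	}
	return 0;
}
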
2026 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_get_rx_buffer()
2029 page_count(rx_buffer->page); in ixgbe_get_rx_buffer()
2033 prefetchw(rx_buffer->page); in ixgbe_get_rx_buffer()
2034 *skb = rx_buffer->skb; in ixgbe_get_rx_buffer()
2049 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_get_rx_buffer()
2050 rx_buffer->dma, in ixgbe_get_rx_buffer()
2051 rx_buffer->page_offset, in ixgbe_get_rx_buffer()
2055 rx_buffer->pagecnt_bias--; in ixgbe_get_rx_buffer()
2069 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) { in ixgbe_put_rx_buffer()
2071 IXGBE_CB(skb)->page_released = true; in ixgbe_put_rx_buffer()
2074 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_put_rx_buffer()
2079 __page_frag_cache_drain(rx_buffer->page, in ixgbe_put_rx_buffer()
2080 rx_buffer->pagecnt_bias); in ixgbe_put_rx_buffer()
2084 rx_buffer->page = NULL; in ixgbe_put_rx_buffer()
2085 rx_buffer->skb = NULL; in ixgbe_put_rx_buffer()
2093 unsigned int size = xdp->data_end - xdp->data; in ixgbe_construct_skb()
2097 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in ixgbe_construct_skb()
2098 xdp->data_hard_start); in ixgbe_construct_skb()
2103 net_prefetch(xdp->data); in ixgbe_construct_skb()
2105 /* Note, we get here by enabling legacy-rx via: in ixgbe_construct_skb()
2107 * ethtool --set-priv-flags <dev> legacy-rx on in ixgbe_construct_skb()
2110 * opposed to having legacy-rx off, where we process XDP in ixgbe_construct_skb()
2115 * xdp->data_meta will always point to xdp->data, since in ixgbe_construct_skb()
2117 * change in future for legacy-rx mode on, then lets also in ixgbe_construct_skb()
2118 * add xdp->data_meta handling here. in ixgbe_construct_skb()
2122 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); in ixgbe_construct_skb()
2128 IXGBE_CB(skb)->dma = rx_buffer->dma; in ixgbe_construct_skb()
2130 skb_add_rx_frag(skb, 0, rx_buffer->page, in ixgbe_construct_skb()
2131 xdp->data - page_address(rx_buffer->page), in ixgbe_construct_skb()
2134 rx_buffer->page_offset ^= truesize; in ixgbe_construct_skb()
2136 rx_buffer->page_offset += truesize; in ixgbe_construct_skb()
2140 xdp->data, ALIGN(size, sizeof(long))); in ixgbe_construct_skb()
2141 rx_buffer->pagecnt_bias++; in ixgbe_construct_skb()
2152 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbe_build_skb()
2157 SKB_DATA_ALIGN(xdp->data_end - in ixgbe_build_skb()
2158 xdp->data_hard_start); in ixgbe_build_skb()
2162 /* Prefetch first cache line of first page. If xdp->data_meta in ixgbe_build_skb()
2163 * is unused, this points exactly to xdp->data, otherwise we in ixgbe_build_skb()
2167 net_prefetch(xdp->data_meta); in ixgbe_build_skb()
2170 skb = build_skb(xdp->data_hard_start, truesize); in ixgbe_build_skb()
2175 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ixgbe_build_skb()
2176 __skb_put(skb, xdp->data_end - xdp->data); in ixgbe_build_skb()
2182 IXGBE_CB(skb)->dma = rx_buffer->dma; in ixgbe_build_skb()
2186 rx_buffer->page_offset ^= truesize; in ixgbe_build_skb()
2188 rx_buffer->page_offset += truesize; in ixgbe_build_skb()
2203 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp()
2208 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in ixgbe_run_xdp()
2223 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); in ixgbe_run_xdp()
2233 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp()
2240 return ERR_PTR(-result); in ixgbe_run_xdp()
2249 truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbe_rx_frame_truesize()
2251 truesize = rx_ring->rx_offset ? in ixgbe_rx_frame_truesize()
2252 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + in ixgbe_rx_frame_truesize()
2265 rx_buffer->page_offset ^= truesize; in ixgbe_rx_buffer_flip()
2267 rx_buffer->page_offset += truesize; in ixgbe_rx_buffer_flip()
2272 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2289 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_rx_irq()
2295 unsigned int offset = rx_ring->rx_offset; in ixgbe_clean_rx_irq()
2303 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbe_clean_rx_irq()
2318 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq()
2319 size = le16_to_cpu(rx_desc->wb.upper.length); in ixgbe_clean_rx_irq()
2335 hard_start = page_address(rx_buffer->page) + in ixgbe_clean_rx_irq()
2336 rx_buffer->page_offset - offset; in ixgbe_clean_rx_irq()
2346 unsigned int xdp_res = -PTR_ERR(skb); in ixgbe_clean_rx_irq()
2352 rx_buffer->pagecnt_bias++; in ixgbe_clean_rx_irq()
2368 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_clean_rx_irq()
2369 rx_buffer->pagecnt_bias++; in ixgbe_clean_rx_irq()
2385 total_rx_bytes += skb->len; in ixgbe_clean_rx_irq()
2397 mss = rx_ring->netdev->mtu - in ixgbe_clean_rx_irq()
2398 sizeof(struct fcoe_hdr) - in ixgbe_clean_rx_irq()
2399 sizeof(struct fc_frame_header) - in ixgbe_clean_rx_irq()
2425 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; in ixgbe_clean_rx_irq()
2431 writel(ring->next_to_use, ring->tail); in ixgbe_clean_rx_irq()
2434 u64_stats_update_begin(&rx_ring->syncp); in ixgbe_clean_rx_irq()
2435 rx_ring->stats.packets += total_rx_packets; in ixgbe_clean_rx_irq()
2436 rx_ring->stats.bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2437 u64_stats_update_end(&rx_ring->syncp); in ixgbe_clean_rx_irq()
2438 q_vector->rx.total_packets += total_rx_packets; in ixgbe_clean_rx_irq()
2439 q_vector->rx.total_bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2445 * ixgbe_configure_msix - Configure MSI-X hardware
2448 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2458 if (adapter->num_vfs > 32) { in ixgbe_configure_msix()
2459 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; in ixgbe_configure_msix()
2460 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); in ixgbe_configure_msix()
2467 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { in ixgbe_configure_msix()
2469 q_vector = adapter->q_vector[v_idx]; in ixgbe_configure_msix()
2471 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_configure_msix()
2472 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); in ixgbe_configure_msix()
2474 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_configure_msix()
2475 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); in ixgbe_configure_msix()
2480 switch (adapter->hw.mac.type) { in ixgbe_configure_msix()
2482 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, in ixgbe_configure_msix()
2490 ixgbe_set_ivar(adapter, -1, 1, v_idx); in ixgbe_configure_msix()
2495 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); in ixgbe_configure_msix()
2503 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); in ixgbe_configure_msix()
2507 * ixgbe_update_itr - update the dynamic ITR value based on statistics
2530 if (!ring_container->ring) in ixgbe_update_itr()
2533 /* If we didn't update within up to 1 - 2 jiffies we can assume in ixgbe_update_itr()
2538 if (time_after(next_update, ring_container->next_update)) in ixgbe_update_itr()
2541 packets = ring_container->total_packets; in ixgbe_update_itr()
2552 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; in ixgbe_update_itr()
2555 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY; in ixgbe_update_itr()
2559 bytes = ring_container->total_bytes; in ixgbe_update_itr()
2575 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; in ixgbe_update_itr()
2585 itr = q_vector->itr >> 2; in ixgbe_update_itr()
2594 itr = q_vector->itr >> 3; in ixgbe_update_itr()
2664 switch (q_vector->adapter->link_speed) { in ixgbe_update_itr()
2685 ring_container->itr = itr; in ixgbe_update_itr()
2688 ring_container->next_update = next_update + 1; in ixgbe_update_itr()
2690 ring_container->total_bytes = 0; in ixgbe_update_itr()
2691 ring_container->total_packets = 0; in ixgbe_update_itr()
2695 * ixgbe_write_eitr - write EITR register in hardware specific way
2704 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_write_eitr()
2705 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_write_eitr()
2706 int v_idx = q_vector->v_idx; in ixgbe_write_eitr()
2707 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; in ixgbe_write_eitr()
2709 switch (adapter->hw.mac.type) { in ixgbe_write_eitr()
2735 ixgbe_update_itr(q_vector, &q_vector->tx); in ixgbe_set_itr()
2736 ixgbe_update_itr(q_vector, &q_vector->rx); in ixgbe_set_itr()
2739 new_itr = min(q_vector->rx.itr, q_vector->tx.itr); in ixgbe_set_itr()
2745 if (new_itr != q_vector->itr) { in ixgbe_set_itr()
2747 q_vector->itr = new_itr; in ixgbe_set_itr()
2754 * ixgbe_check_overtemp_subtask - check for over temperature
2759 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_overtemp_subtask()
2760 u32 eicr = adapter->interrupt_event; in ixgbe_check_overtemp_subtask()
2763 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_check_overtemp_subtask()
2766 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) in ixgbe_check_overtemp_subtask()
2769 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_subtask()
2771 switch (hw->device_id) { in ixgbe_check_overtemp_subtask()
2776 * - This interrupt wasn't for our port. in ixgbe_check_overtemp_subtask()
2777 * - We may have missed the interrupt so always have to in ixgbe_check_overtemp_subtask()
2784 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { in ixgbe_check_overtemp_subtask()
2788 hw->mac.ops.check_link(hw, &speed, &link_up, false); in ixgbe_check_overtemp_subtask()
2795 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) in ixgbe_check_overtemp_subtask()
2801 rc = hw->phy.ops.check_overtemp(hw); in ixgbe_check_overtemp_subtask()
2806 if (adapter->hw.mac.type >= ixgbe_mac_X540) in ixgbe_check_overtemp_subtask()
2814 adapter->interrupt_event = 0; in ixgbe_check_overtemp_subtask()
2819 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_fan_failure()
2821 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && in ixgbe_check_fan_failure()
2831 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_overtemp_event()
2833 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) in ixgbe_check_overtemp_event()
2836 switch (adapter->hw.mac.type) { in ixgbe_check_overtemp_event()
2844 (!test_bit(__IXGBE_DOWN, &adapter->state))) { in ixgbe_check_overtemp_event()
2845 adapter->interrupt_event = eicr; in ixgbe_check_overtemp_event()
2846 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_event()
2853 adapter->interrupt_event = eicr; in ixgbe_check_overtemp_event()
2854 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_event()
2856 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, in ixgbe_check_overtemp_event()
2858 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, in ixgbe_check_overtemp_event()
2876 switch (hw->mac.type) { in ixgbe_is_sfp()
2878 if (hw->phy.type == ixgbe_phy_nl) in ixgbe_is_sfp()
2884 switch (hw->mac.ops.get_media_type(hw)) { in ixgbe_is_sfp()
2898 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_sfp_event()
2905 if (hw->mac.type >= ixgbe_mac_X540) in ixgbe_check_sfp_event()
2911 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_sfp_event()
2912 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_check_sfp_event()
2913 adapter->sfp_poll_time = 0; in ixgbe_check_sfp_event()
2918 if (adapter->hw.mac.type == ixgbe_mac_82599EB && in ixgbe_check_sfp_event()
2922 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_sfp_event()
2923 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_check_sfp_event()
2931 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_lsc()
2933 adapter->lsc_int++; in ixgbe_check_lsc()
2934 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_check_lsc()
2935 adapter->link_check_timeout = jiffies; in ixgbe_check_lsc()
2936 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_lsc()
2947 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_irq_enable_queues()
2949 switch (hw->mac.type) { in ixgbe_irq_enable_queues()
2973 * ixgbe_irq_enable - Enable default interrupt generation settings
2981 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_irq_enable()
2985 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) in ixgbe_irq_enable()
2988 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) in ixgbe_irq_enable()
2989 switch (adapter->hw.mac.type) { in ixgbe_irq_enable()
3002 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) in ixgbe_irq_enable()
3004 switch (adapter->hw.mac.type) { in ixgbe_irq_enable()
3013 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || in ixgbe_irq_enable()
3014 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || in ixgbe_irq_enable()
3015 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) in ixgbe_irq_enable()
3016 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); in ixgbe_irq_enable()
3017 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) in ixgbe_irq_enable()
3026 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && in ixgbe_irq_enable()
3027 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) in ixgbe_irq_enable()
3030 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); in ixgbe_irq_enable()
3034 IXGBE_WRITE_FLUSH(&adapter->hw); in ixgbe_irq_enable()
3040 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_msix_other()
3044 * Workaround for Silicon errata. Use clear-by-write instead in ixgbe_msix_other()
3045 * of clear-by-read. Reading with EICS will return the in ixgbe_msix_other()
3068 switch (hw->mac.type) { in ixgbe_msix_other()
3074 if (hw->phy.type == ixgbe_phy_x550em_ext_t && in ixgbe_msix_other()
3076 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; in ixgbe_msix_other()
3083 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_msix_other()
3091 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_msix_other()
3092 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_msix_other()
3094 &ring->state)) in ixgbe_msix_other()
3100 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_msix_other()
3116 /* re-enable the original interrupt state, no lsc, no queues */ in ixgbe_msix_other()
3117 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_msix_other()
3129 if (q_vector->rx.ring || q_vector->tx.ring) in ixgbe_msix_clean_rings()
3130 napi_schedule_irqoff(&q_vector->napi); in ixgbe_msix_clean_rings()
3136 * ixgbe_poll - NAPI Rx polling callback
3146 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_poll()
3152 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_poll()
3156 ixgbe_for_each_ring(ring, q_vector->tx) { in ixgbe_poll()
3157 bool wd = ring->xsk_pool ? in ixgbe_poll()
3171 if (q_vector->rx.count > 1) in ixgbe_poll()
3172 per_ring_budget = max(budget/q_vector->rx.count, 1); in ixgbe_poll()
3176 ixgbe_for_each_ring(ring, q_vector->rx) { in ixgbe_poll()
3177 int cleaned = ring->xsk_pool ? in ixgbe_poll()
3194 if (adapter->rx_itr_setting & 1) in ixgbe_poll()
3196 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_poll()
3198 BIT_ULL(q_vector->v_idx)); in ixgbe_poll()
3201 return min(work_done, budget - 1); in ixgbe_poll()
3205 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
3208 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
3213 struct net_device *netdev = adapter->netdev; in ixgbe_request_msix_irqs()
3217 for (vector = 0; vector < adapter->num_q_vectors; vector++) { in ixgbe_request_msix_irqs()
3218 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_request_msix_irqs()
3219 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbe_request_msix_irqs()
3221 if (q_vector->tx.ring && q_vector->rx.ring) { in ixgbe_request_msix_irqs()
3222 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbe_request_msix_irqs()
3223 "%s-TxRx-%u", netdev->name, ri++); in ixgbe_request_msix_irqs()
3225 } else if (q_vector->rx.ring) { in ixgbe_request_msix_irqs()
3226 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbe_request_msix_irqs()
3227 "%s-rx-%u", netdev->name, ri++); in ixgbe_request_msix_irqs()
3228 } else if (q_vector->tx.ring) { in ixgbe_request_msix_irqs()
3229 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbe_request_msix_irqs()
3230 "%s-tx-%u", netdev->name, ti++); in ixgbe_request_msix_irqs()
3235 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, in ixgbe_request_msix_irqs()
3236 q_vector->name, q_vector); in ixgbe_request_msix_irqs()
3243 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_request_msix_irqs()
3245 irq_set_affinity_hint(entry->vector, in ixgbe_request_msix_irqs()
3246 &q_vector->affinity_mask); in ixgbe_request_msix_irqs()
3250 err = request_irq(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
3251 ixgbe_msix_other, 0, netdev->name, adapter); in ixgbe_request_msix_irqs()
3261 vector--; in ixgbe_request_msix_irqs()
3262 irq_set_affinity_hint(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
3264 free_irq(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
3265 adapter->q_vector[vector]); in ixgbe_request_msix_irqs()
3267 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; in ixgbe_request_msix_irqs()
3268 pci_disable_msix(adapter->pdev); in ixgbe_request_msix_irqs()
3269 kfree(adapter->msix_entries); in ixgbe_request_msix_irqs()
3270 adapter->msix_entries = NULL; in ixgbe_request_msix_irqs()
3275 * ixgbe_intr - legacy mode Interrupt Handler
3282 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_intr()
3283 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_intr()
3292 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read in ixgbe_intr()
3303 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_intr()
3311 switch (hw->mac.type) { in ixgbe_intr()
3321 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_intr()
3336 napi_schedule_irqoff(&q_vector->napi); in ixgbe_intr()
3339 * re-enable link (maybe) and non-queue interrupts, no flush. in ixgbe_intr()
3340 * ixgbe_poll will re-enable the queue interrupts in ixgbe_intr()
3342 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_intr()
3349 * ixgbe_request_irq - initialize interrupts
3357 struct net_device *netdev = adapter->netdev; in ixgbe_request_irq()
3360 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) in ixgbe_request_irq()
3362 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) in ixgbe_request_irq()
3363 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, in ixgbe_request_irq()
3364 netdev->name, adapter); in ixgbe_request_irq()
3366 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, in ixgbe_request_irq()
3367 netdev->name, adapter); in ixgbe_request_irq()
3379 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { in ixgbe_free_irq()
3380 free_irq(adapter->pdev->irq, adapter); in ixgbe_free_irq()
3384 if (!adapter->msix_entries) in ixgbe_free_irq()
3387 for (vector = 0; vector < adapter->num_q_vectors; vector++) { in ixgbe_free_irq()
3388 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_free_irq()
3389 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbe_free_irq()
3392 if (!q_vector->rx.ring && !q_vector->tx.ring) in ixgbe_free_irq()
3396 irq_set_affinity_hint(entry->vector, NULL); in ixgbe_free_irq()
3398 free_irq(entry->vector, q_vector); in ixgbe_free_irq()
3401 free_irq(adapter->msix_entries[vector].vector, adapter); in ixgbe_free_irq()
3405 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
3410 switch (adapter->hw.mac.type) { in ixgbe_irq_disable()
3412 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); in ixgbe_irq_disable()
3419 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); in ixgbe_irq_disable()
3420 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); in ixgbe_irq_disable()
3421 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); in ixgbe_irq_disable()
3426 IXGBE_WRITE_FLUSH(&adapter->hw); in ixgbe_irq_disable()
3427 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { in ixgbe_irq_disable()
3430 for (vector = 0; vector < adapter->num_q_vectors; vector++) in ixgbe_irq_disable()
3431 synchronize_irq(adapter->msix_entries[vector].vector); in ixgbe_irq_disable()
3433 synchronize_irq(adapter->msix_entries[vector++].vector); in ixgbe_irq_disable()
3435 synchronize_irq(adapter->pdev->irq); in ixgbe_irq_disable()
3440 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
3446 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_configure_msi_and_legacy()
3457 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
3466 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_tx_ring()
3467 u64 tdba = ring->dma; in ixgbe_configure_tx_ring()
3470 u8 reg_idx = ring->reg_idx; in ixgbe_configure_tx_ring()
3472 ring->xsk_pool = NULL; in ixgbe_configure_tx_ring()
3474 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); in ixgbe_configure_tx_ring()
3484 ring->count * sizeof(union ixgbe_adv_tx_desc)); in ixgbe_configure_tx_ring()
3487 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); in ixgbe_configure_tx_ring()
3492 * - ITR is 0 as it could cause false TX hangs in ixgbe_configure_tx_ring()
3493 * - ITR is set to > 100k int/sec and BQL is enabled in ixgbe_configure_tx_ring()
3499 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) in ixgbe_configure_tx_ring()
3512 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_configure_tx_ring()
3513 ring->atr_sample_rate = adapter->atr_sample_rate; in ixgbe_configure_tx_ring()
3514 ring->atr_count = 0; in ixgbe_configure_tx_ring()
3515 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); in ixgbe_configure_tx_ring()
3517 ring->atr_sample_rate = 0; in ixgbe_configure_tx_ring()
3521 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { in ixgbe_configure_tx_ring()
3522 struct ixgbe_q_vector *q_vector = ring->q_vector; in ixgbe_configure_tx_ring()
3525 netif_set_xps_queue(ring->netdev, in ixgbe_configure_tx_ring()
3526 &q_vector->affinity_mask, in ixgbe_configure_tx_ring()
3527 ring->queue_index); in ixgbe_configure_tx_ring()
3530 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); in ixgbe_configure_tx_ring()
3533 memset(ring->tx_buffer_info, 0, in ixgbe_configure_tx_ring()
3534 sizeof(struct ixgbe_tx_buffer) * ring->count); in ixgbe_configure_tx_ring()
3540 if (hw->mac.type == ixgbe_mac_82598EB && in ixgbe_configure_tx_ring()
3548 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); in ixgbe_configure_tx_ring()
3555 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_mtqc()
3557 u8 tcs = adapter->hw_tcs; in ixgbe_setup_mtqc() local
3559 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_setup_mtqc()
3568 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_mtqc()
3570 if (tcs > 4) in ixgbe_setup_mtqc()
3572 else if (tcs > 1) in ixgbe_setup_mtqc()
3574 else if (adapter->ring_feature[RING_F_VMDQ].mask == in ixgbe_setup_mtqc()
3580 if (tcs > 4) { in ixgbe_setup_mtqc()
3582 } else if (tcs > 1) { in ixgbe_setup_mtqc()
3585 u8 max_txq = adapter->num_tx_queues + in ixgbe_setup_mtqc()
3586 adapter->num_xdp_queues; in ixgbe_setup_mtqc()
3597 if (tcs) { in ixgbe_setup_mtqc()
3603 /* re-enable the arbiter */ in ixgbe_setup_mtqc()
3609 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3616 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_tx()
3622 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_configure_tx()
3630 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_configure_tx()
3631 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbe_configure_tx()
3632 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_configure_tx()
3633 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); in ixgbe_configure_tx()
3639 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_enable_rx_drop()
3640 u8 reg_idx = ring->reg_idx; in ixgbe_enable_rx_drop()
3651 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_rx_drop()
3652 u8 reg_idx = ring->reg_idx; in ixgbe_disable_rx_drop()
3667 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_set_rx_drop_en()
3669 if (adapter->ixgbe_ieee_pfc) in ixgbe_set_rx_drop_en()
3670 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_set_rx_drop_en()
3674 * SR-IOV is enabled in ixgbe_set_rx_drop_en()
3681 if (adapter->num_vfs || (adapter->num_rx_queues > 1 && in ixgbe_set_rx_drop_en()
3682 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { in ixgbe_set_rx_drop_en()
3683 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_set_rx_drop_en()
3684 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3686 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_set_rx_drop_en()
3687 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3696 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_srrctl()
3698 u8 reg_idx = rx_ring->reg_idx; in ixgbe_configure_srrctl()
3700 if (hw->mac.type == ixgbe_mac_82598EB) { in ixgbe_configure_srrctl()
3701 u16 mask = adapter->ring_feature[RING_F_RSS].mask; in ixgbe_configure_srrctl()
3714 if (rx_ring->xsk_pool) { in ixgbe_configure_srrctl()
3715 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool); in ixgbe_configure_srrctl()
3725 if (hw->mac.type != ixgbe_mac_82599EB) in ixgbe_configure_srrctl()
3729 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { in ixgbe_configure_srrctl()
3742 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
3745 * - 82598/82599/X540: 128
3746 * - X550(non-SRIOV mode): 512
3747 * - X550(SRIOV mode): 64
3751 if (adapter->hw.mac.type < ixgbe_mac_X550) in ixgbe_rss_indir_tbl_entries()
3753 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_rss_indir_tbl_entries()
3760 * ixgbe_store_key - Write the RSS key to HW
3767 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_key()
3771 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); in ixgbe_store_key()
3775 * ixgbe_init_rss_key - Initialize adapter RSS key
3784 if (!adapter->rss_key) { in ixgbe_init_rss_key()
3787 return -ENOMEM; in ixgbe_init_rss_key()
3790 adapter->rss_key = rss_key; in ixgbe_init_rss_key()
3797 * ixgbe_store_reta - Write the RETA table to HW
3805 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_reta()
3808 u8 *indir_tbl = adapter->rss_indir_tbl; in ixgbe_store_reta()
3811 * - 82598: 8 bit wide entries containing pair of 4 bit RSS in ixgbe_store_reta()
3813 * - 82599/X540: 8 bit wide entries containing 4 bit RSS index in ixgbe_store_reta()
3814 * - X550: 8 bit wide entries containing 6 bit RSS index in ixgbe_store_reta()
3816 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_store_reta()
3828 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), in ixgbe_store_reta()
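
ixgbe_store_reta() packs four 8-bit indirection entries per 32-bit register, spilling into ERETA once the first 128 entries (32 RETA registers) are used on X550-class parts. A standalone sketch of the packing and register selection; RETA/ERETA appear only as print labels here:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t tbl[512];
	uint32_t reta = 0;

	for (int i = 0; i < 512; i++)
		tbl[i] = i % 16;               /* toy indirection table */

	for (int i = 0; i < 512; i++) {
		reta |= (uint32_t)tbl[i] << ((i & 3) * 8);
		if ((i & 3) != 3)
			continue;
		if (i < 128)                   /* RETA[0..31] */
			printf("RETA[%d]  = %08x\n", i >> 2, reta);
		else                           /* ERETA picks up entries 128+ */
			printf("ERETA[%d] = %08x\n", (i >> 2) - 32, reta);
		reta = 0;
	}
	return 0;
}
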
3836 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
3844 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_vfreta()
3849 u16 pool = adapter->num_rx_pools; in ixgbe_store_vfreta()
3851 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; in ixgbe_store_vfreta()
3855 while (pool--) in ixgbe_store_vfreta()
3867 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_reta()
3869 /* Program table for at least 4 queues w/ SR-IOV so that VFs can in ixgbe_setup_reta()
3873 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) in ixgbe_setup_reta()
3880 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); in ixgbe_setup_reta()
3886 adapter->rss_indir_tbl[i] = j; in ixgbe_setup_reta()
3894 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_vfreta()
3895 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_vfreta()
3900 u16 pool = adapter->num_rx_pools; in ixgbe_setup_vfreta()
3902 while (pool--) in ixgbe_setup_vfreta()
3905 *(adapter->rss_key + i)); in ixgbe_setup_vfreta()
3913 adapter->rss_indir_tbl[i] = j; in ixgbe_setup_vfreta()
3921 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_mrqc()
3930 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_setup_mrqc()
3931 if (adapter->ring_feature[RING_F_RSS].mask) in ixgbe_setup_mrqc()
3934 u8 tcs = adapter->hw_tcs; in ixgbe_setup_mrqc() local
3936 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_mrqc()
3937 if (tcs > 4) in ixgbe_setup_mrqc()
3938 mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ in ixgbe_setup_mrqc()
3939 else if (tcs > 1) in ixgbe_setup_mrqc()
3940 mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ in ixgbe_setup_mrqc()
3941 else if (adapter->ring_feature[RING_F_VMDQ].mask == in ixgbe_setup_mrqc()
3950 if (hw->mac.type >= ixgbe_mac_X550) in ixgbe_setup_mrqc()
3953 if (tcs > 4) in ixgbe_setup_mrqc()
3955 else if (tcs > 1) in ixgbe_setup_mrqc()
3968 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) in ixgbe_setup_mrqc()
3970 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) in ixgbe_setup_mrqc()
3973 if ((hw->mac.type >= ixgbe_mac_X550) && in ixgbe_setup_mrqc()
3974 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { in ixgbe_setup_mrqc()
3975 u16 pool = adapter->num_rx_pools; in ixgbe_setup_mrqc()
3986 while (pool--) in ixgbe_setup_mrqc()
3998 * ixgbe_configure_rscctl - enable RSC for the indicated ring
4005 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rscctl()
4007 u8 reg_idx = ring->reg_idx; in ixgbe_configure_rscctl()
4027 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_rx_desc_queue_enable()
4030 u8 reg_idx = ring->reg_idx; in ixgbe_rx_desc_queue_enable()
4032 if (ixgbe_removed(hw->hw_addr)) in ixgbe_rx_desc_queue_enable()
4035 if (hw->mac.type == ixgbe_mac_82598EB && in ixgbe_rx_desc_queue_enable()
4042 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbe_rx_desc_queue_enable()
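The do/while above is the classic bounded poll for a queue-enable bit: sleep, re-read, and give up after a fixed number of retries. A self-contained sketch of the same pattern; the bit value, retry count, and sleep interval are illustrative rather than taken from the hardware header:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define RXDCTL_ENABLE 0x02000000u   /* illustrative bit, not the header's */

/* Stand-in for the MMIO read; pretends the bit latches on the 3rd poll. */
static uint32_t read_rxdctl(void)
{
        static int polls;
        return (++polls >= 3) ? RXDCTL_ENABLE : 0;
}

int main(void)
{
        int wait_loop = 10;          /* bounded retries, as in the do/while */
        uint32_t rxdctl;

        do {
                usleep(100);         /* the driver sleeps ~1ms per pass */
                rxdctl = read_rxdctl();
        } while (--wait_loop && !(rxdctl & RXDCTL_ENABLE));

        if (!(rxdctl & RXDCTL_ENABLE))
                fprintf(stderr, "RXDCTL.ENABLE never asserted\n");
        else
                printf("queue enabled, %d retries left\n", wait_loop);
        return 0;
}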
4053 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rx_ring()
4055 u64 rdba = ring->dma; in ixgbe_configure_rx_ring()
4057 u8 reg_idx = ring->reg_idx; in ixgbe_configure_rx_ring()
4059 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in ixgbe_configure_rx_ring()
4060 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); in ixgbe_configure_rx_ring()
4061 if (ring->xsk_pool) { in ixgbe_configure_rx_ring()
4062 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in ixgbe_configure_rx_ring()
4065 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in ixgbe_configure_rx_ring()
4067 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in ixgbe_configure_rx_ring()
4082 ring->count * sizeof(union ixgbe_adv_rx_desc)); in ixgbe_configure_rx_ring()
4088 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); in ixgbe_configure_rx_ring()
4093 if (hw->mac.type == ixgbe_mac_82598EB) { in ixgbe_configure_rx_ring()
4105 } else if (hw->mac.type != ixgbe_mac_82599EB) { in ixgbe_configure_rx_ring()
4114 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) in ixgbe_configure_rx_ring()
4120 ring->rx_offset = ixgbe_rx_offset(ring); in ixgbe_configure_rx_ring()
4122 if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { in ixgbe_configure_rx_ring()
4123 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); in ixgbe_configure_rx_ring()
4129 ring->rx_buf_len = xsk_buf_len; in ixgbe_configure_rx_ring()
4133 memset(ring->rx_buffer_info, 0, in ixgbe_configure_rx_ring()
4134 sizeof(struct ixgbe_rx_buffer) * ring->count); in ixgbe_configure_rx_ring()
4138 rx_desc->wb.upper.length = 0; in ixgbe_configure_rx_ring()
4145 if (ring->xsk_pool) in ixgbe_configure_rx_ring()
4153 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_psrtype()
4154 int rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_psrtype()
4155 u16 pool = adapter->num_rx_pools; in ixgbe_setup_psrtype()
4164 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_setup_psrtype()
4172 while (pool--) in ixgbe_setup_psrtype()
4178 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_virtualization()
4179 u16 pool = adapter->num_rx_pools; in ixgbe_configure_virtualization()
4184 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_configure_virtualization()
4198 while (pool--) in ixgbe_configure_virtualization()
4206 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); in ixgbe_configure_virtualization()
4208 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); in ixgbe_configure_virtualization()
4209 if (adapter->bridge_mode == BRIDGE_MODE_VEB) in ixgbe_configure_virtualization()
4213 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); in ixgbe_configure_virtualization()
4216 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; in ixgbe_configure_virtualization()
4220 * i.e. 32 or 64 VFs for SR-IOV in ixgbe_configure_virtualization()
4222 switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_configure_virtualization()
4236 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_configure_virtualization()
4238 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, in ixgbe_configure_virtualization()
4239 adapter->vfinfo[i].spoofchk_enabled); in ixgbe_configure_virtualization()
4242 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, in ixgbe_configure_virtualization()
4243 adapter->vfinfo[i].rss_query_enabled); in ixgbe_configure_virtualization()
4249 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rx_buffer_len()
4250 struct net_device *netdev = adapter->netdev; in ixgbe_set_rx_buffer_len()
4251 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbe_set_rx_buffer_len()
4258 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && in ixgbe_set_rx_buffer_len()
4285 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_set_rx_buffer_len()
4286 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
4289 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4290 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4292 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_set_rx_buffer_len()
4295 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) in ixgbe_set_rx_buffer_len()
4296 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4298 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) in ixgbe_set_rx_buffer_len()
4301 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4304 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_set_rx_buffer_len()
4305 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4309 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4316 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_rdrxctl()
4319 switch (hw->mac.type) { in ixgbe_setup_rdrxctl()
4336 if (adapter->num_vfs) in ixgbe_setup_rdrxctl()
4358 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
4365 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rx()
4370 hw->mac.ops.disable_rx(hw); in ixgbe_configure_rx()
4378 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) in ixgbe_configure_rx()
4395 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_configure_rx()
4396 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
4400 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_configure_rx()
4405 hw->mac.ops.enable_rx_dma(hw, rxctrl); in ixgbe_configure_rx()
4412 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_rx_add_vid()
4415 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_vlan_rx_add_vid()
4416 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); in ixgbe_vlan_rx_add_vid()
4418 set_bit(vid, adapter->active_vlans); in ixgbe_vlan_rx_add_vid()
4433 for (idx = IXGBE_VLVF_ENTRIES; --idx;) { in ixgbe_find_vlvf_entry()
4444 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_pf_promisc_vlvf()
4461 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_update_pf_promisc_vlvf()
4471 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_rx_kill_vid()
4474 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_vlan_rx_kill_vid()
4475 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); in ixgbe_vlan_rx_kill_vid()
4477 clear_bit(vid, adapter->active_vlans); in ixgbe_vlan_rx_kill_vid()
4483 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
4488 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_strip_disable()
4492 switch (hw->mac.type) { in ixgbe_vlan_strip_disable()
4503 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_vlan_strip_disable()
4504 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
4506 if (!netif_is_ixgbe(ring->netdev)) in ixgbe_vlan_strip_disable()
4509 j = ring->reg_idx; in ixgbe_vlan_strip_disable()
4521 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
4526 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_strip_enable()
4530 switch (hw->mac.type) { in ixgbe_vlan_strip_enable()
4541 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_vlan_strip_enable()
4542 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
4544 if (!netif_is_ixgbe(ring->netdev)) in ixgbe_vlan_strip_enable()
4547 j = ring->reg_idx; in ixgbe_vlan_strip_enable()
4560 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_promisc_enable()
4565 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { in ixgbe_vlan_promisc_enable()
4566 /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ in ixgbe_vlan_promisc_enable()
4576 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_vlan_promisc_enable()
4580 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) in ixgbe_vlan_promisc_enable()
4584 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; in ixgbe_vlan_promisc_enable()
4587 for (i = IXGBE_VLVF_ENTRIES; --i;) { in ixgbe_vlan_promisc_enable()
4596 for (i = hw->mac.vft_size; i--;) in ixgbe_vlan_promisc_enable()
4603 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_scrub_vfta()
4609 for (i = IXGBE_VLVF_ENTRIES; --i;) { in ixgbe_scrub_vfta()
4621 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); in ixgbe_scrub_vfta()
4624 if (test_bit(vid, adapter->active_vlans)) in ixgbe_scrub_vfta()
4636 for (i = VFTA_BLOCK_SIZE; i--;) { in ixgbe_scrub_vfta()
4641 vfta[i] |= adapter->active_vlans[word] >> bits; in ixgbe_scrub_vfta()
4649 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_promisc_disable()
4657 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || in ixgbe_vlan_promisc_disable()
4658 hw->mac.type == ixgbe_mac_82598EB) in ixgbe_vlan_promisc_disable()
4662 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_vlan_promisc_disable()
4666 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; in ixgbe_vlan_promisc_disable()
4668 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) in ixgbe_vlan_promisc_disable()
4676 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in ixgbe_restore_vlan()
4678 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) in ixgbe_restore_vlan()
4679 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in ixgbe_restore_vlan()
4683 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
4687 * Returns: -ENOMEM on failure
4694 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_write_mc_addr_list()
4699 if (hw->mac.ops.update_mc_addr_list) in ixgbe_write_mc_addr_list()
4700 hw->mac.ops.update_mc_addr_list(hw, netdev); in ixgbe_write_mc_addr_list()
4702 return -ENOMEM; in ixgbe_write_mc_addr_list()
4714 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_full_sync_mac_table()
4715 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_full_sync_mac_table()
4718 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_full_sync_mac_table()
4719 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; in ixgbe_full_sync_mac_table()
4721 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) in ixgbe_full_sync_mac_table()
4722 hw->mac.ops.set_rar(hw, i, in ixgbe_full_sync_mac_table()
4723 mac_table->addr, in ixgbe_full_sync_mac_table()
4724 mac_table->pool, in ixgbe_full_sync_mac_table()
4727 hw->mac.ops.clear_rar(hw, i); in ixgbe_full_sync_mac_table()
4734 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_sync_mac_table()
4735 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sync_mac_table()
4738 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_sync_mac_table()
4739 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) in ixgbe_sync_mac_table()
4742 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; in ixgbe_sync_mac_table()
4744 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) in ixgbe_sync_mac_table()
4745 hw->mac.ops.set_rar(hw, i, in ixgbe_sync_mac_table()
4746 mac_table->addr, in ixgbe_sync_mac_table()
4747 mac_table->pool, in ixgbe_sync_mac_table()
4750 hw->mac.ops.clear_rar(hw, i); in ixgbe_sync_mac_table()
4756 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_flush_sw_mac_table()
4757 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_flush_sw_mac_table()
4760 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_flush_sw_mac_table()
4761 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; in ixgbe_flush_sw_mac_table()
4762 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; in ixgbe_flush_sw_mac_table()
4770 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_available_rars()
4771 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_available_rars()
4774 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_available_rars()
4776 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) in ixgbe_available_rars()
4780 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { in ixgbe_available_rars()
4781 if (mac_table->pool != pool) in ixgbe_available_rars()
4794 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_mac_set_default_filter()
4795 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mac_set_default_filter()
4797 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); in ixgbe_mac_set_default_filter()
4798 mac_table->pool = VMDQ_P(0); in ixgbe_mac_set_default_filter()
4800 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; in ixgbe_mac_set_default_filter()
4802 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, in ixgbe_mac_set_default_filter()
4809 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_add_mac_filter()
4810 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_add_mac_filter()
4814 return -EINVAL; in ixgbe_add_mac_filter()
4816 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_add_mac_filter()
4817 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) in ixgbe_add_mac_filter()
4820 ether_addr_copy(mac_table->addr, addr); in ixgbe_add_mac_filter()
4821 mac_table->pool = pool; in ixgbe_add_mac_filter()
4823 mac_table->state |= IXGBE_MAC_STATE_MODIFIED | in ixgbe_add_mac_filter()
4831 return -ENOMEM; in ixgbe_add_mac_filter()
4837 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_del_mac_filter()
4838 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_del_mac_filter()
4842 return -EINVAL; in ixgbe_del_mac_filter()
4845 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_del_mac_filter()
4847 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) in ixgbe_del_mac_filter()
4850 if (mac_table->pool != pool) in ixgbe_del_mac_filter()
4853 if (!ether_addr_equal(addr, mac_table->addr)) in ixgbe_del_mac_filter()
4856 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; in ixgbe_del_mac_filter()
4857 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; in ixgbe_del_mac_filter()
4864 return -ENOMEM; in ixgbe_del_mac_filter()
4887 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4898 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rx_mode()
4900 netdev_features_t features = netdev->features; in ixgbe_set_rx_mode()
4907 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ in ixgbe_set_rx_mode()
4914 if (netdev->flags & IFF_PROMISC) { in ixgbe_set_rx_mode()
4915 hw->addr_ctrl.user_set_promisc = true; in ixgbe_set_rx_mode()
4920 if (netdev->flags & IFF_ALLMULTI) { in ixgbe_set_rx_mode()
4924 hw->addr_ctrl.user_set_promisc = false; in ixgbe_set_rx_mode()
4949 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_set_rx_mode()
4985 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) in ixgbe_napi_enable_all()
4986 napi_enable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_enable_all()
4993 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) in ixgbe_napi_disable_all()
4994 napi_disable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_disable_all()
5000 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_udp_tunnel_sync()
5005 adapter->vxlan_port = ti.port; in ixgbe_udp_tunnel_sync()
5007 adapter->geneve_port = ti.port; in ixgbe_udp_tunnel_sync()
5010 ntohs(adapter->vxlan_port) | in ixgbe_udp_tunnel_sync()
5011 ntohs(adapter->geneve_port) << in ixgbe_udp_tunnel_sync()
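The shifted OR above suggests both UDP tunnel ports share one 32-bit register, VXLAN in the low half and GENEVE in the high half. A small sketch of that packing; the register layout follows that reading and is an assumption, not the datasheet:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Ports kept network-order in the adapter, as the ntohs() implies. */
        uint16_t vxlan_port = htons(4789);
        uint16_t geneve_port = htons(6081);

        /* VXLAN port in the low 16 bits, GENEVE in the high 16 bits. */
        uint32_t vxlanctrl = ntohs(vxlan_port) |
                             (uint32_t)ntohs(geneve_port) << 16;

        printf("tunnel-port register = 0x%08x\n", vxlanctrl);
        return 0;
}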
5035 * ixgbe_configure_dcb - Configure DCB hardware
5044 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_dcb()
5045 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbe_configure_dcb()
5047 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { in ixgbe_configure_dcb()
5048 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_configure_dcb()
5049 netif_set_gso_max_size(adapter->netdev, 65536); in ixgbe_configure_dcb()
5053 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_configure_dcb()
5054 netif_set_gso_max_size(adapter->netdev, 32768); in ixgbe_configure_dcb()
5057 if (adapter->netdev->features & NETIF_F_FCOE_MTU) in ixgbe_configure_dcb()
5062 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { in ixgbe_configure_dcb()
5063 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, in ixgbe_configure_dcb()
5065 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, in ixgbe_configure_dcb()
5067 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); in ixgbe_configure_dcb()
5068 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { in ixgbe_configure_dcb()
5069 ixgbe_dcb_hw_ets(&adapter->hw, in ixgbe_configure_dcb()
5070 adapter->ixgbe_ieee_ets, in ixgbe_configure_dcb()
5072 ixgbe_dcb_hw_pfc_config(&adapter->hw, in ixgbe_configure_dcb()
5073 adapter->ixgbe_ieee_pfc->pfc_en, in ixgbe_configure_dcb()
5074 adapter->ixgbe_ieee_ets->prio_tc); in ixgbe_configure_dcb()
5078 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_configure_dcb()
5080 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; in ixgbe_configure_dcb()
5087 /* write msb to all 8 TCs in one write */ in ixgbe_configure_dcb()
5097 * ixgbe_hpbthresh - calculate high water mark for flow control
5104 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_hpbthresh()
5105 struct net_device *dev = adapter->netdev; in ixgbe_hpbthresh()
5110 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; in ixgbe_hpbthresh()
5114 if ((dev->features & NETIF_F_FCOE_MTU) && in ixgbe_hpbthresh()
5121 switch (hw->mac.type) { in ixgbe_hpbthresh()
5134 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_hpbthresh()
5141 marker = rx_pba - kb; in ixgbe_hpbthresh()
5158 * ixgbe_lpbthresh - calculate low water mark for flow control
5165 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_lpbthresh()
5166 struct net_device *dev = adapter->netdev; in ixgbe_lpbthresh()
5171 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbe_lpbthresh()
5175 if ((dev->features & NETIF_F_FCOE_MTU) && in ixgbe_lpbthresh()
5177 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) in ixgbe_lpbthresh()
5182 switch (hw->mac.type) { in ixgbe_lpbthresh()
5199 * ixgbe_pbthresh_setup - calculate and setup high low water marks
5203 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_pbthresh_setup()
5204 int num_tc = adapter->hw_tcs; in ixgbe_pbthresh_setup()
5211 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); in ixgbe_pbthresh_setup()
5212 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); in ixgbe_pbthresh_setup()
5215 if (hw->fc.low_water[i] > hw->fc.high_water[i]) in ixgbe_pbthresh_setup()
5216 hw->fc.low_water[i] = 0; in ixgbe_pbthresh_setup()
5220 hw->fc.high_water[i] = 0; in ixgbe_pbthresh_setup()
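ixgbe_hpbthresh derives the high mark as packet-buffer size minus frame headroom (marker = rx_pba - kb), and ixgbe_pbthresh_setup above zeroes any low mark that would sit above the high mark. A toy sketch of that sanity check with made-up numbers:

#include <stdio.h>

int main(void)
{
        int rx_pba = 512;             /* packet buffer size, KB (made up) */
        int kb = 24;                  /* headroom for one max frame, KB */
        int high_water = rx_pba - kb; /* marker = rx_pba - kb, as above */
        int low_water = 600;          /* deliberately above the high mark */

        /* A low mark above the high mark is nonsense; disable it instead. */
        if (low_water > high_water)
                low_water = 0;

        printf("high=%dKB low=%dKB\n", high_water, low_water);
        return 0;
}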
5225 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_pb()
5227 u8 tc = adapter->hw_tcs; in ixgbe_configure_pb()
5229 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || in ixgbe_configure_pb()
5230 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) in ixgbe_configure_pb()
5231 hdrm = 32 << adapter->fdir_pballoc; in ixgbe_configure_pb()
5235 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); in ixgbe_configure_pb()
5241 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_fdir_filter_restore()
5246 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_restore()
5248 if (!hlist_empty(&adapter->fdir_filter_list)) in ixgbe_fdir_filter_restore()
5249 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); in ixgbe_fdir_filter_restore()
5252 &adapter->fdir_filter_list, fdir_node) { in ixgbe_fdir_filter_restore()
5253 if (filter->action == IXGBE_FDIR_DROP_QUEUE) { in ixgbe_fdir_filter_restore()
5256 u32 ring = ethtool_get_flow_spec_ring(filter->action); in ixgbe_fdir_filter_restore()
5257 u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); in ixgbe_fdir_filter_restore()
5259 if (!vf && (ring >= adapter->num_rx_queues)) { in ixgbe_fdir_filter_restore()
5264 ((vf > adapter->num_vfs) || in ixgbe_fdir_filter_restore()
5265 ring >= adapter->num_rx_queues_per_pool)) { in ixgbe_fdir_filter_restore()
5273 queue = adapter->rx_ring[ring]->reg_idx; in ixgbe_fdir_filter_restore()
5275 queue = ((vf - 1) * in ixgbe_fdir_filter_restore()
5276 adapter->num_rx_queues_per_pool) + ring; in ixgbe_fdir_filter_restore()
5280 &filter->filter, filter->sw_idx, queue); in ixgbe_fdir_filter_restore()
5283 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_restore()
5287 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5292 u16 i = rx_ring->next_to_clean; in ixgbe_clean_rx_ring()
5293 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbe_clean_rx_ring()
5295 if (rx_ring->xsk_pool) { in ixgbe_clean_rx_ring()
5301 while (i != rx_ring->next_to_alloc) { in ixgbe_clean_rx_ring()
5302 if (rx_buffer->skb) { in ixgbe_clean_rx_ring()
5303 struct sk_buff *skb = rx_buffer->skb; in ixgbe_clean_rx_ring()
5304 if (IXGBE_CB(skb)->page_released) in ixgbe_clean_rx_ring()
5305 dma_unmap_page_attrs(rx_ring->dev, in ixgbe_clean_rx_ring()
5306 IXGBE_CB(skb)->dma, in ixgbe_clean_rx_ring()
5316 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_clean_rx_ring()
5317 rx_buffer->dma, in ixgbe_clean_rx_ring()
5318 rx_buffer->page_offset, in ixgbe_clean_rx_ring()
5323 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_clean_rx_ring()
5327 __page_frag_cache_drain(rx_buffer->page, in ixgbe_clean_rx_ring()
5328 rx_buffer->pagecnt_bias); in ixgbe_clean_rx_ring()
5332 if (i == rx_ring->count) { in ixgbe_clean_rx_ring()
5334 rx_buffer = rx_ring->rx_buffer_info; in ixgbe_clean_rx_ring()
5339 rx_ring->next_to_alloc = 0; in ixgbe_clean_rx_ring()
5340 rx_ring->next_to_clean = 0; in ixgbe_clean_rx_ring()
5341 rx_ring->next_to_use = 0; in ixgbe_clean_rx_ring()
5347 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_fwd_ring_up()
5348 int num_tc = netdev_get_num_tc(adapter->netdev); in ixgbe_fwd_ring_up()
5349 struct net_device *vdev = accel->netdev; in ixgbe_fwd_ring_up()
5352 baseq = accel->pool * adapter->num_rx_queues_per_pool; in ixgbe_fwd_ring_up()
5354 accel->pool, adapter->num_rx_pools, in ixgbe_fwd_ring_up()
5355 baseq, baseq + adapter->num_rx_queues_per_pool); in ixgbe_fwd_ring_up()
5357 accel->rx_base_queue = baseq; in ixgbe_fwd_ring_up()
5358 accel->tx_base_queue = baseq; in ixgbe_fwd_ring_up()
5362 netdev_bind_sb_channel_queue(adapter->netdev, vdev, in ixgbe_fwd_ring_up()
5365 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) in ixgbe_fwd_ring_up()
5366 adapter->rx_ring[baseq + i]->netdev = vdev; in ixgbe_fwd_ring_up()
5376 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, in ixgbe_fwd_ring_up()
5377 VMDQ_P(accel->pool)); in ixgbe_fwd_ring_up()
5384 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) in ixgbe_fwd_ring_up()
5385 adapter->rx_ring[baseq + i]->netdev = NULL; in ixgbe_fwd_ring_up()
5390 netdev_unbind_sb_channel(adapter->netdev, vdev); in ixgbe_fwd_ring_up()
5393 clear_bit(accel->pool, adapter->fwd_bitmask); in ixgbe_fwd_ring_up()
5402 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; in ixgbe_macvlan_up()
5423 netdev_walk_all_upper_dev_rcu(adapter->netdev, in ixgbe_configure_dfwd()
5429 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure()
5441 ixgbe_set_rx_mode(adapter->netdev); in ixgbe_configure()
5445 switch (hw->mac.type) { in ixgbe_configure()
5448 hw->mac.ops.disable_rx_buff(hw); in ixgbe_configure()
5454 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_configure()
5455 ixgbe_init_fdir_signature_82599(&adapter->hw, in ixgbe_configure()
5456 adapter->fdir_pballoc); in ixgbe_configure()
5457 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { in ixgbe_configure()
5458 ixgbe_init_fdir_perfect_82599(&adapter->hw, in ixgbe_configure()
5459 adapter->fdir_pballoc); in ixgbe_configure()
5463 switch (hw->mac.type) { in ixgbe_configure()
5466 hw->mac.ops.enable_rx_buff(hw); in ixgbe_configure()
5474 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) in ixgbe_configure()
5489 * ixgbe_sfp_link_config - set up SFP+ link
5500 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_sfp_link_config()
5501 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_sfp_link_config()
5503 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_link_config()
5504 adapter->sfp_poll_time = 0; in ixgbe_sfp_link_config()
5508 * ixgbe_non_sfp_link_config - set up non-SFP+ link
5519 if (hw->mac.ops.check_link) in ixgbe_non_sfp_link_config()
5520 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); in ixgbe_non_sfp_link_config()
5525 speed = hw->phy.autoneg_advertised; in ixgbe_non_sfp_link_config()
5526 if (!speed && hw->mac.ops.get_link_capabilities) { in ixgbe_non_sfp_link_config()
5527 ret = hw->mac.ops.get_link_capabilities(hw, &speed, in ixgbe_non_sfp_link_config()
5536 if (hw->mac.ops.setup_link) in ixgbe_non_sfp_link_config()
5537 ret = hw->mac.ops.setup_link(hw, speed, link_up); in ixgbe_non_sfp_link_config()
5544 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_gpie()
5547 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { in ixgbe_setup_gpie()
5552 * use EIAM to auto-mask when MSI-X interrupt is asserted in ixgbe_setup_gpie()
5555 switch (hw->mac.type) { in ixgbe_setup_gpie()
5570 /* legacy interrupts, use EIAM to auto-mask when reading EICR, in ixgbe_setup_gpie()
5578 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_gpie()
5581 switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_setup_gpie()
5595 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { in ixgbe_setup_gpie()
5596 switch (adapter->hw.mac.type) { in ixgbe_setup_gpie()
5606 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) in ixgbe_setup_gpie()
5609 switch (hw->mac.type) { in ixgbe_setup_gpie()
5626 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_up_complete()
5633 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) in ixgbe_up_complete()
5639 if (hw->mac.ops.enable_tx_laser) in ixgbe_up_complete()
5640 hw->mac.ops.enable_tx_laser(hw); in ixgbe_up_complete()
5642 if (hw->phy.ops.set_phy_power) in ixgbe_up_complete()
5643 hw->phy.ops.set_phy_power(hw, true); in ixgbe_up_complete()
5646 clear_bit(__IXGBE_DOWN, &adapter->state); in ixgbe_up_complete()
5665 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { in ixgbe_up_complete()
5673 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_up_complete()
5674 adapter->link_check_timeout = jiffies; in ixgbe_up_complete()
5675 mod_timer(&adapter->service_timer, jiffies); in ixgbe_up_complete()
5686 netif_trans_update(adapter->netdev); in ixgbe_reinit_locked()
5688 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_reinit_locked()
5690 if (adapter->hw.phy.type == ixgbe_phy_fw) in ixgbe_reinit_locked()
5694 * If SR-IOV is enabled then wait a bit before bringing the adapter in ixgbe_reinit_locked()
5696 * two second wait is based upon the watchdog timer cycle in in ixgbe_reinit_locked()
5699 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_reinit_locked()
5702 clear_bit(__IXGBE_RESETTING, &adapter->state); in ixgbe_reinit_locked()
5717 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2); in ixgbe_get_completion_timeout()
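The read above fetches PCIe Device Control 2, whose low four bits hold the spec-defined Completion Timeout Value. A minimal sketch of extracting that field; mapping the 4-bit code to an actual microsecond range is spec- and device-dependent and omitted here:

#include <stdint.h>
#include <stdio.h>

/* Completion Timeout Value: bits 3:0 of PCIe Device Control 2. */
#define COMP_TIMEOUT_MASK 0x000f

int main(void)
{
        uint16_t devctl2 = 0x0006;   /* example register value */
        unsigned int range_code = devctl2 & COMP_TIMEOUT_MASK;

        /* The code selects a spec-defined timeout range; translating it
         * to a wait budget is left to the caller, as the driver does. */
        printf("completion timeout range code: 0x%x\n", range_code);
        return 0;
}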
5753 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_rx()
5758 hw->mac.ops.disable_rx(hw); in ixgbe_disable_rx()
5760 if (ixgbe_removed(hw->hw_addr)) in ixgbe_disable_rx()
5764 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_disable_rx()
5765 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
5766 u8 reg_idx = ring->reg_idx; in ixgbe_disable_rx()
5777 if (hw->mac.type == ixgbe_mac_82598EB && in ixgbe_disable_rx()
5784 * the time increases we will wait for larger periods of time. in ixgbe_disable_rx()
5788 * of that wait is that it totals up to 100x whatever interval we in ixgbe_disable_rx()
5789 * choose. Since our minimum wait is 100us we can just divide the in ixgbe_disable_rx()
5797 while (wait_loop--) { in ixgbe_disable_rx()
5807 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_disable_rx()
5808 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
5809 u8 reg_idx = ring->reg_idx; in ixgbe_disable_rx()
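The comment fragments in ixgbe_disable_rx describe growing the per-poll delay so the polls sum to 100x a base interval of timeout/100, i.e. the whole completion-timeout budget. One pattern with that property is odd multiples (1x, 3x, 5x, ...) over ten polls, which is what this sketch assumes; the driver's exact step count is not visible above:

#include <stdio.h>

int main(void)
{
        unsigned int timeout_us = 50000;       /* e.g. completion timeout */
        unsigned int base = timeout_us / 100;  /* minimum wait, >= 100us */
        unsigned int delay = base, total = 0;
        int polls;

        for (polls = 0; polls < 10; polls++) {
                total += delay;       /* a usleep_range(delay, ...) here */
                delay += 2 * base;    /* 1x, 3x, 5x, ... 19x of base */
        }
        /* Sum of the first ten odd multiples is exactly 100x base. */
        printf("10 polls, %uus waited of a %uus budget\n", total, timeout_us);
        return 0;
}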
5825 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_tx()
5829 if (ixgbe_removed(hw->hw_addr)) in ixgbe_disable_tx()
5833 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_disable_tx()
5834 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_disable_tx()
5835 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
5841 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_disable_tx()
5842 struct ixgbe_ring *ring = adapter->xdp_ring[i]; in ixgbe_disable_tx()
5843 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
5859 * the time increases we will wait for larger periods of time. in ixgbe_disable_tx()
5863 * of that wait is that it totals up to 100x whatever interval we in ixgbe_disable_tx()
5864 * choose. Since our minimum wait is 100us we can just divide the in ixgbe_disable_tx()
5872 while (wait_loop--) { in ixgbe_disable_tx()
5882 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_disable_tx()
5883 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_disable_tx()
5884 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
5888 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_disable_tx()
5889 struct ixgbe_ring *ring = adapter->xdp_ring[i]; in ixgbe_disable_tx()
5890 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
5904 switch (hw->mac.type) { in ixgbe_disable_tx()
5921 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_reset()
5922 struct net_device *netdev = adapter->netdev; in ixgbe_reset()
5925 if (ixgbe_removed(hw->hw_addr)) in ixgbe_reset()
5928 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_reset()
5932 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | in ixgbe_reset()
5934 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_reset()
5936 err = hw->mac.ops.init_hw(hw); in ixgbe_reset()
5946 /* We are running on a pre-production device, log a warning */ in ixgbe_reset()
5947 e_dev_warn("This device is a pre-production adapter/LOM. " in ixgbe_reset()
5958 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_reset()
5968 if (hw->mac.san_mac_rar_index) in ixgbe_reset()
5969 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); in ixgbe_reset()
5971 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_reset()
5974 if (hw->phy.ops.set_phy_power) { in ixgbe_reset()
5975 if (!netif_running(adapter->netdev) && !adapter->wol) in ixgbe_reset()
5976 hw->phy.ops.set_phy_power(hw, false); in ixgbe_reset()
5978 hw->phy.ops.set_phy_power(hw, true); in ixgbe_reset()
5983 * ixgbe_clean_tx_ring - Free Tx Buffers
5988 u16 i = tx_ring->next_to_clean; in ixgbe_clean_tx_ring()
5989 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_ring()
5991 if (tx_ring->xsk_pool) { in ixgbe_clean_tx_ring()
5996 while (i != tx_ring->next_to_use) { in ixgbe_clean_tx_ring()
6001 xdp_return_frame(tx_buffer->xdpf); in ixgbe_clean_tx_ring()
6003 dev_kfree_skb_any(tx_buffer->skb); in ixgbe_clean_tx_ring()
6006 dma_unmap_single(tx_ring->dev, in ixgbe_clean_tx_ring()
6012 eop_desc = tx_buffer->next_to_watch; in ixgbe_clean_tx_ring()
6020 if (unlikely(i == tx_ring->count)) { in ixgbe_clean_tx_ring()
6022 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_ring()
6028 dma_unmap_page(tx_ring->dev, in ixgbe_clean_tx_ring()
6037 if (unlikely(i == tx_ring->count)) { in ixgbe_clean_tx_ring()
6039 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_ring()
6049 tx_ring->next_to_use = 0; in ixgbe_clean_tx_ring()
6050 tx_ring->next_to_clean = 0; in ixgbe_clean_tx_ring()
6054 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
6061 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_clean_all_rx_rings()
6062 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
6066 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
6073 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_clean_all_tx_rings()
6074 ixgbe_clean_tx_ring(adapter->tx_ring[i]); in ixgbe_clean_all_tx_rings()
6075 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_clean_all_tx_rings()
6076 ixgbe_clean_tx_ring(adapter->xdp_ring[i]); in ixgbe_clean_all_tx_rings()
6084 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_exit()
6087 &adapter->fdir_filter_list, fdir_node) { in ixgbe_fdir_filter_exit()
6088 hlist_del(&filter->fdir_node); in ixgbe_fdir_filter_exit()
6091 adapter->fdir_filter_count = 0; in ixgbe_fdir_filter_exit()
6093 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_exit()
6098 struct net_device *netdev = adapter->netdev; in ixgbe_down()
6099 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_down()
6103 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_down()
6117 if (adapter->xdp_ring[0]) in ixgbe_down()
6124 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_down()
6125 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_down()
6126 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_down()
6128 del_timer_sync(&adapter->service_timer); in ixgbe_down()
6130 if (adapter->num_vfs) { in ixgbe_down()
6132 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); in ixgbe_down()
6135 for (i = 0 ; i < adapter->num_vfs; i++) in ixgbe_down()
6136 adapter->vfinfo[i].clear_to_send = false; in ixgbe_down()
6148 if (!pci_channel_offline(adapter->pdev)) in ixgbe_down()
6152 if (hw->mac.ops.disable_tx_laser) in ixgbe_down()
6153 hw->mac.ops.disable_tx_laser(hw); in ixgbe_down()
6160 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
6165 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_eee_capable()
6167 switch (hw->device_id) { in ixgbe_set_eee_capable()
6170 if (!hw->phy.eee_speeds_supported) in ixgbe_set_eee_capable()
6172 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; in ixgbe_set_eee_capable()
6173 if (!hw->phy.eee_speeds_advertised) in ixgbe_set_eee_capable()
6175 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; in ixgbe_set_eee_capable()
6178 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; in ixgbe_set_eee_capable()
6179 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; in ixgbe_set_eee_capable()
6185 * ixgbe_tx_timeout - Respond to a Tx Hang
6200 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_init_dcb()
6204 switch (hw->mac.type) { in ixgbe_init_dcb()
6207 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; in ixgbe_init_dcb()
6208 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; in ixgbe_init_dcb()
6212 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; in ixgbe_init_dcb()
6213 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; in ixgbe_init_dcb()
6218 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; in ixgbe_init_dcb()
6219 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; in ixgbe_init_dcb()
6225 tc = &adapter->dcb_cfg.tc_config[j]; in ixgbe_init_dcb()
6226 tc->path[DCB_TX_CONFIG].bwg_id = 0; in ixgbe_init_dcb()
6227 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); in ixgbe_init_dcb()
6228 tc->path[DCB_RX_CONFIG].bwg_id = 0; in ixgbe_init_dcb()
6229 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); in ixgbe_init_dcb()
6230 tc->dcb_pfc = pfc_disabled; in ixgbe_init_dcb()
6233 /* Initialize default user priority to TC mapping, UPx->TC0 */ in ixgbe_init_dcb()
6234 tc = &adapter->dcb_cfg.tc_config[0]; in ixgbe_init_dcb()
6235 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; in ixgbe_init_dcb()
6236 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; in ixgbe_init_dcb()
6238 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; in ixgbe_init_dcb()
6239 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; in ixgbe_init_dcb()
6240 adapter->dcb_cfg.pfc_mode_enable = false; in ixgbe_init_dcb()
6241 adapter->dcb_set_bitmap = 0x00; in ixgbe_init_dcb()
6242 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) in ixgbe_init_dcb()
6243 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; in ixgbe_init_dcb()
6244 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, in ixgbe_init_dcb()
6245 sizeof(adapter->temp_dcb_cfg)); in ixgbe_init_dcb()
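The 0xFF written to up_to_tc_bitmap in ixgbe_init_dcb encodes "all eight user priorities" as a bitmap on TC0, so every priority lands in traffic class 0 until DCB reconfigures the mapping. A tiny sketch of reading that bitmap:

#include <stdio.h>

int main(void)
{
        unsigned char up_to_tc_bitmap = 0xFF; /* TC0's priority bitmap */
        int up;

        /* Bit n set means user priority n is carried by this TC. */
        for (up = 0; up < 8; up++)
                if (up_to_tc_bitmap & (1 << up))
                        printf("UP%d -> TC0\n", up);
        return 0;
}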
6250 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
6261 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sw_init()
6262 struct pci_dev *pdev = adapter->pdev; in ixgbe_sw_init()
6269 hw->vendor_id = pdev->vendor; in ixgbe_sw_init()
6270 hw->device_id = pdev->device; in ixgbe_sw_init()
6271 hw->revision_id = pdev->revision; in ixgbe_sw_init()
6272 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ixgbe_sw_init()
6273 hw->subsystem_device_id = pdev->subsystem_device; in ixgbe_sw_init()
6276 ii->get_invariants(hw); in ixgbe_sw_init()
6280 adapter->ring_feature[RING_F_RSS].limit = rss; in ixgbe_sw_init()
6281 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; in ixgbe_sw_init()
6282 adapter->max_q_vectors = MAX_Q_VECTORS_82599; in ixgbe_sw_init()
6283 adapter->atr_sample_rate = 20; in ixgbe_sw_init()
6285 adapter->ring_feature[RING_F_FDIR].limit = fdir; in ixgbe_sw_init()
6286 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; in ixgbe_sw_init()
6287 adapter->ring_feature[RING_F_VMDQ].limit = 1; in ixgbe_sw_init()
6289 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; in ixgbe_sw_init()
6292 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; in ixgbe_sw_init()
6293 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; in ixgbe_sw_init()
6296 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
6297 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; in ixgbe_sw_init()
6300 adapter->fcoe.up = IXGBE_FCOE_DEFTC; in ixgbe_sw_init()
6305 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), in ixgbe_sw_init()
6307 if (!adapter->jump_tables[0]) in ixgbe_sw_init()
6308 return -ENOMEM; in ixgbe_sw_init()
6309 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; in ixgbe_sw_init()
6312 adapter->jump_tables[i] = NULL; in ixgbe_sw_init()
6314 adapter->mac_table = kcalloc(hw->mac.num_rar_entries, in ixgbe_sw_init()
6317 if (!adapter->mac_table) in ixgbe_sw_init()
6318 return -ENOMEM; in ixgbe_sw_init()
6321 return -ENOMEM; in ixgbe_sw_init()
6323 adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); in ixgbe_sw_init()
6324 if (!adapter->af_xdp_zc_qps) in ixgbe_sw_init()
6325 return -ENOMEM; in ixgbe_sw_init()
6328 switch (hw->mac.type) { in ixgbe_sw_init()
6330 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; in ixgbe_sw_init()
6332 if (hw->device_id == IXGBE_DEV_ID_82598AT) in ixgbe_sw_init()
6333 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; in ixgbe_sw_init()
6335 adapter->max_q_vectors = MAX_Q_VECTORS_82598; in ixgbe_sw_init()
6336 adapter->ring_feature[RING_F_FDIR].limit = 0; in ixgbe_sw_init()
6337 adapter->atr_sample_rate = 0; in ixgbe_sw_init()
6338 adapter->fdir_pballoc = 0; in ixgbe_sw_init()
6340 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
6341 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; in ixgbe_sw_init()
6343 adapter->fcoe.up = 0; in ixgbe_sw_init()
6348 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) in ixgbe_sw_init()
6349 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6354 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6357 switch (hw->device_id) { in ixgbe_sw_init()
6360 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6368 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; in ixgbe_sw_init()
6371 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
6373 adapter->fcoe.up = 0; in ixgbe_sw_init()
6378 if (hw->mac.type == ixgbe_mac_X550) in ixgbe_sw_init()
6379 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6381 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; in ixgbe_sw_init()
6390 spin_lock_init(&adapter->fcoe.lock); in ixgbe_sw_init()
6393 /* n-tuple support exists, always init our spinlock */ in ixgbe_sw_init()
6394 spin_lock_init(&adapter->fdir_perfect_lock); in ixgbe_sw_init()
6402 hw->fc.requested_mode = ixgbe_fc_full; in ixgbe_sw_init()
6403 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ in ixgbe_sw_init()
6405 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; in ixgbe_sw_init()
6406 hw->fc.send_xon = true; in ixgbe_sw_init()
6407 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); in ixgbe_sw_init()
6411 …e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the … in ixgbe_sw_init()
6413 /* assign number of SR-IOV VFs */ in ixgbe_sw_init()
6414 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_sw_init()
6417 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); in ixgbe_sw_init()
6423 adapter->rx_itr_setting = 1; in ixgbe_sw_init()
6424 adapter->tx_itr_setting = 1; in ixgbe_sw_init()
6427 adapter->tx_ring_count = IXGBE_DEFAULT_TXD; in ixgbe_sw_init()
6428 adapter->rx_ring_count = IXGBE_DEFAULT_RXD; in ixgbe_sw_init()
6431 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; in ixgbe_sw_init()
6436 return -EIO; in ixgbe_sw_init()
6440 set_bit(0, adapter->fwd_bitmask); in ixgbe_sw_init()
6441 set_bit(__IXGBE_DOWN, &adapter->state); in ixgbe_sw_init()
6447 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
6454 struct device *dev = tx_ring->dev; in ixgbe_setup_tx_resources()
6459 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_setup_tx_resources()
6461 if (tx_ring->q_vector) in ixgbe_setup_tx_resources()
6462 ring_node = tx_ring->q_vector->numa_node; in ixgbe_setup_tx_resources()
6464 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); in ixgbe_setup_tx_resources()
6465 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
6466 tx_ring->tx_buffer_info = vmalloc(size); in ixgbe_setup_tx_resources()
6467 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
6471 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbe_setup_tx_resources()
6472 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbe_setup_tx_resources()
6475 tx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_tx_resources()
6476 tx_ring->size, in ixgbe_setup_tx_resources()
6477 &tx_ring->dma, in ixgbe_setup_tx_resources()
6480 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
6481 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in ixgbe_setup_tx_resources()
6482 &tx_ring->dma, GFP_KERNEL); in ixgbe_setup_tx_resources()
6483 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
6486 tx_ring->next_to_use = 0; in ixgbe_setup_tx_resources()
6487 tx_ring->next_to_clean = 0; in ixgbe_setup_tx_resources()
6491 vfree(tx_ring->tx_buffer_info); in ixgbe_setup_tx_resources()
6492 tx_ring->tx_buffer_info = NULL; in ixgbe_setup_tx_resources()
6494 return -ENOMEM; in ixgbe_setup_tx_resources()
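ixgbe_setup_tx_resources sizes the descriptor ring as count * sizeof(descriptor), rounded up to 4096 bytes so the DMA region is page-granular. A sketch of that arithmetic; the 16-byte figure is my assumption for sizeof(union ixgbe_adv_tx_desc), and the count is an example:

#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t count = 512;     /* descriptors per ring (example) */
        size_t desc_size = 16;  /* assumed sizeof(union ixgbe_adv_tx_desc) */
        size_t size = ALIGN_UP(count * desc_size, 4096);

        /* Page-granular DMA region, as the ALIGN(size, 4096) above does. */
        printf("ring bytes: %zu (raw %zu)\n", size, count * desc_size);
        return 0;
}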
6498 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
6511 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_setup_all_tx_resources()
6512 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
6519 for (j = 0; j < adapter->num_xdp_queues; j++) { in ixgbe_setup_all_tx_resources()
6520 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]); in ixgbe_setup_all_tx_resources()
6531 while (j--) in ixgbe_setup_all_tx_resources()
6532 ixgbe_free_tx_resources(adapter->xdp_ring[j]); in ixgbe_setup_all_tx_resources()
6533 while (i--) in ixgbe_setup_all_tx_resources()
6534 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
6540 struct ixgbe_q_vector *q_vector = rx_ring->q_vector; in ixgbe_rx_napi_id()
6542 return q_vector ? q_vector->napi.napi_id : 0; in ixgbe_rx_napi_id()
6546 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
6555 struct device *dev = rx_ring->dev; in ixgbe_setup_rx_resources()
6560 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_setup_rx_resources()
6562 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
6563 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
6565 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); in ixgbe_setup_rx_resources()
6566 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6567 rx_ring->rx_buffer_info = vmalloc(size); in ixgbe_setup_rx_resources()
6568 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6572 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbe_setup_rx_resources()
6573 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbe_setup_rx_resources()
6576 rx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_rx_resources()
6577 rx_ring->size, in ixgbe_setup_rx_resources()
6578 &rx_ring->dma, in ixgbe_setup_rx_resources()
6581 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6582 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in ixgbe_setup_rx_resources()
6583 &rx_ring->dma, GFP_KERNEL); in ixgbe_setup_rx_resources()
6584 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6587 rx_ring->next_to_clean = 0; in ixgbe_setup_rx_resources()
6588 rx_ring->next_to_use = 0; in ixgbe_setup_rx_resources()
6590 /* XDP RX-queue info */ in ixgbe_setup_rx_resources()
6591 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbe_setup_rx_resources()
6592 rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) in ixgbe_setup_rx_resources()
6595 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbe_setup_rx_resources()
6599 vfree(rx_ring->rx_buffer_info); in ixgbe_setup_rx_resources()
6600 rx_ring->rx_buffer_info = NULL; in ixgbe_setup_rx_resources()
6602 return -ENOMEM; in ixgbe_setup_rx_resources()
6606 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
6619 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_setup_all_rx_resources()
6620 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6635 while (i--) in ixgbe_setup_all_rx_resources()
6636 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6641 * ixgbe_free_tx_resources - Free Tx Resources per Queue
6650 vfree(tx_ring->tx_buffer_info); in ixgbe_free_tx_resources()
6651 tx_ring->tx_buffer_info = NULL; in ixgbe_free_tx_resources()
6654 if (!tx_ring->desc) in ixgbe_free_tx_resources()
6657 dma_free_coherent(tx_ring->dev, tx_ring->size, in ixgbe_free_tx_resources()
6658 tx_ring->desc, tx_ring->dma); in ixgbe_free_tx_resources()
6660 tx_ring->desc = NULL; in ixgbe_free_tx_resources()
6664 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
6673 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_free_all_tx_resources()
6674 if (adapter->tx_ring[i]->desc) in ixgbe_free_all_tx_resources()
6675 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_free_all_tx_resources()
6676 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_free_all_tx_resources()
6677 if (adapter->xdp_ring[i]->desc) in ixgbe_free_all_tx_resources()
6678 ixgbe_free_tx_resources(adapter->xdp_ring[i]); in ixgbe_free_all_tx_resources()
6682 * ixgbe_free_rx_resources - Free Rx Resources
6691 rx_ring->xdp_prog = NULL; in ixgbe_free_rx_resources()
6692 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbe_free_rx_resources()
6693 vfree(rx_ring->rx_buffer_info); in ixgbe_free_rx_resources()
6694 rx_ring->rx_buffer_info = NULL; in ixgbe_free_rx_resources()
6697 if (!rx_ring->desc) in ixgbe_free_rx_resources()
6700 dma_free_coherent(rx_ring->dev, rx_ring->size, in ixgbe_free_rx_resources()
6701 rx_ring->desc, rx_ring->dma); in ixgbe_free_rx_resources()
6703 rx_ring->desc = NULL; in ixgbe_free_rx_resources()
6707 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
6720 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_free_all_rx_resources()
6721 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
6722 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
6726 * ixgbe_change_mtu - Change the Maximum Transfer Unit
6736 if (adapter->xdp_prog) { in ixgbe_change_mtu()
6741 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_change_mtu()
6742 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_change_mtu()
6746 return -EINVAL; in ixgbe_change_mtu()
6756 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && in ixgbe_change_mtu()
6757 (adapter->hw.mac.type == ixgbe_mac_82599EB) && in ixgbe_change_mtu()
6762 netdev->mtu, new_mtu); in ixgbe_change_mtu()
6765 netdev->mtu = new_mtu; in ixgbe_change_mtu()
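The XDP branch in ixgbe_change_mtu above rejects an MTU whose resulting frame would not fit a single Rx buffer on some ring. A hedged sketch of that gate; buffer sizes are examples, and the driver may account for additional headers (e.g. VLAN) not included here:

#include <stdio.h>

#define ETH_HLEN    14
#define ETH_FCS_LEN 4

/* Returns nonzero when the frame for new_mtu fits one Rx buffer. */
static int xdp_mtu_ok(int new_mtu, int rx_bufsz)
{
        return new_mtu + ETH_HLEN + ETH_FCS_LEN <= rx_bufsz;
}

int main(void)
{
        printf("MTU 1500 in 2KB buffer: %s\n",
               xdp_mtu_ok(1500, 2048) ? "ok" : "-EINVAL");
        printf("MTU 3000 in 2KB buffer: %s\n",
               xdp_mtu_ok(3000, 2048) ? "ok" : "-EINVAL");
        return 0;
}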
6774 * ixgbe_open - Called when a network interface is made active
6788 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_open()
6792 if (test_bit(__IXGBE_TESTING, &adapter->state)) in ixgbe_open()
6793 return -EBUSY; in ixgbe_open()
6814 queues = adapter->num_tx_queues; in ixgbe_open()
6819 queues = adapter->num_rx_queues; in ixgbe_open()
6836 if (hw->phy.ops.set_phy_power && !adapter->wol) in ixgbe_open()
6837 hw->phy.ops.set_phy_power(&adapter->hw, false); in ixgbe_open()
6850 if (adapter->hw.phy.ops.enter_lplu) { in ixgbe_close_suspend()
6851 adapter->hw.phy.reset_disable = true; in ixgbe_close_suspend()
6853 adapter->hw.phy.ops.enter_lplu(&adapter->hw); in ixgbe_close_suspend()
6854 adapter->hw.phy.reset_disable = false; in ixgbe_close_suspend()
6866 * ixgbe_close - Disables a network interface
6871 * The close entry point is called when an interface is de-activated
6896 struct net_device *netdev = adapter->netdev; in ixgbe_resume()
6899 adapter->hw.hw_addr = adapter->io_addr; in ixgbe_resume()
6907 clear_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_resume()
6914 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); in ixgbe_resume()
6932 struct net_device *netdev = adapter->netdev; in __ixgbe_shutdown()
6933 struct ixgbe_hw *hw = &adapter->hw; in __ixgbe_shutdown()
6935 u32 wufc = adapter->wol; in __ixgbe_shutdown()
6946 if (hw->mac.ops.stop_link_on_d3) in __ixgbe_shutdown()
6947 hw->mac.ops.stop_link_on_d3(hw); in __ixgbe_shutdown()
6955 if (hw->mac.ops.enable_tx_laser) in __ixgbe_shutdown()
6956 hw->mac.ops.enable_tx_laser(hw); in __ixgbe_shutdown()
6973 switch (hw->mac.type) { in __ixgbe_shutdown()
6989 if (hw->phy.ops.set_phy_power && !*enable_wake) in __ixgbe_shutdown()
6990 hw->phy.ops.set_phy_power(hw, false); in __ixgbe_shutdown()
6994 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) in __ixgbe_shutdown()
7026 * ixgbe_update_stats - Update the board statistics counters.
7031 struct net_device *netdev = adapter->netdev; in ixgbe_update_stats()
7032 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_stats()
7033 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_stats()
7041 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_update_stats()
7042 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_update_stats()
7045 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { in ixgbe_update_stats()
7048 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_update_stats()
7049 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
7050 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
7052 adapter->rsc_total_count = rsc_count; in ixgbe_update_stats()
7053 adapter->rsc_total_flush = rsc_flush; in ixgbe_update_stats()
7056 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_update_stats()
7057 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_update_stats()
7061 non_eop_descs += rx_ring->rx_stats.non_eop_descs; in ixgbe_update_stats()
7062 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbe_update_stats()
7063 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbe_update_stats()
7064 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbe_update_stats()
7065 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbe_update_stats()
7066 bytes += rx_ring->stats.bytes; in ixgbe_update_stats()
7067 packets += rx_ring->stats.packets; in ixgbe_update_stats()
7069 adapter->non_eop_descs = non_eop_descs; in ixgbe_update_stats()
7070 adapter->alloc_rx_page = alloc_rx_page; in ixgbe_update_stats()
7071 adapter->alloc_rx_page_failed = alloc_rx_page_failed; in ixgbe_update_stats()
7072 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; in ixgbe_update_stats()
7073 adapter->hw_csum_rx_error = hw_csum_rx_error; in ixgbe_update_stats()
7074 netdev->stats.rx_bytes = bytes; in ixgbe_update_stats()
7075 netdev->stats.rx_packets = packets; in ixgbe_update_stats()
7080 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_update_stats()
7081 struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); in ixgbe_update_stats()
7085 restart_queue += tx_ring->tx_stats.restart_queue; in ixgbe_update_stats()
7086 tx_busy += tx_ring->tx_stats.tx_busy; in ixgbe_update_stats()
7087 bytes += tx_ring->stats.bytes; in ixgbe_update_stats()
7088 packets += tx_ring->stats.packets; in ixgbe_update_stats()
7090 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_update_stats()
7091 struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); in ixgbe_update_stats()
7095 restart_queue += xdp_ring->tx_stats.restart_queue; in ixgbe_update_stats()
7096 tx_busy += xdp_ring->tx_stats.tx_busy; in ixgbe_update_stats()
7097 bytes += xdp_ring->stats.bytes; in ixgbe_update_stats()
7098 packets += xdp_ring->stats.packets; in ixgbe_update_stats()
7100 adapter->restart_queue = restart_queue; in ixgbe_update_stats()
7101 adapter->tx_busy = tx_busy; in ixgbe_update_stats()
7102 netdev->stats.tx_bytes = bytes; in ixgbe_update_stats()
7103 netdev->stats.tx_packets = packets; in ixgbe_update_stats()
7105 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); in ixgbe_update_stats()
7112 hwstats->mpc[i] += mpc; in ixgbe_update_stats()
7113 total_mpc += hwstats->mpc[i]; in ixgbe_update_stats()
7114 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); in ixgbe_update_stats()
7115 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); in ixgbe_update_stats()
7116 switch (hw->mac.type) { in ixgbe_update_stats()
7118 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); in ixgbe_update_stats()
7119 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); in ixgbe_update_stats()
7120 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); in ixgbe_update_stats()
7121 hwstats->pxonrxc[i] += in ixgbe_update_stats()
7129 hwstats->pxonrxc[i] += in ixgbe_update_stats()
7139 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); in ixgbe_update_stats()
7140 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); in ixgbe_update_stats()
7141 if ((hw->mac.type == ixgbe_mac_82599EB) || in ixgbe_update_stats()
7142 (hw->mac.type == ixgbe_mac_X540) || in ixgbe_update_stats()
7143 (hw->mac.type == ixgbe_mac_X550) || in ixgbe_update_stats()
7144 (hw->mac.type == ixgbe_mac_X550EM_x) || in ixgbe_update_stats()
7145 (hw->mac.type == ixgbe_mac_x550em_a)) { in ixgbe_update_stats()
7146 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); in ixgbe_update_stats()
7148 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); in ixgbe_update_stats()
7153 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); in ixgbe_update_stats()
7155 hwstats->gprc -= missed_rx; in ixgbe_update_stats()
7160 switch (hw->mac.type) { in ixgbe_update_stats()
7162 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); in ixgbe_update_stats()
7163 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); in ixgbe_update_stats()
7164 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); in ixgbe_update_stats()
7165 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); in ixgbe_update_stats()
7172 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); in ixgbe_update_stats()
7173 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); in ixgbe_update_stats()
7174 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); in ixgbe_update_stats()
7175 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); in ixgbe_update_stats()
7179 adapter->hw_rx_no_dma_resources += in ixgbe_update_stats()
7181 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); in ixgbe_update_stats()
7183 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); in ixgbe_update_stats()
7185 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); in ixgbe_update_stats()
7187 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); in ixgbe_update_stats()
7188 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); in ixgbe_update_stats()
7189 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); in ixgbe_update_stats()
7191 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); in ixgbe_update_stats()
7192 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); in ixgbe_update_stats()
7193 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); in ixgbe_update_stats()
7194 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); in ixgbe_update_stats()
7195 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); in ixgbe_update_stats()
7196 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); in ixgbe_update_stats()
7198 if (adapter->fcoe.ddp_pool) { in ixgbe_update_stats()
7199 struct ixgbe_fcoe *fcoe = &adapter->fcoe; in ixgbe_update_stats()
7204 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_update_stats()
7205 noddp += ddp_pool->noddp; in ixgbe_update_stats()
7206 noddp_ext_buff += ddp_pool->noddp_ext_buff; in ixgbe_update_stats()
7208 hwstats->fcoe_noddp = noddp; in ixgbe_update_stats()
7209 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; in ixgbe_update_stats()
7217 hwstats->bprc += bprc; in ixgbe_update_stats()
7218 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); in ixgbe_update_stats()
7219 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_update_stats()
7220 hwstats->mprc -= bprc; in ixgbe_update_stats()
7221 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); in ixgbe_update_stats()
7222 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); in ixgbe_update_stats()
7223 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); in ixgbe_update_stats()
7224 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); in ixgbe_update_stats()
7225 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); in ixgbe_update_stats()
7226 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); in ixgbe_update_stats()
7227 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); in ixgbe_update_stats()
7228 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); in ixgbe_update_stats()
7230 hwstats->lxontxc += lxon; in ixgbe_update_stats()
7232 hwstats->lxofftxc += lxoff; in ixgbe_update_stats()
7233 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); in ixgbe_update_stats()
7234 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); in ixgbe_update_stats()
7236 * 82598 errata - tx of flow control packets is included in tx counters in ixgbe_update_stats()
7239 hwstats->gptc -= xon_off_tot; in ixgbe_update_stats()
7240 hwstats->mptc -= xon_off_tot; in ixgbe_update_stats()
7241 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); in ixgbe_update_stats()
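	/*
	 * Each XON/XOFF frame the MAC counts is a minimum-size Ethernet
	 * frame, so the correction above backs out ETH_ZLEN (60) +
	 * ETH_FCS_LEN (4) = 64 bytes per PAUSE frame from the
	 * good-octets-transmitted counter.
	 */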
7242 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); in ixgbe_update_stats()
7243 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); in ixgbe_update_stats()
7244 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); in ixgbe_update_stats()
7245 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); in ixgbe_update_stats()
7246 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); in ixgbe_update_stats()
7247 hwstats->ptc64 -= xon_off_tot; in ixgbe_update_stats()
7248 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); in ixgbe_update_stats()
7249 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); in ixgbe_update_stats()
7250 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); in ixgbe_update_stats()
7251 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); in ixgbe_update_stats()
7252 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); in ixgbe_update_stats()
7253 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); in ixgbe_update_stats()
7256 netdev->stats.multicast = hwstats->mprc; in ixgbe_update_stats()
7259 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; in ixgbe_update_stats()
7260 netdev->stats.rx_dropped = 0; in ixgbe_update_stats()
7261 netdev->stats.rx_length_errors = hwstats->rlec; in ixgbe_update_stats()
7262 netdev->stats.rx_crc_errors = hwstats->crcerrs; in ixgbe_update_stats()
7263 netdev->stats.rx_missed_errors = total_mpc; in ixgbe_update_stats()
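	/*
	 * Fold the hardware MAC counters into the generic netdev stats:
	 * rx_errors aggregates CRC and length errors, and rx_missed_errors
	 * reports total_mpc, the sum of the per-TC missed-packet counters
	 * accumulated earlier in this function.
	 */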
7267 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
7272 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_fdir_reinit_subtask()
7275 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) in ixgbe_fdir_reinit_subtask()
7278 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_fdir_reinit_subtask()
7281 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_fdir_reinit_subtask()
7285 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) in ixgbe_fdir_reinit_subtask()
7288 adapter->fdir_overflow++; in ixgbe_fdir_reinit_subtask()
7291 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_fdir_reinit_subtask()
7293 &(adapter->tx_ring[i]->state)); in ixgbe_fdir_reinit_subtask()
7294 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_fdir_reinit_subtask()
7296 &adapter->xdp_ring[i]->state); in ixgbe_fdir_reinit_subtask()
7297 /* re-enable flow director interrupts */ in ixgbe_fdir_reinit_subtask()
7300 e_err(probe, "failed to finish FDIR re-initialization, " in ixgbe_fdir_reinit_subtask()
7306 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
7316 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_hang_subtask()
7321 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_check_hang_subtask()
7322 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_check_hang_subtask()
7323 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_check_hang_subtask()
7327 if (netif_carrier_ok(adapter->netdev)) { in ixgbe_check_hang_subtask()
7328 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_check_hang_subtask()
7329 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbe_check_hang_subtask()
7330 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_check_hang_subtask()
7331 set_check_for_tx_hang(adapter->xdp_ring[i]); in ixgbe_check_hang_subtask()
7334 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { in ixgbe_check_hang_subtask()
7344 for (i = 0; i < adapter->num_q_vectors; i++) { in ixgbe_check_hang_subtask()
7345 struct ixgbe_q_vector *qv = adapter->q_vector[i]; in ixgbe_check_hang_subtask()
7346 if (qv->rx.ring || qv->tx.ring) in ixgbe_check_hang_subtask()
7356 * ixgbe_watchdog_update_link - update the link status
7361 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_update_link()
7362 u32 link_speed = adapter->link_speed; in ixgbe_watchdog_update_link()
7363 bool link_up = adapter->link_up; in ixgbe_watchdog_update_link()
7364 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_watchdog_update_link()
7366 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) in ixgbe_watchdog_update_link()
7369 if (hw->mac.ops.check_link) { in ixgbe_watchdog_update_link()
7370 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); in ixgbe_watchdog_update_link()
7377 if (adapter->ixgbe_ieee_pfc) in ixgbe_watchdog_update_link()
7378 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_watchdog_update_link()
7380 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { in ixgbe_watchdog_update_link()
7381 hw->mac.ops.fc_enable(hw); in ixgbe_watchdog_update_link()
7386 time_after(jiffies, (adapter->link_check_timeout + in ixgbe_watchdog_update_link()
7388 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_watchdog_update_link()
7393 adapter->link_up = link_up; in ixgbe_watchdog_update_link()
7394 adapter->link_speed = link_speed; in ixgbe_watchdog_update_link()
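	/*
	 * Note: the cached link state is refreshed only while
	 * IXGBE_FLAG_NEED_LINK_UPDATE is set; once the link comes up (or
	 * the check times out) the flag is cleared, and flow control is
	 * enabled unless DCB with PFC owns the pause configuration.
	 */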
7400 struct net_device *netdev = adapter->netdev; in ixgbe_update_default_up()
7407 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) in ixgbe_update_default_up()
7410 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; in ixgbe_update_default_up()
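	/*
	 * 'up' is a priority bitmap produced by the DCB app lookup above;
	 * ffs() returns the 1-based index of the lowest set bit, so
	 * ffs(up) - 1 selects the lowest mapped priority (e.g. up = 0x8
	 * yields default_up = 3), falling back to priority 0.
	 */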
7415 * ixgbe_watchdog_link_is_up - update netif_carrier status and
7421 struct net_device *netdev = adapter->netdev; in ixgbe_watchdog_link_is_up()
7422 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_link_is_up()
7423 u32 link_speed = adapter->link_speed; in ixgbe_watchdog_link_is_up()
7431 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_watchdog_link_is_up()
7433 switch (hw->mac.type) { in ixgbe_watchdog_link_is_up()
7458 adapter->last_rx_ptp_check = jiffies; in ixgbe_watchdog_link_is_up()
7460 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_watchdog_link_is_up()
7495 netif_tx_wake_all_queues(adapter->netdev); in ixgbe_watchdog_link_is_up()
7505 * ixgbe_watchdog_link_is_down - update netif_carrier status and
7511 struct net_device *netdev = adapter->netdev; in ixgbe_watchdog_link_is_down()
7512 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_link_is_down()
7514 adapter->link_up = false; in ixgbe_watchdog_link_is_down()
7515 adapter->link_speed = 0; in ixgbe_watchdog_link_is_down()
7522 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) in ixgbe_watchdog_link_is_down()
7523 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_watchdog_link_is_down()
7525 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_watchdog_link_is_down()
7539 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_ring_tx_pending()
7540 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_ring_tx_pending()
7542 if (tx_ring->next_to_use != tx_ring->next_to_clean) in ixgbe_ring_tx_pending()
7546 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_ring_tx_pending()
7547 struct ixgbe_ring *ring = adapter->xdp_ring[i]; in ixgbe_ring_tx_pending()
7549 if (ring->next_to_use != ring->next_to_clean) in ixgbe_ring_tx_pending()
7558 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vf_tx_pending()
7559 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_tx_pending()
7560 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_vf_tx_pending()
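	/*
	 * __ALIGN_MASK(1, ~mask) rounds 1 up to the queue stride encoded
	 * in the VMDq ring-feature mask, i.e. the number of Tx queues per
	 * pool; e.g. mask = 0x78: (1 + ~0x78) & 0x78 = 8 queues per pool.
	 */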
7564 if (!adapter->num_vfs) in ixgbe_vf_tx_pending()
7568 if (hw->mac.type >= ixgbe_mac_X550) in ixgbe_vf_tx_pending()
7571 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_vf_tx_pending()
7587 * ixgbe_watchdog_flush_tx - flush queues on link down
7592 if (!netif_carrier_ok(adapter->netdev)) { in ixgbe_watchdog_flush_tx()
7601 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_watchdog_flush_tx()
7609 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_for_bad_vf()
7610 struct pci_dev *pdev = adapter->pdev; in ixgbe_check_for_bad_vf()
7614 if (!(netif_carrier_ok(adapter->netdev))) in ixgbe_check_for_bad_vf()
7630 for (vf = 0; vf < adapter->num_vfs; ++vf) { in ixgbe_check_for_bad_vf()
7631 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; in ixgbe_check_for_bad_vf()
7648 if (adapter->hw.mac.type == ixgbe_mac_82598EB || in ixgbe_spoof_check()
7649 adapter->num_vfs == 0) in ixgbe_spoof_check()
7652 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); in ixgbe_spoof_check()
7676 * ixgbe_watchdog_subtask - check and bring link up
7682 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_watchdog_subtask()
7683 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_watchdog_subtask()
7684 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_watchdog_subtask()
7689 if (adapter->link_up) in ixgbe_watchdog_subtask()
7702 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
7707 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sfp_detection_subtask()
7711 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && in ixgbe_sfp_detection_subtask()
7712 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) in ixgbe_sfp_detection_subtask()
7715 if (adapter->sfp_poll_time && in ixgbe_sfp_detection_subtask()
7716 time_after(adapter->sfp_poll_time, jiffies)) in ixgbe_sfp_detection_subtask()
7719 /* someone else is in init, wait until next service event */ in ixgbe_sfp_detection_subtask()
7720 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_sfp_detection_subtask()
7723 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; in ixgbe_sfp_detection_subtask()
7725 err = hw->phy.ops.identify_sfp(hw); in ixgbe_sfp_detection_subtask()
7732 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_detection_subtask()
7740 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) in ixgbe_sfp_detection_subtask()
7743 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_detection_subtask()
7750 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_sfp_detection_subtask()
7751 err = hw->phy.ops.reset(hw); in ixgbe_sfp_detection_subtask()
7753 err = hw->mac.ops.setup_sfp(hw); in ixgbe_sfp_detection_subtask()
7758 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_sfp_detection_subtask()
7759 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); in ixgbe_sfp_detection_subtask()
7762 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_sfp_detection_subtask()
7765 (adapter->netdev->reg_state == NETREG_REGISTERED)) { in ixgbe_sfp_detection_subtask()
7770 unregister_netdev(adapter->netdev); in ixgbe_sfp_detection_subtask()
7775 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
7780 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sfp_link_config_subtask()
7785 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) in ixgbe_sfp_link_config_subtask()
7788 /* someone else is in init, wait until next service event */ in ixgbe_sfp_link_config_subtask()
7789 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_sfp_link_config_subtask()
7792 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_sfp_link_config_subtask()
7794 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); in ixgbe_sfp_link_config_subtask()
7803 if (hw->mac.ops.setup_link) in ixgbe_sfp_link_config_subtask()
7804 hw->mac.ops.setup_link(hw, speed, true); in ixgbe_sfp_link_config_subtask()
7806 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_sfp_link_config_subtask()
7807 adapter->link_check_timeout = jiffies; in ixgbe_sfp_link_config_subtask()
7808 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_sfp_link_config_subtask()
7812 * ixgbe_service_timer - Timer callback
7821 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) in ixgbe_service_timer()
7827 mod_timer(&adapter->service_timer, next_event_offset + jiffies); in ixgbe_service_timer()
7834 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_phy_interrupt_subtask()
7837 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) in ixgbe_phy_interrupt_subtask()
7840 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; in ixgbe_phy_interrupt_subtask()
7842 if (!hw->phy.ops.handle_lasi) in ixgbe_phy_interrupt_subtask()
7845 status = hw->phy.ops.handle_lasi(&adapter->hw); in ixgbe_phy_interrupt_subtask()
7854 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) in ixgbe_reset_subtask()
7859 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_reset_subtask()
7860 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_reset_subtask()
7861 test_bit(__IXGBE_RESETTING, &adapter->state)) { in ixgbe_reset_subtask()
7867 netdev_err(adapter->netdev, "Reset adapter\n"); in ixgbe_reset_subtask()
7868 adapter->tx_timeout_count++; in ixgbe_reset_subtask()
7875 * ixgbe_check_fw_error - Check firmware for errors
7882 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_fw_error()
7893 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { in ixgbe_check_fw_error()
7902 * ixgbe_service_task - manages and runs subtasks
7910 if (ixgbe_removed(adapter->hw.hw_addr)) { in ixgbe_service_task()
7911 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_service_task()
7920 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_service_task()
7921 unregister_netdev(adapter->netdev); in ixgbe_service_task()
7934 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { in ixgbe_service_task()
7936 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) in ixgbe_service_task()
7950 struct sk_buff *skb = first->skb; in ixgbe_tso()
7965 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbe_tso()
7975 if (eth_p_mpls(first->protocol)) in ixgbe_tso()
7982 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? in ixgbe_tso()
7986 if (ip.v4->version == 4) { in ixgbe_tso()
7988 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in ixgbe_tso()
7989 int len = csum_start - trans_start; in ixgbe_tso()
7995 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? in ixgbe_tso()
8000 ip.v4->tot_len = 0; in ixgbe_tso()
8001 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbe_tso()
8005 ip.v6->payload_len = 0; in ixgbe_tso()
8006 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbe_tso()
8011 l4_offset = l4.hdr - skb->data; in ixgbe_tso()
8014 paylen = skb->len - l4_offset; in ixgbe_tso()
8018 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in ixgbe_tso()
8019 csum_replace_by_diff(&l4.tcp->check, in ixgbe_tso()
8024 csum_replace_by_diff(&l4.udp->check, in ixgbe_tso()
8029 first->gso_segs = skb_shinfo(skb)->gso_segs; in ixgbe_tso()
8030 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbe_tso()
8033 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; in ixgbe_tso()
8034 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; in ixgbe_tso()
8036 fceof_saidx |= itd->sa_idx; in ixgbe_tso()
8037 type_tucmd |= itd->flags | itd->trailer_len; in ixgbe_tso()
8040 vlan_macip_lens = l4.hdr - ip.hdr; in ixgbe_tso()
8041 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; in ixgbe_tso()
8042 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbe_tso()
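	/*
	 * At this point one context descriptor carries everything the MAC
	 * needs for segmentation: vlan_macip_lens packs the VLAN tag plus
	 * MAC and IP header lengths, mss_l4len_idx packs the MSS and L4
	 * header length, and the L4 checksum was pre-seeded above (payload
	 * length removed) so hardware can finalize it per segment.
	 */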
8054 struct sk_buff *skb = first->skb; in ixgbe_tx_csum()
8059 if (skb->ip_summed != CHECKSUM_PARTIAL) { in ixgbe_tx_csum()
8061 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | in ixgbe_tx_csum()
8067 switch (skb->csum_offset) { in ixgbe_tx_csum()
8086 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; in ixgbe_tx_csum()
8087 vlan_macip_lens = skb_checksum_start_offset(skb) - in ixgbe_tx_csum()
8092 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbe_tx_csum()
8094 fceof_saidx |= itd->sa_idx; in ixgbe_tx_csum()
8095 type_tucmd |= itd->flags | itd->trailer_len; in ixgbe_tx_csum()
8125 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); in ixgbe_tx_cmd_type()
8158 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in ixgbe_tx_olinfo_status()
8163 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
8175 return -EBUSY; in __ixgbe_maybe_stop_tx()
8177 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ixgbe_maybe_stop_tx()
8178 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbe_maybe_stop_tx()
8179 ++tx_ring->tx_stats.restart_queue; in __ixgbe_maybe_stop_tx()
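	/*
	 * Classic stop/wake race handling: stop the queue first, re-check
	 * the free-descriptor count, and if the clean-up path freed enough
	 * descriptors in the meantime, restart the queue here so a wakeup
	 * is never lost.
	 */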
8195 struct sk_buff *skb = first->skb; in ixgbe_tx_map()
8201 u32 tx_flags = first->tx_flags; in ixgbe_tx_map()
8203 u16 i = tx_ring->next_to_use; in ixgbe_tx_map()
8207 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); in ixgbe_tx_map()
8210 data_len = skb->data_len; in ixgbe_tx_map()
8215 size -= sizeof(struct fcoe_crc_eof) - data_len; in ixgbe_tx_map()
8218 data_len -= sizeof(struct fcoe_crc_eof); in ixgbe_tx_map()
8223 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbe_tx_map()
8227 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ixgbe_tx_map()
8228 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbe_tx_map()
8235 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_tx_map()
8238 tx_desc->read.cmd_type_len = in ixgbe_tx_map()
8243 if (i == tx_ring->count) { in ixgbe_tx_map()
8247 tx_desc->read.olinfo_status = 0; in ixgbe_tx_map()
8250 size -= IXGBE_MAX_DATA_PER_TXD; in ixgbe_tx_map()
8252 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_tx_map()
8258 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in ixgbe_tx_map()
8262 if (i == tx_ring->count) { in ixgbe_tx_map()
8266 tx_desc->read.olinfo_status = 0; in ixgbe_tx_map()
8273 data_len -= size; in ixgbe_tx_map()
8275 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbe_tx_map()
8278 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
8283 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbe_tx_map()
8285 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in ixgbe_tx_map()
8288 first->time_stamp = jiffies; in ixgbe_tx_map()
8294 * are new descriptors to fetch. (Only applicable for weak-ordered in ixgbe_tx_map()
8295 * memory model archs, such as IA-64). in ixgbe_tx_map()
8303 first->next_to_watch = tx_desc; in ixgbe_tx_map()
8306 if (i == tx_ring->count) in ixgbe_tx_map()
8309 tx_ring->next_to_use = i; in ixgbe_tx_map()
8314 writel(i, tx_ring->tail); in ixgbe_tx_map()
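	/*
	 * Writing the tail register is what hands the new descriptors to
	 * the device; the barrier above ensures all descriptor writes are
	 * visible to hardware before it can fetch them.
	 */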
8319 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbe_tx_map()
8323 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
8325 dma_unmap_page(tx_ring->dev, in ixgbe_tx_map()
8333 i += tx_ring->count; in ixgbe_tx_map()
8334 i--; in ixgbe_tx_map()
8337 dev_kfree_skb_any(first->skb); in ixgbe_tx_map()
8338 first->skb = NULL; in ixgbe_tx_map()
8340 tx_ring->next_to_use = i; in ixgbe_tx_map()
8342 return -1; in ixgbe_tx_map()
8348 struct ixgbe_q_vector *q_vector = ring->q_vector; in ixgbe_atr()
8367 if (!ring->atr_sample_rate) in ixgbe_atr()
8370 ring->atr_count++; in ixgbe_atr()
8373 if ((first->protocol != htons(ETH_P_IP)) && in ixgbe_atr()
8374 (first->protocol != htons(ETH_P_IPV6))) in ixgbe_atr()
8378 skb = first->skb; in ixgbe_atr()
8380 if (unlikely(hdr.network <= skb->data)) in ixgbe_atr()
8382 if (skb->encapsulation && in ixgbe_atr()
8383 first->protocol == htons(ETH_P_IP) && in ixgbe_atr()
8384 hdr.ipv4->protocol == IPPROTO_UDP) { in ixgbe_atr()
8385 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_atr()
8392 if (adapter->vxlan_port && in ixgbe_atr()
8393 udp_hdr(skb)->dest == adapter->vxlan_port) in ixgbe_atr()
8396 if (adapter->geneve_port && in ixgbe_atr()
8397 udp_hdr(skb)->dest == adapter->geneve_port) in ixgbe_atr()
8408 switch (hdr.ipv4->version) { in ixgbe_atr()
8412 l4_proto = hdr.ipv4->protocol; in ixgbe_atr()
8415 hlen = hdr.network - skb->data; in ixgbe_atr()
8417 hlen -= hdr.network - skb->data; in ixgbe_atr()
8433 if (th->fin) in ixgbe_atr()
8437 if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) in ixgbe_atr()
8441 ring->atr_count = 0; in ixgbe_atr()
8443 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); in ixgbe_atr()
8448 * The input is broken into two sections, a non-compressed section in ixgbe_atr()
8458 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) in ixgbe_atr()
8459 common.port.src ^= th->dest ^ htons(ETH_P_8021Q); in ixgbe_atr()
8461 common.port.src ^= th->dest ^ first->protocol; in ixgbe_atr()
8462 common.port.dst ^= th->source; in ixgbe_atr()
8464 switch (hdr.ipv4->version) { in ixgbe_atr()
8467 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; in ixgbe_atr()
8471 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ in ixgbe_atr()
8472 hdr.ipv6->saddr.s6_addr32[1] ^ in ixgbe_atr()
8473 hdr.ipv6->saddr.s6_addr32[2] ^ in ixgbe_atr()
8474 hdr.ipv6->saddr.s6_addr32[3] ^ in ixgbe_atr()
8475 hdr.ipv6->daddr.s6_addr32[0] ^ in ixgbe_atr()
8476 hdr.ipv6->daddr.s6_addr32[1] ^ in ixgbe_atr()
8477 hdr.ipv6->daddr.s6_addr32[2] ^ in ixgbe_atr()
8478 hdr.ipv6->daddr.s6_addr32[3]; in ixgbe_atr()
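		/*
		 * The ATR signature filter only has a 32-bit address field,
		 * so the full 128-bit IPv6 source and destination addresses
		 * are XOR-folded into common.ip word by word.
		 */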
8488 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, in ixgbe_atr()
8489 input, common, ring->queue_index); in ixgbe_atr()
8501 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); in ixgbe_select_queue()
8504 txq = vdev->tc_to_txq[tc].offset; in ixgbe_select_queue()
8506 vdev->tc_to_txq[tc].count); in ixgbe_select_queue()
8520 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) in ixgbe_select_queue()
8527 f = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_select_queue()
8532 while (txq >= f->indices) in ixgbe_select_queue()
8533 txq -= f->indices; in ixgbe_select_queue()
8535 return txq + f->offset; in ixgbe_select_queue()
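	/*
	 * The FCoE path maps the precomputed txq into the FCoE ring range:
	 * the subtraction loop is a cheap modulo by f->indices, and
	 * f->offset rebases the result onto the absolute queue numbering.
	 */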
8542 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; in ixgbe_xmit_xdp_ring()
8549 len = xdpf->len; in ixgbe_xmit_xdp_ring()
8554 dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); in ixgbe_xmit_xdp_ring()
8555 if (dma_mapping_error(ring->dev, dma)) in ixgbe_xmit_xdp_ring()
8559 tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; in ixgbe_xmit_xdp_ring()
8560 tx_buffer->bytecount = len; in ixgbe_xmit_xdp_ring()
8561 tx_buffer->gso_segs = 1; in ixgbe_xmit_xdp_ring()
8562 tx_buffer->protocol = 0; in ixgbe_xmit_xdp_ring()
8564 i = ring->next_to_use; in ixgbe_xmit_xdp_ring()
8569 tx_buffer->xdpf = xdpf; in ixgbe_xmit_xdp_ring()
8571 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_xmit_xdp_ring()
8578 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbe_xmit_xdp_ring()
8579 tx_desc->read.olinfo_status = in ixgbe_xmit_xdp_ring()
8587 if (i == ring->count) in ixgbe_xmit_xdp_ring()
8590 tx_buffer->next_to_watch = tx_desc; in ixgbe_xmit_xdp_ring()
8591 ring->next_to_use = i; in ixgbe_xmit_xdp_ring()
8606 __be16 protocol = skb->protocol; in ixgbe_xmit_frame_ring()
8616 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in ixgbe_xmit_frame_ring()
8618 &skb_shinfo(skb)->frags[f])); in ixgbe_xmit_frame_ring()
8621 tx_ring->tx_stats.tx_busy++; in ixgbe_xmit_frame_ring()
8626 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbe_xmit_frame_ring()
8627 first->skb = skb; in ixgbe_xmit_frame_ring()
8628 first->bytecount = skb->len; in ixgbe_xmit_frame_ring()
8629 first->gso_segs = 1; in ixgbe_xmit_frame_ring()
8642 tx_flags |= ntohs(vhdr->h_vlan_TCI) << in ixgbe_xmit_frame_ring()
8648 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in ixgbe_xmit_frame_ring()
8649 adapter->ptp_clock) { in ixgbe_xmit_frame_ring()
8650 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && in ixgbe_xmit_frame_ring()
8652 &adapter->state)) { in ixgbe_xmit_frame_ring()
8653 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in ixgbe_xmit_frame_ring()
8657 adapter->ptp_tx_skb = skb_get(skb); in ixgbe_xmit_frame_ring()
8658 adapter->ptp_tx_start = jiffies; in ixgbe_xmit_frame_ring()
8659 schedule_work(&adapter->ptp_tx_work); in ixgbe_xmit_frame_ring()
8661 adapter->tx_hwtstamp_skipped++; in ixgbe_xmit_frame_ring()
8667 * Use the l2switch_enable flag - would be false if the DMA in ixgbe_xmit_frame_ring()
8670 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_xmit_frame_ring()
8674 /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ in ixgbe_xmit_frame_ring()
8675 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && in ixgbe_xmit_frame_ring()
8677 (skb->priority != TC_PRIO_CONTROL))) { in ixgbe_xmit_frame_ring()
8679 tx_flags |= (skb->priority & 0x7) << in ixgbe_xmit_frame_ring()
8686 vhdr = (struct vlan_ethhdr *)skb->data; in ixgbe_xmit_frame_ring()
8687 vhdr->h_vlan_TCI = htons(tx_flags >> in ixgbe_xmit_frame_ring()
8695 first->tx_flags = tx_flags; in ixgbe_xmit_frame_ring()
8696 first->protocol = protocol; in ixgbe_xmit_frame_ring()
8701 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { in ixgbe_xmit_frame_ring()
8723 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) in ixgbe_xmit_frame_ring()
8735 dev_kfree_skb_any(first->skb); in ixgbe_xmit_frame_ring()
8736 first->skb = NULL; in ixgbe_xmit_frame_ring()
8739 dev_kfree_skb_any(adapter->ptp_tx_skb); in ixgbe_xmit_frame_ring()
8740 adapter->ptp_tx_skb = NULL; in ixgbe_xmit_frame_ring()
8741 cancel_work_sync(&adapter->ptp_tx_work); in ixgbe_xmit_frame_ring()
8742 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); in ixgbe_xmit_frame_ring()
8762 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; in __ixgbe_xmit_frame()
8763 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) in __ixgbe_xmit_frame()
8776 * ixgbe_set_mac - Change the Ethernet Address of the NIC
8785 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_mac()
8788 if (!is_valid_ether_addr(addr->sa_data)) in ixgbe_set_mac()
8789 return -EADDRNOTAVAIL; in ixgbe_set_mac()
8791 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); in ixgbe_set_mac()
8792 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in ixgbe_set_mac()
8803 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mdio_read()
8807 if (adapter->mii_bus) { in ixgbe_mdio_read()
8813 return mdiobus_read(adapter->mii_bus, prtad, regnum); in ixgbe_mdio_read()
8816 if (prtad != hw->phy.mdio.prtad) in ixgbe_mdio_read()
8817 return -EINVAL; in ixgbe_mdio_read()
8818 rc = hw->phy.ops.read_reg(hw, addr, devad, &value); in ixgbe_mdio_read()
8828 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mdio_write()
8830 if (adapter->mii_bus) { in ixgbe_mdio_write()
8836 return mdiobus_write(adapter->mii_bus, prtad, regnum, value); in ixgbe_mdio_write()
8839 if (prtad != hw->phy.mdio.prtad) in ixgbe_mdio_write()
8840 return -EINVAL; in ixgbe_mdio_write()
8841 return hw->phy.ops.write_reg(hw, addr, devad, value); in ixgbe_mdio_write()
8854 if (!adapter->hw.phy.ops.read_reg) in ixgbe_ioctl()
8855 return -EOPNOTSUPP; in ixgbe_ioctl()
8858 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); in ixgbe_ioctl()
8863 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
8864 * netdev->dev_addrs
8867 * Returns non-zero on failure
8873 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_add_sanmac_netdev()
8875 if (is_valid_ether_addr(hw->mac.san_addr)) { in ixgbe_add_sanmac_netdev()
8877 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); in ixgbe_add_sanmac_netdev()
8881 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); in ixgbe_add_sanmac_netdev()
8887 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
8888 * netdev->dev_addrs
8891 * Returns non-zero on failure
8897 struct ixgbe_mac_info *mac = &adapter->hw.mac; in ixgbe_del_sanmac_netdev()
8899 if (is_valid_ether_addr(mac->san_addr)) { in ixgbe_del_sanmac_netdev()
8901 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); in ixgbe_del_sanmac_netdev()
8915 start = u64_stats_fetch_begin_irq(&ring->syncp); in ixgbe_get_ring_stats64()
8916 packets = ring->stats.packets; in ixgbe_get_ring_stats64()
8917 bytes = ring->stats.bytes; in ixgbe_get_ring_stats64()
8918 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in ixgbe_get_ring_stats64()
8919 stats->tx_packets += packets; in ixgbe_get_ring_stats64()
8920 stats->tx_bytes += bytes; in ixgbe_get_ring_stats64()
8931 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_get_stats64()
8932 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
8938 start = u64_stats_fetch_begin_irq(&ring->syncp); in ixgbe_get_stats64()
8939 packets = ring->stats.packets; in ixgbe_get_stats64()
8940 bytes = ring->stats.bytes; in ixgbe_get_stats64()
8941 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in ixgbe_get_stats64()
8942 stats->rx_packets += packets; in ixgbe_get_stats64()
8943 stats->rx_bytes += bytes; in ixgbe_get_stats64()
8947 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_get_stats64()
8948 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); in ixgbe_get_stats64()
8952 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_get_stats64()
8953 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); in ixgbe_get_stats64()
8960 stats->multicast = netdev->stats.multicast; in ixgbe_get_stats64()
8961 stats->rx_errors = netdev->stats.rx_errors; in ixgbe_get_stats64()
8962 stats->rx_length_errors = netdev->stats.rx_length_errors; in ixgbe_get_stats64()
8963 stats->rx_crc_errors = netdev->stats.rx_crc_errors; in ixgbe_get_stats64()
8964 stats->rx_missed_errors = netdev->stats.rx_missed_errors; in ixgbe_get_stats64()
8969 * ixgbe_validate_rtr - verify 802.1p to Rx packet buffer mapping is valid.
8978 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_validate_rtr()
8985 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_validate_rtr()
9006 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
9013 struct net_device *dev = adapter->netdev; in ixgbe_set_prio_tc_map()
9014 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; in ixgbe_set_prio_tc_map()
9015 struct ieee_ets *ets = adapter->ixgbe_ieee_ets; in ixgbe_set_prio_tc_map()
9021 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) in ixgbe_set_prio_tc_map()
9024 tc = ets->prio_tc[prio]; in ixgbe_set_prio_tc_map()
9034 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; in ixgbe_reassign_macvlan_pool()
9048 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); in ixgbe_reassign_macvlan_pool()
9049 if (pool < adapter->num_rx_pools) { in ixgbe_reassign_macvlan_pool()
9050 set_bit(pool, adapter->fwd_bitmask); in ixgbe_reassign_macvlan_pool()
9051 accel->pool = pool; in ixgbe_reassign_macvlan_pool()
9060 netdev_unbind_sb_channel(adapter->netdev, vdev); in ixgbe_reassign_macvlan_pool()
9076 bitmap_clear(adapter->fwd_bitmask, 1, 63); in ixgbe_defrag_macvlan_pools()
9084 * ixgbe_setup_tc - configure net_device for multiple traffic classes
9092 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_tc()
9095 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) in ixgbe_setup_tc()
9096 return -EINVAL; in ixgbe_setup_tc()
9098 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) in ixgbe_setup_tc()
9099 return -EINVAL; in ixgbe_setup_tc()
9114 if (adapter->xdp_prog) { in ixgbe_setup_tc()
9120 return -EINVAL; in ixgbe_setup_tc()
9126 adapter->hw_tcs = tc; in ixgbe_setup_tc()
9127 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; in ixgbe_setup_tc()
9129 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_setup_tc()
9130 adapter->last_lfc_mode = adapter->hw.fc.requested_mode; in ixgbe_setup_tc()
9131 adapter->hw.fc.requested_mode = ixgbe_fc_none; in ixgbe_setup_tc()
9136 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_setup_tc()
9137 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; in ixgbe_setup_tc()
9139 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; in ixgbe_setup_tc()
9140 adapter->hw_tcs = tc; in ixgbe_setup_tc()
9142 adapter->temp_dcb_cfg.pfc_mode_enable = false; in ixgbe_setup_tc()
9143 adapter->dcb_cfg.pfc_mode_enable = false; in ixgbe_setup_tc()
9162 u32 hdl = cls->knode.handle; in ixgbe_delete_clsu32()
9163 u32 uhtid = TC_U32_USERHTID(cls->knode.handle); in ixgbe_delete_clsu32()
9164 u32 loc = cls->knode.handle & 0xfffff; in ixgbe_delete_clsu32()
9169 return -EINVAL; in ixgbe_delete_clsu32()
9172 return -EINVAL; in ixgbe_delete_clsu32()
9176 jump = adapter->jump_tables[uhtid]; in ixgbe_delete_clsu32()
9178 return -EINVAL; in ixgbe_delete_clsu32()
9179 if (!test_bit(loc - 1, jump->child_loc_map)) in ixgbe_delete_clsu32()
9180 return -EINVAL; in ixgbe_delete_clsu32()
9181 clear_bit(loc - 1, jump->child_loc_map); in ixgbe_delete_clsu32()
9186 jump = adapter->jump_tables[i]; in ixgbe_delete_clsu32()
9187 if (jump && jump->link_hdl == hdl) { in ixgbe_delete_clsu32()
9192 if (!test_bit(j, jump->child_loc_map)) in ixgbe_delete_clsu32()
9194 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
9198 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
9199 clear_bit(j, jump->child_loc_map); in ixgbe_delete_clsu32()
9202 kfree(jump->input); in ixgbe_delete_clsu32()
9203 kfree(jump->mask); in ixgbe_delete_clsu32()
9205 adapter->jump_tables[i] = NULL; in ixgbe_delete_clsu32()
9210 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
9212 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
9219 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); in ixgbe_configure_clsu32_add_hnode()
9222 return -EINVAL; in ixgbe_configure_clsu32_add_hnode()
9227 if (cls->hnode.divisor > 0) in ixgbe_configure_clsu32_add_hnode()
9228 return -EINVAL; in ixgbe_configure_clsu32_add_hnode()
9230 set_bit(uhtid - 1, &adapter->tables); in ixgbe_configure_clsu32_add_hnode()
9237 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); in ixgbe_configure_clsu32_del_hnode()
9240 return -EINVAL; in ixgbe_configure_clsu32_del_hnode()
9242 clear_bit(uhtid - 1, &adapter->tables); in ixgbe_configure_clsu32_del_hnode()
9263 data = (struct upper_walk_data *)priv->data; in get_macvlan_queue()
9264 ifindex = data->ifindex; in get_macvlan_queue()
9265 adapter = data->adapter; in get_macvlan_queue()
9266 if (vadapter && upper->ifindex == ifindex) { in get_macvlan_queue()
9267 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; in get_macvlan_queue()
9268 data->action = data->queue; in get_macvlan_queue()
9279 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in handle_redirect_action()
9280 unsigned int num_vfs = adapter->num_vfs, vf; in handle_redirect_action()
9287 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); in handle_redirect_action()
9288 if (upper->ifindex == ifindex) { in handle_redirect_action()
9289 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); in handle_redirect_action()
9302 if (netdev_walk_all_upper_dev_rcu(adapter->netdev, in handle_redirect_action()
9310 return -EINVAL; in handle_redirect_action()
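	/*
	 * Redirect targets are resolved in two passes: first the VF
	 * netdevs (queue = vf * queues-per-pool, i.e. the first queue of
	 * that VF's pool), then any macvlan uppers via the device walk;
	 * an ifindex matching neither is rejected with -EINVAL.
	 */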
9320 return -EINVAL; in parse_tc_actions()
9335 return -EINVAL; in parse_tc_actions()
9336 return handle_redirect_action(adapter, dev->ifindex, in parse_tc_actions()
9340 return -EINVAL; in parse_tc_actions()
9343 return -EINVAL; in parse_tc_actions()
9349 return -EINVAL; in parse_tc_actions()
9363 for (i = 0; i < cls->knode.sel->nkeys; i++) { in ixgbe_clsu32_build_input()
9364 off = cls->knode.sel->keys[i].off; in ixgbe_clsu32_build_input()
9365 val = cls->knode.sel->keys[i].val; in ixgbe_clsu32_build_input()
9366 m = cls->knode.sel->keys[i].mask; in ixgbe_clsu32_build_input()
9372 input->filter.formatted.flow_type |= in ixgbe_clsu32_build_input()
9379 if (nexthdr->off == cls->knode.sel->keys[i].off && in ixgbe_clsu32_build_input()
9380 nexthdr->val == in ixgbe_clsu32_build_input()
9381 (__force u32)cls->knode.sel->keys[i].val && in ixgbe_clsu32_build_input()
9382 nexthdr->mask == in ixgbe_clsu32_build_input()
9383 (__force u32)cls->knode.sel->keys[i].mask) in ixgbe_clsu32_build_input()
9391 return -EINVAL; in ixgbe_clsu32_build_input()
9396 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | in ixgbe_clsu32_build_input()
9399 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) in ixgbe_clsu32_build_input()
9400 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; in ixgbe_clsu32_build_input()
9408 __be16 protocol = cls->common.protocol; in ixgbe_configure_clsu32()
9409 u32 loc = cls->knode.handle & 0xfffff; in ixgbe_configure_clsu32()
9410 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_clsu32()
9415 int i, err = -EINVAL; in ixgbe_configure_clsu32()
9419 uhtid = TC_U32_USERHTID(cls->knode.handle); in ixgbe_configure_clsu32()
9420 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); in ixgbe_configure_clsu32()
9432 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { in ixgbe_configure_clsu32()
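		/*
		 * The perfect-filter table holds 1024 << fdir_pballoc
		 * entries, scaling with the packet-buffer space given to
		 * the flow director; the "- 2" suggests the top two slots
		 * are reserved, so the software index must stay below that.
		 */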
9445 field_ptr = (adapter->jump_tables[0])->mat; in ixgbe_configure_clsu32()
9449 if (!adapter->jump_tables[uhtid]) in ixgbe_configure_clsu32()
9451 field_ptr = (adapter->jump_tables[uhtid])->mat; in ixgbe_configure_clsu32()
9469 if (!test_bit(link_uhtid - 1, &adapter->tables)) in ixgbe_configure_clsu32()
9477 if (adapter->jump_tables[link_uhtid] && in ixgbe_configure_clsu32()
9478 (adapter->jump_tables[link_uhtid])->link_hdl) { in ixgbe_configure_clsu32()
9485 if (nexthdr[i].o != cls->knode.sel->offoff || in ixgbe_configure_clsu32()
9486 nexthdr[i].s != cls->knode.sel->offshift || in ixgbe_configure_clsu32()
9488 (__force u32)cls->knode.sel->offmask) in ixgbe_configure_clsu32()
9493 return -ENOMEM; in ixgbe_configure_clsu32()
9496 err = -ENOMEM; in ixgbe_configure_clsu32()
9501 err = -ENOMEM; in ixgbe_configure_clsu32()
9504 jump->input = input; in ixgbe_configure_clsu32()
9505 jump->mask = mask; in ixgbe_configure_clsu32()
9506 jump->link_hdl = cls->knode.handle; in ixgbe_configure_clsu32()
9511 jump->mat = nexthdr[i].jump; in ixgbe_configure_clsu32()
9512 adapter->jump_tables[link_uhtid] = jump; in ixgbe_configure_clsu32()
9525 return -ENOMEM; in ixgbe_configure_clsu32()
9528 err = -ENOMEM; in ixgbe_configure_clsu32()
9532 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { in ixgbe_configure_clsu32()
9533 if ((adapter->jump_tables[uhtid])->input) in ixgbe_configure_clsu32()
9534 memcpy(input, (adapter->jump_tables[uhtid])->input, in ixgbe_configure_clsu32()
9536 if ((adapter->jump_tables[uhtid])->mask) in ixgbe_configure_clsu32()
9537 memcpy(mask, (adapter->jump_tables[uhtid])->mask, in ixgbe_configure_clsu32()
9544 struct ixgbe_jump_table *link = adapter->jump_tables[i]; in ixgbe_configure_clsu32()
9546 if (link && (test_bit(loc - 1, link->child_loc_map))) { in ixgbe_configure_clsu32()
9549 err = -EINVAL; in ixgbe_configure_clsu32()
9558 err = parse_tc_actions(adapter, cls->knode.exts, &input->action, in ixgbe_configure_clsu32()
9563 input->sw_idx = loc; in ixgbe_configure_clsu32()
9565 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_configure_clsu32()
9567 if (hlist_empty(&adapter->fdir_filter_list)) { in ixgbe_configure_clsu32()
9568 memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); in ixgbe_configure_clsu32()
9572 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { in ixgbe_configure_clsu32()
9573 err = -EINVAL; in ixgbe_configure_clsu32()
9577 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); in ixgbe_configure_clsu32()
9578 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, in ixgbe_configure_clsu32()
9579 input->sw_idx, queue); in ixgbe_configure_clsu32()
9583 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); in ixgbe_configure_clsu32()
9584 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_configure_clsu32()
9586 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) in ixgbe_configure_clsu32()
9587 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); in ixgbe_configure_clsu32()
9592 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_configure_clsu32()
9605 switch (cls_u32->command) { in ixgbe_setup_tc_cls_u32()
9617 return -EOPNOTSUPP; in ixgbe_setup_tc_cls_u32()
9626 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) in ixgbe_setup_tc_block_cb()
9627 return -EOPNOTSUPP; in ixgbe_setup_tc_block_cb()
9633 return -EOPNOTSUPP; in ixgbe_setup_tc_block_cb()
9640 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in ixgbe_setup_tc_mqprio()
9641 return ixgbe_setup_tc(dev, mqprio->num_tc); in ixgbe_setup_tc_mqprio()
9660 return -EOPNOTSUPP; in __ixgbe_setup_tc()
9667 struct net_device *netdev = adapter->netdev; in ixgbe_sriov_reinit()
9670 ixgbe_setup_tc(netdev, adapter->hw_tcs); in ixgbe_sriov_reinit()
9695 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) in ixgbe_fix_features()
9698 if (adapter->xdp_prog && (features & NETIF_F_LRO)) { in ixgbe_fix_features()
9711 /* go back to full RSS if we're not running SR-IOV */ in ixgbe_reset_l2fw_offload()
9712 if (!adapter->ring_feature[RING_F_VMDQ].offset) in ixgbe_reset_l2fw_offload()
9713 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | in ixgbe_reset_l2fw_offload()
9716 adapter->ring_feature[RING_F_RSS].limit = rss; in ixgbe_reset_l2fw_offload()
9717 adapter->ring_feature[RING_F_VMDQ].limit = 1; in ixgbe_reset_l2fw_offload()
9719 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs); in ixgbe_reset_l2fw_offload()
9726 netdev_features_t changed = netdev->features ^ features; in ixgbe_set_features()
9731 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_set_features()
9733 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; in ixgbe_set_features()
9734 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && in ixgbe_set_features()
9735 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { in ixgbe_set_features()
9736 if (adapter->rx_itr_setting == 1 || in ixgbe_set_features()
9737 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { in ixgbe_set_features()
9738 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; in ixgbe_set_features()
9741 e_info(probe, "rx-usecs set too low, " in ixgbe_set_features()
9747 * Check if Flow Director n-tuple support or hw_tc support was in ixgbe_set_features()
9752 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) in ixgbe_set_features()
9755 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_features()
9756 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; in ixgbe_set_features()
9759 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) in ixgbe_set_features()
9762 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; in ixgbe_set_features()
9764 /* We cannot enable ATR if SR-IOV is enabled */ in ixgbe_set_features()
9765 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || in ixgbe_set_features()
9766 /* We cannot enable ATR if we have 2 or more tcs */ in ixgbe_set_features()
9767 (adapter->hw_tcs > 1) || in ixgbe_set_features()
9769 (adapter->ring_feature[RING_F_RSS].limit <= 1) || in ixgbe_set_features()
9771 (!adapter->atr_sample_rate)) in ixgbe_set_features()
9774 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_features()
9780 netdev->features = features; in ixgbe_set_features()
9782 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) in ixgbe_set_features()
9805 return -ENOMEM; in ixgbe_ndo_fdb_add()
9812 * ixgbe_configure_bridge_mode - set various bridge modes
9821 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_bridge_mode()
9828 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); in ixgbe_configure_bridge_mode()
9841 num_pools = adapter->num_vfs + adapter->num_rx_pools; in ixgbe_configure_bridge_mode()
9843 if (hw->mac.ops.set_source_address_pruning) in ixgbe_configure_bridge_mode()
9844 hw->mac.ops.set_source_address_pruning(hw, in ixgbe_configure_bridge_mode()
9851 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, in ixgbe_configure_bridge_mode()
9854 /* disable Rx switching replication unless we have SR-IOV in ixgbe_configure_bridge_mode()
9858 if (!adapter->num_vfs) in ixgbe_configure_bridge_mode()
9865 num_pools = adapter->num_vfs + adapter->num_rx_pools; in ixgbe_configure_bridge_mode()
9867 if (hw->mac.ops.set_source_address_pruning) in ixgbe_configure_bridge_mode()
9868 hw->mac.ops.set_source_address_pruning(hw, in ixgbe_configure_bridge_mode()
9874 return -EINVAL; in ixgbe_configure_bridge_mode()
9877 adapter->bridge_mode = mode; in ixgbe_configure_bridge_mode()
9893 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_ndo_bridge_setlink()
9894 return -EOPNOTSUPP; in ixgbe_ndo_bridge_setlink()
9898 return -EINVAL; in ixgbe_ndo_bridge_setlink()
9908 return -EINVAL; in ixgbe_ndo_bridge_setlink()
9927 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_ndo_bridge_getlink()
9931 adapter->bridge_mode, 0, 0, nlflags, in ixgbe_ndo_bridge_getlink()
9939 int tcs = adapter->hw_tcs ? : 1; in ixgbe_fwd_add()
9942 if (adapter->xdp_prog) { in ixgbe_fwd_add()
9944 return ERR_PTR(-EINVAL); in ixgbe_fwd_add()
9952 return ERR_PTR(-EMEDIUMTYPE); in ixgbe_fwd_add()
9959 return ERR_PTR(-ERANGE); in ixgbe_fwd_add()
9961 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); in ixgbe_fwd_add()
9962 if (pool == adapter->num_rx_pools) { in ixgbe_fwd_add()
9963 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; in ixgbe_fwd_add()
9966 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && in ixgbe_fwd_add()
9967 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || in ixgbe_fwd_add()
9968 adapter->num_rx_pools > IXGBE_MAX_MACVLANS) in ixgbe_fwd_add()
9969 return ERR_PTR(-EBUSY); in ixgbe_fwd_add()
9976 return ERR_PTR(-EBUSY); in ixgbe_fwd_add()
9979 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | in ixgbe_fwd_add()
9986 if (used_pools < 32 && adapter->num_rx_pools < 16) in ixgbe_fwd_add()
9988 32 - used_pools, in ixgbe_fwd_add()
9989 16 - adapter->num_rx_pools); in ixgbe_fwd_add()
9990 else if (adapter->num_rx_pools < 32) in ixgbe_fwd_add()
9992 64 - used_pools, in ixgbe_fwd_add()
9993 32 - adapter->num_rx_pools); in ixgbe_fwd_add()
9995 reserved_pools = 64 - used_pools; in ixgbe_fwd_add()
9999 return ERR_PTR(-EBUSY); in ixgbe_fwd_add()
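	/*
	 * Pool reservation is tiered: grow toward 16 pools first, then 32,
	 * with 64 as the hard limit; each step trades queues per pool for
	 * pool count (assumption: the tiers mirror the device's 16/32/64
	 * pool VMDq configurations).
	 */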
10001 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; in ixgbe_fwd_add()
10004 err = ixgbe_setup_tc(pdev, adapter->hw_tcs); in ixgbe_fwd_add()
10008 if (pool >= adapter->num_rx_pools) in ixgbe_fwd_add()
10009 return ERR_PTR(-ENOMEM); in ixgbe_fwd_add()
10014 return ERR_PTR(-ENOMEM); in ixgbe_fwd_add()
10016 set_bit(pool, adapter->fwd_bitmask); in ixgbe_fwd_add()
10018 accel->pool = pool; in ixgbe_fwd_add()
10019 accel->netdev = vdev; in ixgbe_fwd_add()
10035 unsigned int rxbase = accel->rx_base_queue; in ixgbe_fwd_del()
10039 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, in ixgbe_fwd_del()
10040 VMDQ_P(accel->pool)); in ixgbe_fwd_del()
10047 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { in ixgbe_fwd_del()
10048 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; in ixgbe_fwd_del()
10049 struct ixgbe_q_vector *qv = ring->q_vector; in ixgbe_fwd_del()
10054 if (netif_running(adapter->netdev)) in ixgbe_fwd_del()
10055 napi_synchronize(&qv->napi); in ixgbe_fwd_del()
10056 ring->netdev = NULL; in ixgbe_fwd_del()
10060 netdev_unbind_sb_channel(pdev, accel->netdev); in ixgbe_fwd_del()
10061 netdev_set_sb_channel(accel->netdev, 0); in ixgbe_fwd_del()
10063 clear_bit(accel->pool, adapter->fwd_bitmask); in ixgbe_fwd_del()
10077 mac_hdr_len = skb_network_header(skb) - skb->data; in ixgbe_features_check()
10086 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in ixgbe_features_check()
10096 * IPsec offload sets skb->encapsulation but still can handle in ixgbe_features_check()
10099 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { in ixgbe_features_check()
10111 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; in ixgbe_xdp_setup()
10117 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_xdp_setup()
10118 return -EINVAL; in ixgbe_xdp_setup()
10120 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) in ixgbe_xdp_setup()
10121 return -EINVAL; in ixgbe_xdp_setup()
10124 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_xdp_setup()
10125 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_xdp_setup()
10128 return -EINVAL; in ixgbe_xdp_setup()
10131 return -EINVAL; in ixgbe_xdp_setup()
10135 return -ENOMEM; in ixgbe_xdp_setup()
10137 old_prog = xchg(&adapter->xdp_prog, prog); in ixgbe_xdp_setup()
10145 /* Wait until ndo_xsk_wakeup completes. */ in ixgbe_xdp_setup()
10147 err = ixgbe_setup_tc(dev, adapter->hw_tcs); in ixgbe_xdp_setup()
10150 rcu_assign_pointer(adapter->xdp_prog, old_prog); in ixgbe_xdp_setup()
10151 return -EINVAL; in ixgbe_xdp_setup()
10154 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_xdp_setup()
10155 (void)xchg(&adapter->rx_ring[i]->xdp_prog, in ixgbe_xdp_setup()
10156 adapter->xdp_prog); in ixgbe_xdp_setup()
10166 num_queues = min_t(int, adapter->num_rx_queues, in ixgbe_xdp_setup()
10167 adapter->num_xdp_queues); in ixgbe_xdp_setup()
10169 if (adapter->xdp_ring[i]->xsk_pool) in ixgbe_xdp_setup()
10170 (void)ixgbe_xsk_wakeup(adapter->netdev, i, in ixgbe_xdp_setup()
10181 switch (xdp->command) { in ixgbe_xdp()
10183 return ixgbe_xdp_setup(dev, xdp->prog); in ixgbe_xdp()
10185 return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool, in ixgbe_xdp()
10186 xdp->xsk.queue_id); in ixgbe_xdp()
10189 return -EINVAL; in ixgbe_xdp()
10199 writel(ring->next_to_use, ring->tail); in ixgbe_xdp_ring_update_tail()
10210 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) in ixgbe_xdp_xmit()
10211 return -ENETDOWN; in ixgbe_xdp_xmit()
10214 return -EINVAL; in ixgbe_xdp_xmit()
10216 /* During program transitions it's possible adapter->xdp_prog is assigned in ixgbe_xdp_xmit()
10219 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; in ixgbe_xdp_xmit()
10221 return -ENXIO; in ixgbe_xdp_xmit()
10223 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) in ixgbe_xdp_xmit()
10224 return -ENXIO; in ixgbe_xdp_xmit()
10291 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_txr_hw()
10292 u8 reg_idx = tx_ring->reg_idx; in ixgbe_disable_txr_hw()
10304 while (wait_loop--) { in ixgbe_disable_txr_hw()
10319 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state); in ixgbe_disable_txr()
10327 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_rxr_hw()
10328 u8 reg_idx = rx_ring->reg_idx; in ixgbe_disable_rxr_hw()
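	/*
	 * Quiescing a ring is a two-step sequence: clear the ring enable
	 * bit, then poll (bounded by wait_loop) until hardware confirms
	 * the ring has actually stopped before its memory is touched.
	 */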
10340 if (hw->mac.type == ixgbe_mac_82598EB && in ixgbe_disable_rxr_hw()
10350 while (wait_loop--) { in ixgbe_disable_rxr_hw()
10364 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); in ixgbe_reset_txr_stats()
10365 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); in ixgbe_reset_txr_stats()
10370 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); in ixgbe_reset_rxr_stats()
10371 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); in ixgbe_reset_rxr_stats()
10375 * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
10386 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_disable()
10387 tx_ring = adapter->tx_ring[ring]; in ixgbe_txrx_ring_disable()
10388 xdp_ring = adapter->xdp_ring[ring]; in ixgbe_txrx_ring_disable()
10399 napi_disable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_disable()
10413 * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
10424 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_enable()
10425 tx_ring = adapter->tx_ring[ring]; in ixgbe_txrx_ring_enable()
10426 xdp_ring = adapter->xdp_ring[ring]; in ixgbe_txrx_ring_enable()
10429 napi_enable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_enable()
10436 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); in ixgbe_txrx_ring_enable()
10438 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); in ixgbe_txrx_ring_enable()
10442 * ixgbe_enumerate_functions - Get the number of ports this device has
10445 * This function enumerates the physical functions co-located on a single slot,
10452 struct pci_dev *entry, *pdev = adapter->pdev; in ixgbe_enumerate_functions()
10459 if (ixgbe_pcie_from_parent(&adapter->hw)) in ixgbe_enumerate_functions()
10462 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { in ixgbe_enumerate_functions()
10464 if (entry->is_virtfn) in ixgbe_enumerate_functions()
10470 * attached to a virtual machine using VT-d, for example. In in ixgbe_enumerate_functions()
10471 * this case, simply return -1 to indicate this. in ixgbe_enumerate_functions()
10473 if ((entry->vendor != pdev->vendor) || in ixgbe_enumerate_functions()
10474 (entry->device != pdev->device)) in ixgbe_enumerate_functions()
10475 return -1; in ixgbe_enumerate_functions()
10484 * ixgbe_wol_supported - Check whether device supports WoL
10496 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_wol_supported()
10497 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; in ixgbe_wol_supported()
10500 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_wol_supported()
10504 if (hw->mac.type >= ixgbe_mac_X540) { in ixgbe_wol_supported()
10507 (hw->bus.func == 0))) in ixgbe_wol_supported()
10521 if (hw->bus.func != 0) in ixgbe_wol_supported()
10556 * ixgbe_set_fw_version - Set FW version
10564 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_fw_version()
10569 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), in ixgbe_set_fw_version()
10579 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), in ixgbe_set_fw_version()
10586 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), in ixgbe_set_fw_version()
10591 * ixgbe_probe - Device Initialization Routine
10606 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; in ixgbe_probe()
10617 * the PCIe SR-IOV capability. in ixgbe_probe()
10619 if (pdev->is_virtfn) { in ixgbe_probe()
10621 pci_name(pdev), pdev->vendor, pdev->device); in ixgbe_probe()
10622 return -EINVAL; in ixgbe_probe()
10629 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { in ixgbe_probe()
10632 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in ixgbe_probe()
10634 dev_err(&pdev->dev, in ixgbe_probe()
10643 dev_err(&pdev->dev, in ixgbe_probe()
10653 if (ii->mac == ixgbe_mac_82598EB) { in ixgbe_probe()
10664 err = -ENOMEM; in ixgbe_probe()
10668 SET_NETDEV_DEV(netdev, &pdev->dev); in ixgbe_probe()
10672 adapter->netdev = netdev; in ixgbe_probe()
10673 adapter->pdev = pdev; in ixgbe_probe()
10674 hw = &adapter->hw; in ixgbe_probe()
10675 hw->back = adapter; in ixgbe_probe()
10676 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in ixgbe_probe()
10678 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), in ixgbe_probe()
10680 adapter->io_addr = hw->hw_addr; in ixgbe_probe()
10681 if (!hw->hw_addr) { in ixgbe_probe()
10682 err = -EIO; in ixgbe_probe()
10686 netdev->netdev_ops = &ixgbe_netdev_ops; in ixgbe_probe()
10688 netdev->watchdog_timeo = 5 * HZ; in ixgbe_probe()
10689 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); in ixgbe_probe()
10692 hw->mac.ops = *ii->mac_ops; in ixgbe_probe()
10693 hw->mac.type = ii->mac; in ixgbe_probe()
10694 hw->mvals = ii->mvals; in ixgbe_probe()
10695 if (ii->link_ops) in ixgbe_probe()
10696 hw->link.ops = *ii->link_ops; in ixgbe_probe()
10699 hw->eeprom.ops = *ii->eeprom_ops; in ixgbe_probe()
10701 if (ixgbe_removed(hw->hw_addr)) { in ixgbe_probe()
10702 err = -EIO; in ixgbe_probe()
10707 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; in ixgbe_probe()
10710 hw->phy.ops = *ii->phy_ops; in ixgbe_probe()
10711 hw->phy.sfp_type = ixgbe_sfp_type_unknown; in ixgbe_probe()
10713 hw->phy.mdio.prtad = MDIO_PRTAD_NONE; in ixgbe_probe()
10714 hw->phy.mdio.mmds = 0; in ixgbe_probe()
10715 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; in ixgbe_probe()
10716 hw->phy.mdio.dev = netdev; in ixgbe_probe()
10717 hw->phy.mdio.mdio_read = ixgbe_mdio_read; in ixgbe_probe()
10718 hw->phy.mdio.mdio_write = ixgbe_mdio_write; in ixgbe_probe()
10725 switch (adapter->hw.mac.type) { in ixgbe_probe()
10728 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; in ixgbe_probe()
10731 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a; in ixgbe_probe()
10738 if (hw->mac.ops.init_swfw_sync) in ixgbe_probe()
10739 hw->mac.ops.init_swfw_sync(hw); in ixgbe_probe()
10742 switch (adapter->hw.mac.type) { in ixgbe_probe()
10748 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); in ixgbe_probe()
10758 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { in ixgbe_probe()
10765 hw->allow_unsupported_sfp = allow_unsupported_sfp; in ixgbe_probe()
10768 hw->phy.reset_if_overtemp = true; in ixgbe_probe()
10769 err = hw->mac.ops.reset_hw(hw); in ixgbe_probe()
10770 hw->phy.reset_if_overtemp = false; in ixgbe_probe()
10784 /* SR-IOV not supported on the 82598 */ in ixgbe_probe()
10785 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_probe()
10789 hw->mbx.ops = ii->mbx_ops; in ixgbe_probe()
10795 netdev->features = NETIF_F_SG | in ixgbe_probe()
10809 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; in ixgbe_probe()
10810 netdev->features |= NETIF_F_GSO_PARTIAL | in ixgbe_probe()
10813 if (hw->mac.type >= ixgbe_mac_82599EB) in ixgbe_probe()
10814 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; in ixgbe_probe()
10821 if (adapter->ipsec) in ixgbe_probe()
10822 netdev->features |= IXGBE_ESP_FEATURES; in ixgbe_probe()
10825 netdev->hw_features |= netdev->features | in ixgbe_probe()
10832 if (hw->mac.type >= ixgbe_mac_82599EB) in ixgbe_probe()
10833 netdev->hw_features |= NETIF_F_NTUPLE | in ixgbe_probe()
10837 netdev->features |= NETIF_F_HIGHDMA; in ixgbe_probe()
10839 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in ixgbe_probe()
10840 netdev->hw_enc_features |= netdev->vlan_features; in ixgbe_probe()
10841 netdev->mpls_features |= NETIF_F_SG | in ixgbe_probe()
10845 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES; in ixgbe_probe()
10848 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in ixgbe_probe()
10852 netdev->priv_flags |= IFF_UNICAST_FLT; in ixgbe_probe()
10853 netdev->priv_flags |= IFF_SUPP_NOFCS; in ixgbe_probe()
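/*
 * The feature wiring above is layered: netdev->features is what is
 * active right now, netdev->hw_features is the superset the user may
 * toggle via ethtool, and vlan_features/hw_enc_features advertise what
 * can still be offloaded beneath a VLAN tag or tunnel encapsulation.
 */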
10855 /* MTU range: 68 - 9710 */ in ixgbe_probe()
10856 netdev->min_mtu = ETH_MIN_MTU; in ixgbe_probe()
10857 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); in ixgbe_probe()
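/*
 * The "9710" in the comment above is derived, not arbitrary: the
 * largest frame the hardware accepts (IXGBE_MAX_JUMBO_FRAME_SIZE,
 * 9728 bytes on the wire) minus the Ethernet header and FCS
 * (ETH_HLEN + ETH_FCS_LEN = 14 + 4 = 18), i.e. 9728 - 18 = 9710
 * bytes of MTU.
 */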
10860 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) in ixgbe_probe()
10861 netdev->dcbnl_ops = &ixgbe_dcbnl_ops; in ixgbe_probe()
10865 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { in ixgbe_probe()
10868 if (hw->mac.ops.get_device_caps) { in ixgbe_probe()
10869 hw->mac.ops.get_device_caps(hw, &device_caps); in ixgbe_probe()
10871 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_probe()
10876 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; in ixgbe_probe()
10878 netdev->features |= NETIF_F_FSO | in ixgbe_probe()
10881 netdev->vlan_features |= NETIF_F_FSO | in ixgbe_probe()
10886 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) in ixgbe_probe()
10887 netdev->hw_features |= NETIF_F_LRO; in ixgbe_probe()
10888 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_probe()
10889 netdev->features |= NETIF_F_LRO; in ixgbe_probe()
10892 err = -EIO; in ixgbe_probe()
10897 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { in ixgbe_probe()
10899 err = -EIO; in ixgbe_probe()
10903 eth_platform_get_mac_address(&adapter->pdev->dev, in ixgbe_probe()
10904 adapter->hw.mac.perm_addr); in ixgbe_probe()
10906 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); in ixgbe_probe()
10908 if (!is_valid_ether_addr(netdev->dev_addr)) { in ixgbe_probe()
10910 err = -EIO; in ixgbe_probe()
10914 /* Set hw->mac.addr to permanent MAC address */ in ixgbe_probe()
10915 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); in ixgbe_probe()
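/*
 * MAC address selection above: eth_platform_get_mac_address() gives
 * firmware/platform data a chance to override the EEPROM-provided
 * address, the result is validated, and only then latched into both
 * the netdev and hw->mac.addr as the current address.
 */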
10918 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); in ixgbe_probe()
10920 if (ixgbe_removed(hw->hw_addr)) { in ixgbe_probe()
10921 err = -EIO; in ixgbe_probe()
10924 INIT_WORK(&adapter->service_task, ixgbe_service_task); in ixgbe_probe()
10925 set_bit(__IXGBE_SERVICE_INITED, &adapter->state); in ixgbe_probe()
10926 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); in ixgbe_probe()
10932 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_probe()
10933 u64_stats_init(&adapter->rx_ring[i]->syncp); in ixgbe_probe()
10934 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_probe()
10935 u64_stats_init(&adapter->tx_ring[i]->syncp); in ixgbe_probe()
10936 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_probe()
10937 u64_stats_init(&adapter->xdp_ring[i]->syncp); in ixgbe_probe()
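/*
 * The u64_stats_init() loops above arm the per-ring seqcounts that let
 * 32-bit readers observe consistent 64-bit counters. A minimal sketch
 * of the matching reader side, under the assumption that the ring
 * keeps its counters in ring->stats:
 */
static void example_read_ring_stats(struct ixgbe_ring *ring,
				    u64 *packets, u64 *bytes)
{
	unsigned int start;

	/* Retry until no writer updated the counters mid-read. */
	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}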
10940 adapter->wol = 0; in ixgbe_probe()
10941 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); in ixgbe_probe()
10942 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, in ixgbe_probe()
10943 pdev->subsystem_device); in ixgbe_probe()
10944 if (hw->wol_enabled) in ixgbe_probe()
10945 adapter->wol = IXGBE_WUFC_MAG; in ixgbe_probe()
10947 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); in ixgbe_probe()
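/*
 * The WoL flow above gates in three stages: the EEPROM capability word
 * (offset 0x2c) says whether the port can wake at all,
 * ixgbe_wol_supported() filters by device/subsystem ID, and only then
 * is magic-packet wake (IXGBE_WUFC_MAG) advertised to the PM core via
 * device_set_wakeup_enable().
 */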
10956 hw->mac.ops.get_bus_info(hw); in ixgbe_probe()
10963 switch (hw->mac.type) { in ixgbe_probe()
10979 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) in ixgbe_probe()
10981 hw->mac.type, hw->phy.type, hw->phy.sfp_type, in ixgbe_probe()
10985 hw->mac.type, hw->phy.type, part_str); in ixgbe_probe()
10987 e_dev_info("%pM\n", netdev->dev_addr); in ixgbe_probe()
10990 err = hw->mac.ops.start_hw(hw); in ixgbe_probe()
10992 /* We are running on a pre-production device, log a warning */ in ixgbe_probe()
10993 e_dev_warn("This device is a pre-production adapter/LOM. " in ixgbe_probe()
11000 strcpy(netdev->name, "eth%d"); in ixgbe_probe()
11008 if (hw->mac.ops.disable_tx_laser) in ixgbe_probe()
11009 hw->mac.ops.disable_tx_laser(hw); in ixgbe_probe()
11015 if (dca_add_requester(&pdev->dev) == 0) { in ixgbe_probe()
11016 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; in ixgbe_probe()
11020 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_probe()
11021 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); in ixgbe_probe()
11022 for (i = 0; i < adapter->num_vfs; i++) in ixgbe_probe()
11029 if (hw->mac.ops.set_fw_drv_ver) in ixgbe_probe()
11030 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, in ixgbe_probe()
11031 sizeof(UTS_RELEASE) - 1, in ixgbe_probe()
11046 /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ in ixgbe_probe()
11047 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) in ixgbe_probe()
11048 hw->mac.ops.setup_link(hw, in ixgbe_probe()
11065 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_probe()
11066 iounmap(adapter->io_addr); in ixgbe_probe()
11067 kfree(adapter->jump_tables[0]); in ixgbe_probe()
11068 kfree(adapter->mac_table); in ixgbe_probe()
11069 kfree(adapter->rss_key); in ixgbe_probe()
11070 bitmap_free(adapter->af_xdp_zc_qps); in ixgbe_probe()
11072 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_probe()
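/*
 * The error path above unwinds in reverse order of acquisition: free
 * the late allocations (jump tables, MAC table, RSS key, AF_XDP
 * bitmap), unmap the BAR, then release the PCI resources, so that each
 * err_* label is a safe landing point for every failure that occurs
 * after its resource was taken.
 */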
11085 * ixgbe_remove - Device Removal Routine
11090 * Hot-Plug event, or because the driver is going to be removed from
11104 netdev = adapter->netdev; in ixgbe_remove()
11107 set_bit(__IXGBE_REMOVING, &adapter->state); in ixgbe_remove()
11108 cancel_work_sync(&adapter->service_task); in ixgbe_remove()
11110 if (adapter->mii_bus) in ixgbe_remove()
11111 mdiobus_unregister(adapter->mii_bus); in ixgbe_remove()
11114 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { in ixgbe_remove()
11115 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; in ixgbe_remove()
11116 dca_remove_requester(&pdev->dev); in ixgbe_remove()
11117 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in ixgbe_remove()
11132 if (netdev->reg_state == NETREG_REGISTERED) in ixgbe_remove()
11141 kfree(adapter->ixgbe_ieee_pfc); in ixgbe_remove()
11142 kfree(adapter->ixgbe_ieee_ets); in ixgbe_remove()
11145 iounmap(adapter->io_addr); in ixgbe_remove()
11151 if (adapter->jump_tables[i]) { in ixgbe_remove()
11152 kfree(adapter->jump_tables[i]->input); in ixgbe_remove()
11153 kfree(adapter->jump_tables[i]->mask); in ixgbe_remove()
11155 kfree(adapter->jump_tables[i]); in ixgbe_remove()
11158 kfree(adapter->mac_table); in ixgbe_remove()
11159 kfree(adapter->rss_key); in ixgbe_remove()
11160 bitmap_free(adapter->af_xdp_zc_qps); in ixgbe_remove()
11161 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_remove()
11171 * ixgbe_io_error_detected - called when PCI error is detected
11182 struct net_device *netdev = adapter->netdev; in ixgbe_io_error_detected()
11185 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_io_error_detected()
11191 if (adapter->hw.mac.type == ixgbe_mac_82598EB || in ixgbe_io_error_detected()
11192 adapter->num_vfs == 0) in ixgbe_io_error_detected()
11195 bdev = pdev->bus->self; in ixgbe_io_error_detected()
11197 bdev = bdev->bus->self; in ixgbe_io_error_detected()
11210 if (ixgbe_removed(hw->hw_addr)) in ixgbe_io_error_detected()
11219 if ((pf_func & 1) == (pdev->devfn & 1)) { in ixgbe_io_error_detected()
11227 switch (adapter->hw.mac.type) { in ixgbe_io_error_detected()
11251 if (vfdev->devfn == (req_id & 0xFF)) in ixgbe_io_error_detected()
11274 adapter->vferr_refcount++; in ixgbe_io_error_detected()
11280 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) in ixgbe_io_error_detected()
11297 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) in ixgbe_io_error_detected()
11306 * ixgbe_io_slot_reset - called after the pci bus has been reset.
11309 * Restart the card from scratch, as if from a cold-boot.
11317 e_err(probe, "Cannot re-enable PCI device after reset.\n"); in ixgbe_io_slot_reset()
11321 clear_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_io_slot_reset()
11322 adapter->hw.hw_addr = adapter->io_addr; in ixgbe_io_slot_reset()
11330 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); in ixgbe_io_slot_reset()
11338 * ixgbe_io_resume - called when traffic can start flowing again.
11347 struct net_device *netdev = adapter->netdev; in ixgbe_io_resume()
11350 if (adapter->vferr_refcount) { in ixgbe_io_resume()
11352 adapter->vferr_refcount--; in ixgbe_io_resume()
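/*
 * The three callbacks above form the standard PCI AER recovery triple:
 * error_detected() quiesces the device and returns a recovery verdict,
 * slot_reset() brings it back after the link reset, and resume()
 * restarts traffic. They are typically tied together in a
 * pci_error_handlers table along these lines (the variable name is
 * illustrative):
 */
static const struct pci_error_handlers example_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};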
11385 * ixgbe_init_module - Driver Registration Routine
11399 return -ENOMEM; in ixgbe_init_module()
11421 * ixgbe_exit_module - Driver Exit Cleanup Routine
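/*
 * The init/exit pair follows the canonical PCI driver module skeleton:
 * allocate the driver-private workqueue (the -ENOMEM above is that
 * allocation failing), register with the PCI core, and tear both down
 * in reverse on exit. A hedged sketch, omitting ancillary setup such
 * as debugfs:
 */
static int __init example_init_module(void)
{
	int ret;

	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
	if (!ixgbe_wq)
		return -ENOMEM;

	ret = pci_register_driver(&ixgbe_driver);
	if (ret)
		destroy_workqueue(ixgbe_wq);
	return ret;
}

static void __exit example_exit_module(void)
{
	pci_unregister_driver(&ixgbe_driver);
	destroy_workqueue(ixgbe_wq);
}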