Lines Matching +full:tcs +full:- +full:wait
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
28 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
54 /* i40e_pci_tbl - PCI Device ID Table
94 static int debug = -1;
98 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
113 if (ether_addr_equal(ha->addr, f->macaddr)) { in netdev_hw_addr_refcnt()
114 ha->refcount += delta; in netdev_hw_addr_refcnt()
115 if (ha->refcount <= 0) in netdev_hw_addr_refcnt()
116 ha->refcount = 1; in netdev_hw_addr_refcnt()
123 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
132 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_allocate_dma_mem_d()
134 mem->size = ALIGN(size, alignment); in i40e_allocate_dma_mem_d()
135 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, in i40e_allocate_dma_mem_d()
137 if (!mem->va) in i40e_allocate_dma_mem_d()
138 return -ENOMEM; in i40e_allocate_dma_mem_d()
144 * i40e_free_dma_mem_d - OS specific memory free for shared code
150 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_free_dma_mem_d()
152 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); in i40e_free_dma_mem_d()
153 mem->va = NULL; in i40e_free_dma_mem_d()
154 mem->pa = 0; in i40e_free_dma_mem_d()
155 mem->size = 0; in i40e_free_dma_mem_d()
161 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
169 mem->size = size; in i40e_allocate_virt_mem_d()
170 mem->va = kzalloc(size, GFP_KERNEL); in i40e_allocate_virt_mem_d()
172 if (!mem->va) in i40e_allocate_virt_mem_d()
173 return -ENOMEM; in i40e_allocate_virt_mem_d()
179 * i40e_free_virt_mem_d - OS specific memory free for shared code
186 kfree(mem->va); in i40e_free_virt_mem_d()
187 mem->va = NULL; in i40e_free_virt_mem_d()
188 mem->size = 0; in i40e_free_virt_mem_d()
194 * i40e_get_lump - find a lump of free generic resource
205 int ret = -ENOMEM; in i40e_get_lump()
209 dev_info(&pf->pdev->dev, in i40e_get_lump()
212 return -EINVAL; in i40e_get_lump()
218 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { in i40e_get_lump()
219 if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) { in i40e_get_lump()
220 dev_err(&pf->pdev->dev, in i40e_get_lump()
222 pile->num_entries - 1); in i40e_get_lump()
223 return -ENOMEM; in i40e_get_lump()
225 pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT; in i40e_get_lump()
226 return pile->num_entries - 1; in i40e_get_lump()
230 while (i < pile->num_entries) { in i40e_get_lump()
232 if (pile->list[i] & I40E_PILE_VALID_BIT) { in i40e_get_lump()
238 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { in i40e_get_lump()
239 if (pile->list[i+j] & I40E_PILE_VALID_BIT) in i40e_get_lump()
246 pile->list[i+j] = id | I40E_PILE_VALID_BIT; in i40e_get_lump()
259 * i40e_put_lump - return a lump of generic resource
272 if (!pile || index >= pile->num_entries) in i40e_put_lump()
273 return -EINVAL; in i40e_put_lump()
276 i < pile->num_entries && pile->list[i] == valid_id; in i40e_put_lump()
278 pile->list[i] = 0; in i40e_put_lump()
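
i40e_get_lump()/i40e_put_lump() implement a first-fit allocator over a flat u16 array: free slots hold 0, taken slots hold the owner id with a valid bit set, and a request scans for a contiguous run of 'needed' free entries. A self-contained sketch of the same scheme, with hypothetical my_* names:

    #include <linux/errno.h>
    #include <linux/types.h>

    #define MY_PILE_VALID_BIT 0x8000

    struct my_pile {
            u16 num_entries;
            u16 list[];        /* 0 when free, (id | VALID) when taken */
    };

    /* First-fit search for 'needed' contiguous free entries; tag each with
     * the owner id and return the base index, or -ENOMEM if no run fits.
     */
    static int my_get_lump(struct my_pile *pile, u16 needed, u16 id)
    {
            int i = 0, j;

            while (i < pile->num_entries) {
                    if (pile->list[i] & MY_PILE_VALID_BIT) {
                            i++;
                            continue;
                    }

                    /* count consecutive free entries starting at i */
                    for (j = 0; j < needed && (i + j) < pile->num_entries; j++)
                            if (pile->list[i + j] & MY_PILE_VALID_BIT)
                                    break;

                    if (j == needed) {
                            for (j = 0; j < needed; j++)
                                    pile->list[i + j] = id | MY_PILE_VALID_BIT;
                            return i;
                    }

                    /* run too short: resume past the entry that blocked it */
                    i += j + 1;
            }

            return -ENOMEM;
    }

    /* Release a lump: clear entries while they still carry this owner's tag. */
    static void my_put_lump(struct my_pile *pile, u16 index, u16 id)
    {
            u16 valid_id = id | MY_PILE_VALID_BIT;
            int i;

            for (i = index;
                 i < pile->num_entries && pile->list[i] == valid_id;
                 i++)
                    pile->list[i] = 0;
    }
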
287 * i40e_find_vsi_from_id - searches for the vsi with the given id
295 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_find_vsi_from_id()
296 if (pf->vsi[i] && (pf->vsi[i]->id == id)) in i40e_find_vsi_from_id()
297 return pf->vsi[i]; in i40e_find_vsi_from_id()
303 * i40e_service_event_schedule - Schedule the service task to wake up
310 if ((!test_bit(__I40E_DOWN, pf->state) && in i40e_service_event_schedule()
311 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || in i40e_service_event_schedule()
312 test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_service_event_schedule()
313 queue_work(i40e_wq, &pf->service_task); in i40e_service_event_schedule()
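
Scheduling here is gated on the PF state bitmap: the service task is queued only when the device is up and not mid-reset, or when recovery mode needs the task to make forward progress. A sketch of the idiom with hypothetical my_* state bits; the workqueue itself would be created once at probe or module init.

    #include <linux/bitops.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    enum my_state_bits {
            __MY_DOWN,
            __MY_RESET_PENDING,
            __MY_RECOVERY_MODE,
            __MY_STATE_SIZE,
    };

    struct my_pf {
            DECLARE_BITMAP(state, __MY_STATE_SIZE);
            struct work_struct service_task;
    };

    static struct workqueue_struct *my_wq;  /* allocated once at module init */

    /* Kick the service task only when it can do useful work: the device is
     * up and not mid-reset, or recovery mode needs the task to drive it.
     */
    static void my_service_event_schedule(struct my_pf *pf)
    {
            if ((!test_bit(__MY_DOWN, pf->state) &&
                 !test_bit(__MY_RESET_PENDING, pf->state)) ||
                test_bit(__MY_RECOVERY_MODE, pf->state))
                    queue_work(my_wq, &pf->service_task);
    }
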
317 * i40e_tx_timeout - Respond to a Tx Hang
328 struct i40e_vsi *vsi = np->vsi; in i40e_tx_timeout()
329 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout()
334 pf->tx_timeout_count++; in i40e_tx_timeout()
337 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_tx_timeout()
338 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { in i40e_tx_timeout()
340 vsi->tx_rings[i]->queue_index) { in i40e_tx_timeout()
341 tx_ring = vsi->tx_rings[i]; in i40e_tx_timeout()
347 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) in i40e_tx_timeout()
348 pf->tx_timeout_recovery_level = 1; /* reset after some time */ in i40e_tx_timeout()
350 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) in i40e_tx_timeout()
354 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) in i40e_tx_timeout()
360 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_tx_timeout()
361 val = rd32(&pf->hw, in i40e_tx_timeout()
362 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + in i40e_tx_timeout()
363 tx_ring->vsi->base_vector - 1)); in i40e_tx_timeout()
365 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); in i40e_tx_timeout()
368 vsi->seid, txqueue, tx_ring->next_to_clean, in i40e_tx_timeout()
369 head, tx_ring->next_to_use, in i40e_tx_timeout()
370 readl(tx_ring->tail), val); in i40e_tx_timeout()
373 pf->tx_timeout_last_recovery = jiffies; in i40e_tx_timeout()
375 pf->tx_timeout_recovery_level, txqueue); in i40e_tx_timeout()
377 switch (pf->tx_timeout_recovery_level) { in i40e_tx_timeout()
379 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
382 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
385 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
388 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); in i40e_tx_timeout()
389 set_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_tx_timeout()
390 set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state); in i40e_tx_timeout()
395 pf->tx_timeout_recovery_level++; in i40e_tx_timeout()
399 * i40e_get_vsi_stats_struct - Get System Network Statistics
407 return &vsi->net_stats; in i40e_get_vsi_stats_struct()
411 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
422 start = u64_stats_fetch_begin_irq(&ring->syncp); in i40e_get_netdev_stats_struct_tx()
423 packets = ring->stats.packets; in i40e_get_netdev_stats_struct_tx()
424 bytes = ring->stats.bytes; in i40e_get_netdev_stats_struct_tx()
425 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in i40e_get_netdev_stats_struct_tx()
427 stats->tx_packets += packets; in i40e_get_netdev_stats_struct_tx()
428 stats->tx_bytes += bytes; in i40e_get_netdev_stats_struct_tx()
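
The do/while pair around the counter reads is the standard u64_stats_sync idiom: on 32-bit kernels a writer can tear a 64-bit counter, so the reader retries whenever the sequence count moved underneath it. A minimal reader sketch with hypothetical my_* names; note that later kernels folded the _irq variants used above into plain u64_stats_fetch_begin()/u64_stats_fetch_retry().

    #include <linux/u64_stats_sync.h>

    struct my_ring {
            struct {
                    u64 packets;
                    u64 bytes;
            } stats;
            struct u64_stats_sync syncp;
    };

    /* Snapshot a consistent packets/bytes pair: if the sequence count moved
     * while we read, a writer raced us and we retry the reads.
     */
    static void my_fetch_ring_stats(const struct my_ring *ring,
                                    u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&ring->syncp);
                    *packets = ring->stats.packets;
                    *bytes = ring->stats.bytes;
            } while (u64_stats_fetch_retry(&ring->syncp, start));
    }

On 64-bit builds the begin/retry calls compile away, so the loop costs no more than the two reads.
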
432 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
443 struct i40e_vsi *vsi = np->vsi; in i40e_get_netdev_stats_struct()
448 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_get_netdev_stats_struct()
451 if (!vsi->tx_rings) in i40e_get_netdev_stats_struct()
455 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_get_netdev_stats_struct()
459 ring = READ_ONCE(vsi->tx_rings[i]); in i40e_get_netdev_stats_struct()
465 ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_get_netdev_stats_struct()
471 ring = READ_ONCE(vsi->rx_rings[i]); in i40e_get_netdev_stats_struct()
475 start = u64_stats_fetch_begin_irq(&ring->syncp); in i40e_get_netdev_stats_struct()
476 packets = ring->stats.packets; in i40e_get_netdev_stats_struct()
477 bytes = ring->stats.bytes; in i40e_get_netdev_stats_struct()
478 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in i40e_get_netdev_stats_struct()
480 stats->rx_packets += packets; in i40e_get_netdev_stats_struct()
481 stats->rx_bytes += bytes; in i40e_get_netdev_stats_struct()
487 stats->multicast = vsi_stats->multicast; in i40e_get_netdev_stats_struct()
488 stats->tx_errors = vsi_stats->tx_errors; in i40e_get_netdev_stats_struct()
489 stats->tx_dropped = vsi_stats->tx_dropped; in i40e_get_netdev_stats_struct()
490 stats->rx_errors = vsi_stats->rx_errors; in i40e_get_netdev_stats_struct()
491 stats->rx_dropped = vsi_stats->rx_dropped; in i40e_get_netdev_stats_struct()
492 stats->rx_crc_errors = vsi_stats->rx_crc_errors; in i40e_get_netdev_stats_struct()
493 stats->rx_length_errors = vsi_stats->rx_length_errors; in i40e_get_netdev_stats_struct()
497 * i40e_vsi_reset_stats - Resets all stats of the given vsi
510 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); in i40e_vsi_reset_stats()
511 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); in i40e_vsi_reset_stats()
512 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); in i40e_vsi_reset_stats()
513 if (vsi->rx_rings && vsi->rx_rings[0]) { in i40e_vsi_reset_stats()
514 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_reset_stats()
515 memset(&vsi->rx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
516 sizeof(vsi->rx_rings[i]->stats)); in i40e_vsi_reset_stats()
517 memset(&vsi->rx_rings[i]->rx_stats, 0, in i40e_vsi_reset_stats()
518 sizeof(vsi->rx_rings[i]->rx_stats)); in i40e_vsi_reset_stats()
519 memset(&vsi->tx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
520 sizeof(vsi->tx_rings[i]->stats)); in i40e_vsi_reset_stats()
521 memset(&vsi->tx_rings[i]->tx_stats, 0, in i40e_vsi_reset_stats()
522 sizeof(vsi->tx_rings[i]->tx_stats)); in i40e_vsi_reset_stats()
525 vsi->stat_offsets_loaded = false; in i40e_vsi_reset_stats()
529 * i40e_pf_reset_stats - Reset all of the stats for the given PF
536 memset(&pf->stats, 0, sizeof(pf->stats)); in i40e_pf_reset_stats()
537 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); in i40e_pf_reset_stats()
538 pf->stat_offsets_loaded = false; in i40e_pf_reset_stats()
541 if (pf->veb[i]) { in i40e_pf_reset_stats()
542 memset(&pf->veb[i]->stats, 0, in i40e_pf_reset_stats()
543 sizeof(pf->veb[i]->stats)); in i40e_pf_reset_stats()
544 memset(&pf->veb[i]->stats_offsets, 0, in i40e_pf_reset_stats()
545 sizeof(pf->veb[i]->stats_offsets)); in i40e_pf_reset_stats()
546 memset(&pf->veb[i]->tc_stats, 0, in i40e_pf_reset_stats()
547 sizeof(pf->veb[i]->tc_stats)); in i40e_pf_reset_stats()
548 memset(&pf->veb[i]->tc_stats_offsets, 0, in i40e_pf_reset_stats()
549 sizeof(pf->veb[i]->tc_stats_offsets)); in i40e_pf_reset_stats()
550 pf->veb[i]->stat_offsets_loaded = false; in i40e_pf_reset_stats()
553 pf->hw_csum_rx_error = 0; in i40e_pf_reset_stats()
557 * i40e_compute_pci_to_hw_id - compute index from PCI function.

565 if (vsi->type == I40E_VSI_SRIOV) in i40e_compute_pci_to_hw_id()
566 return (hw->port * BIT(7)) / pf_count + vsi->vf_id; in i40e_compute_pci_to_hw_id()
568 return hw->port + BIT(7); in i40e_compute_pci_to_hw_id()
572 * i40e_stat_update64 - read and update a 64 bit stat from the chip.
594 *stat = new_data - *offset; in i40e_stat_update64()
598 * i40e_stat_update48 - read and update a 48 bit stat from the chip
610 * the potential roll-over.
617 if (hw->device_id == I40E_DEV_ID_QEMU) { in i40e_stat_update48()
626 *stat = new_data - *offset; in i40e_stat_update48()
628 *stat = (new_data + BIT_ULL(48)) - *offset; in i40e_stat_update48()
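
The chip exposes these counters as free-running 48-bit values, so the driver snapshots a base offset on first read and widens deltas across wrap. A standalone sketch of just the wrap-safe arithmetic; the register-read half (including the QEMU split-read quirk above) is elided, and my_stat_update48 is a hypothetical name.

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Fold a raw 48-bit hardware reading into a monotonic u64. '*offset'
     * caches the first reading; when the raw value wraps below it, add
     * 2^48 before taking the difference, then mask back to 48 bits.
     */
    static void my_stat_update48(u64 new_data, bool offset_loaded,
                                 u64 *offset, u64 *stat)
    {
            if (!offset_loaded)
                    *offset = new_data;
            if (new_data >= *offset)
                    *stat = new_data - *offset;
            else
                    *stat = (new_data + BIT_ULL(48)) - *offset;
            *stat &= 0xFFFFFFFFFFFFULL;
    }
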
633 * i40e_stat_update32 - read and update a 32 bit stat from the chip
649 *stat = (u32)(new_data - *offset); in i40e_stat_update32()
651 *stat = (u32)((new_data + BIT_ULL(32)) - *offset); in i40e_stat_update32()
655 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
669 * i40e_stats_update_rx_discards - update rx_discards.
686 &stat_offset->rx_discards, &rx_rdpc); in i40e_stats_update_rx_discards()
690 offset_loaded, &stat_offset->rx_discards_other, in i40e_stats_update_rx_discards()
693 stat->rx_discards = rx_rdpc + rx_rxerr; in i40e_stats_update_rx_discards()
697 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
702 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); in i40e_update_eth_stats()
703 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats()
704 struct i40e_hw *hw = &pf->hw; in i40e_update_eth_stats()
708 es = &vsi->eth_stats; in i40e_update_eth_stats()
709 oes = &vsi->eth_stats_offsets; in i40e_update_eth_stats()
713 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
714 &oes->tx_errors, &es->tx_errors); in i40e_update_eth_stats()
716 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
717 &oes->rx_discards, &es->rx_discards); in i40e_update_eth_stats()
719 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
720 &oes->rx_unknown_protocol, &es->rx_unknown_protocol); in i40e_update_eth_stats()
724 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
725 &oes->rx_bytes, &es->rx_bytes); in i40e_update_eth_stats()
728 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
729 &oes->rx_unicast, &es->rx_unicast); in i40e_update_eth_stats()
732 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
733 &oes->rx_multicast, &es->rx_multicast); in i40e_update_eth_stats()
736 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
737 &oes->rx_broadcast, &es->rx_broadcast); in i40e_update_eth_stats()
741 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
742 &oes->tx_bytes, &es->tx_bytes); in i40e_update_eth_stats()
745 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
746 &oes->tx_unicast, &es->tx_unicast); in i40e_update_eth_stats()
749 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
750 &oes->tx_multicast, &es->tx_multicast); in i40e_update_eth_stats()
753 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
754 &oes->tx_broadcast, &es->tx_broadcast); in i40e_update_eth_stats()
757 vsi->stat_offsets_loaded, oes, es); in i40e_update_eth_stats()
759 vsi->stat_offsets_loaded = true; in i40e_update_eth_stats()
763 * i40e_update_veb_stats - Update Switch component statistics
768 struct i40e_pf *pf = veb->pf; in i40e_update_veb_stats()
769 struct i40e_hw *hw = &pf->hw; in i40e_update_veb_stats()
776 idx = veb->stats_idx; in i40e_update_veb_stats()
777 es = &veb->stats; in i40e_update_veb_stats()
778 oes = &veb->stats_offsets; in i40e_update_veb_stats()
779 veb_es = &veb->tc_stats; in i40e_update_veb_stats()
780 veb_oes = &veb->tc_stats_offsets; in i40e_update_veb_stats()
784 veb->stat_offsets_loaded, in i40e_update_veb_stats()
785 &oes->tx_discards, &es->tx_discards); in i40e_update_veb_stats()
786 if (hw->revision_id > 0) in i40e_update_veb_stats()
788 veb->stat_offsets_loaded, in i40e_update_veb_stats()
789 &oes->rx_unknown_protocol, in i40e_update_veb_stats()
790 &es->rx_unknown_protocol); in i40e_update_veb_stats()
792 veb->stat_offsets_loaded, in i40e_update_veb_stats()
793 &oes->rx_bytes, &es->rx_bytes); in i40e_update_veb_stats()
795 veb->stat_offsets_loaded, in i40e_update_veb_stats()
796 &oes->rx_unicast, &es->rx_unicast); in i40e_update_veb_stats()
798 veb->stat_offsets_loaded, in i40e_update_veb_stats()
799 &oes->rx_multicast, &es->rx_multicast); in i40e_update_veb_stats()
801 veb->stat_offsets_loaded, in i40e_update_veb_stats()
802 &oes->rx_broadcast, &es->rx_broadcast); in i40e_update_veb_stats()
805 veb->stat_offsets_loaded, in i40e_update_veb_stats()
806 &oes->tx_bytes, &es->tx_bytes); in i40e_update_veb_stats()
808 veb->stat_offsets_loaded, in i40e_update_veb_stats()
809 &oes->tx_unicast, &es->tx_unicast); in i40e_update_veb_stats()
811 veb->stat_offsets_loaded, in i40e_update_veb_stats()
812 &oes->tx_multicast, &es->tx_multicast); in i40e_update_veb_stats()
814 veb->stat_offsets_loaded, in i40e_update_veb_stats()
815 &oes->tx_broadcast, &es->tx_broadcast); in i40e_update_veb_stats()
819 veb->stat_offsets_loaded, in i40e_update_veb_stats()
820 &veb_oes->tc_rx_packets[i], in i40e_update_veb_stats()
821 &veb_es->tc_rx_packets[i]); in i40e_update_veb_stats()
824 veb->stat_offsets_loaded, in i40e_update_veb_stats()
825 &veb_oes->tc_rx_bytes[i], in i40e_update_veb_stats()
826 &veb_es->tc_rx_bytes[i]); in i40e_update_veb_stats()
829 veb->stat_offsets_loaded, in i40e_update_veb_stats()
830 &veb_oes->tc_tx_packets[i], in i40e_update_veb_stats()
831 &veb_es->tc_tx_packets[i]); in i40e_update_veb_stats()
834 veb->stat_offsets_loaded, in i40e_update_veb_stats()
835 &veb_oes->tc_tx_bytes[i], in i40e_update_veb_stats()
836 &veb_es->tc_tx_bytes[i]); in i40e_update_veb_stats()
838 veb->stat_offsets_loaded = true; in i40e_update_veb_stats()
842 * i40e_update_vsi_stats - Update the vsi statistics counters.
854 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats()
870 if (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_update_vsi_stats()
871 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_update_vsi_stats()
875 ons = &vsi->net_stats_offsets; in i40e_update_vsi_stats()
876 es = &vsi->eth_stats; in i40e_update_vsi_stats()
877 oes = &vsi->eth_stats_offsets; in i40e_update_vsi_stats()
893 for (q = 0; q < vsi->num_queue_pairs; q++) { in i40e_update_vsi_stats()
895 p = READ_ONCE(vsi->tx_rings[q]); in i40e_update_vsi_stats()
900 start = u64_stats_fetch_begin_irq(&p->syncp); in i40e_update_vsi_stats()
901 packets = p->stats.packets; in i40e_update_vsi_stats()
902 bytes = p->stats.bytes; in i40e_update_vsi_stats()
903 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); in i40e_update_vsi_stats()
906 tx_restart += p->tx_stats.restart_queue; in i40e_update_vsi_stats()
907 tx_busy += p->tx_stats.tx_busy; in i40e_update_vsi_stats()
908 tx_linearize += p->tx_stats.tx_linearize; in i40e_update_vsi_stats()
909 tx_force_wb += p->tx_stats.tx_force_wb; in i40e_update_vsi_stats()
910 tx_stopped += p->tx_stats.tx_stopped; in i40e_update_vsi_stats()
913 p = READ_ONCE(vsi->rx_rings[q]); in i40e_update_vsi_stats()
918 start = u64_stats_fetch_begin_irq(&p->syncp); in i40e_update_vsi_stats()
919 packets = p->stats.packets; in i40e_update_vsi_stats()
920 bytes = p->stats.bytes; in i40e_update_vsi_stats()
921 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); in i40e_update_vsi_stats()
924 rx_buf += p->rx_stats.alloc_buff_failed; in i40e_update_vsi_stats()
925 rx_page += p->rx_stats.alloc_page_failed; in i40e_update_vsi_stats()
926 rx_reuse += p->rx_stats.page_reuse_count; in i40e_update_vsi_stats()
927 rx_alloc += p->rx_stats.page_alloc_count; in i40e_update_vsi_stats()
928 rx_waive += p->rx_stats.page_waive_count; in i40e_update_vsi_stats()
929 rx_busy += p->rx_stats.page_busy_count; in i40e_update_vsi_stats()
933 p = READ_ONCE(vsi->xdp_rings[q]); in i40e_update_vsi_stats()
938 start = u64_stats_fetch_begin_irq(&p->syncp); in i40e_update_vsi_stats()
939 packets = p->stats.packets; in i40e_update_vsi_stats()
940 bytes = p->stats.bytes; in i40e_update_vsi_stats()
941 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); in i40e_update_vsi_stats()
944 tx_restart += p->tx_stats.restart_queue; in i40e_update_vsi_stats()
945 tx_busy += p->tx_stats.tx_busy; in i40e_update_vsi_stats()
946 tx_linearize += p->tx_stats.tx_linearize; in i40e_update_vsi_stats()
947 tx_force_wb += p->tx_stats.tx_force_wb; in i40e_update_vsi_stats()
951 vsi->tx_restart = tx_restart; in i40e_update_vsi_stats()
952 vsi->tx_busy = tx_busy; in i40e_update_vsi_stats()
953 vsi->tx_linearize = tx_linearize; in i40e_update_vsi_stats()
954 vsi->tx_force_wb = tx_force_wb; in i40e_update_vsi_stats()
955 vsi->tx_stopped = tx_stopped; in i40e_update_vsi_stats()
956 vsi->rx_page_failed = rx_page; in i40e_update_vsi_stats()
957 vsi->rx_buf_failed = rx_buf; in i40e_update_vsi_stats()
958 vsi->rx_page_reuse = rx_reuse; in i40e_update_vsi_stats()
959 vsi->rx_page_alloc = rx_alloc; in i40e_update_vsi_stats()
960 vsi->rx_page_waive = rx_waive; in i40e_update_vsi_stats()
961 vsi->rx_page_busy = rx_busy; in i40e_update_vsi_stats()
963 ns->rx_packets = rx_p; in i40e_update_vsi_stats()
964 ns->rx_bytes = rx_b; in i40e_update_vsi_stats()
965 ns->tx_packets = tx_p; in i40e_update_vsi_stats()
966 ns->tx_bytes = tx_b; in i40e_update_vsi_stats()
970 ons->tx_errors = oes->tx_errors; in i40e_update_vsi_stats()
971 ns->tx_errors = es->tx_errors; in i40e_update_vsi_stats()
972 ons->multicast = oes->rx_multicast; in i40e_update_vsi_stats()
973 ns->multicast = es->rx_multicast; in i40e_update_vsi_stats()
974 ons->rx_dropped = oes->rx_discards; in i40e_update_vsi_stats()
975 ns->rx_dropped = es->rx_discards; in i40e_update_vsi_stats()
976 ons->tx_dropped = oes->tx_discards; in i40e_update_vsi_stats()
977 ns->tx_dropped = es->tx_discards; in i40e_update_vsi_stats()
980 if (vsi == pf->vsi[pf->lan_vsi]) { in i40e_update_vsi_stats()
981 ns->rx_crc_errors = pf->stats.crc_errors; in i40e_update_vsi_stats()
982 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; in i40e_update_vsi_stats()
983 ns->rx_length_errors = pf->stats.rx_length_errors; in i40e_update_vsi_stats()
988 * i40e_update_pf_stats - Update the PF statistics counters.
993 struct i40e_hw_port_stats *osd = &pf->stats_offsets; in i40e_update_pf_stats()
994 struct i40e_hw_port_stats *nsd = &pf->stats; in i40e_update_pf_stats()
995 struct i40e_hw *hw = &pf->hw; in i40e_update_pf_stats()
999 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), in i40e_update_pf_stats()
1000 I40E_GLPRT_GORCL(hw->port), in i40e_update_pf_stats()
1001 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1002 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); in i40e_update_pf_stats()
1003 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), in i40e_update_pf_stats()
1004 I40E_GLPRT_GOTCL(hw->port), in i40e_update_pf_stats()
1005 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1006 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); in i40e_update_pf_stats()
1007 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), in i40e_update_pf_stats()
1008 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1009 &osd->eth.rx_discards, in i40e_update_pf_stats()
1010 &nsd->eth.rx_discards); in i40e_update_pf_stats()
1011 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), in i40e_update_pf_stats()
1012 I40E_GLPRT_UPRCL(hw->port), in i40e_update_pf_stats()
1013 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1014 &osd->eth.rx_unicast, in i40e_update_pf_stats()
1015 &nsd->eth.rx_unicast); in i40e_update_pf_stats()
1016 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), in i40e_update_pf_stats()
1017 I40E_GLPRT_MPRCL(hw->port), in i40e_update_pf_stats()
1018 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1019 &osd->eth.rx_multicast, in i40e_update_pf_stats()
1020 &nsd->eth.rx_multicast); in i40e_update_pf_stats()
1021 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), in i40e_update_pf_stats()
1022 I40E_GLPRT_BPRCL(hw->port), in i40e_update_pf_stats()
1023 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1024 &osd->eth.rx_broadcast, in i40e_update_pf_stats()
1025 &nsd->eth.rx_broadcast); in i40e_update_pf_stats()
1026 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), in i40e_update_pf_stats()
1027 I40E_GLPRT_UPTCL(hw->port), in i40e_update_pf_stats()
1028 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1029 &osd->eth.tx_unicast, in i40e_update_pf_stats()
1030 &nsd->eth.tx_unicast); in i40e_update_pf_stats()
1031 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), in i40e_update_pf_stats()
1032 I40E_GLPRT_MPTCL(hw->port), in i40e_update_pf_stats()
1033 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1034 &osd->eth.tx_multicast, in i40e_update_pf_stats()
1035 &nsd->eth.tx_multicast); in i40e_update_pf_stats()
1036 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), in i40e_update_pf_stats()
1037 I40E_GLPRT_BPTCL(hw->port), in i40e_update_pf_stats()
1038 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1039 &osd->eth.tx_broadcast, in i40e_update_pf_stats()
1040 &nsd->eth.tx_broadcast); in i40e_update_pf_stats()
1042 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), in i40e_update_pf_stats()
1043 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1044 &osd->tx_dropped_link_down, in i40e_update_pf_stats()
1045 &nsd->tx_dropped_link_down); in i40e_update_pf_stats()
1047 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), in i40e_update_pf_stats()
1048 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1049 &osd->crc_errors, &nsd->crc_errors); in i40e_update_pf_stats()
1051 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), in i40e_update_pf_stats()
1052 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1053 &osd->illegal_bytes, &nsd->illegal_bytes); in i40e_update_pf_stats()
1055 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), in i40e_update_pf_stats()
1056 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1057 &osd->mac_local_faults, in i40e_update_pf_stats()
1058 &nsd->mac_local_faults); in i40e_update_pf_stats()
1059 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), in i40e_update_pf_stats()
1060 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1061 &osd->mac_remote_faults, in i40e_update_pf_stats()
1062 &nsd->mac_remote_faults); in i40e_update_pf_stats()
1064 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), in i40e_update_pf_stats()
1065 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1066 &osd->rx_length_errors, in i40e_update_pf_stats()
1067 &nsd->rx_length_errors); in i40e_update_pf_stats()
1069 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), in i40e_update_pf_stats()
1070 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1071 &osd->link_xon_rx, &nsd->link_xon_rx); in i40e_update_pf_stats()
1072 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), in i40e_update_pf_stats()
1073 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1074 &osd->link_xon_tx, &nsd->link_xon_tx); in i40e_update_pf_stats()
1075 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), in i40e_update_pf_stats()
1076 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1077 &osd->link_xoff_rx, &nsd->link_xoff_rx); in i40e_update_pf_stats()
1078 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), in i40e_update_pf_stats()
1079 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1080 &osd->link_xoff_tx, &nsd->link_xoff_tx); in i40e_update_pf_stats()
1083 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), in i40e_update_pf_stats()
1084 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1085 &osd->priority_xoff_rx[i], in i40e_update_pf_stats()
1086 &nsd->priority_xoff_rx[i]); in i40e_update_pf_stats()
1087 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), in i40e_update_pf_stats()
1088 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1089 &osd->priority_xon_rx[i], in i40e_update_pf_stats()
1090 &nsd->priority_xon_rx[i]); in i40e_update_pf_stats()
1091 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), in i40e_update_pf_stats()
1092 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1093 &osd->priority_xon_tx[i], in i40e_update_pf_stats()
1094 &nsd->priority_xon_tx[i]); in i40e_update_pf_stats()
1095 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), in i40e_update_pf_stats()
1096 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1097 &osd->priority_xoff_tx[i], in i40e_update_pf_stats()
1098 &nsd->priority_xoff_tx[i]); in i40e_update_pf_stats()
1100 I40E_GLPRT_RXON2OFFCNT(hw->port, i), in i40e_update_pf_stats()
1101 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1102 &osd->priority_xon_2_xoff[i], in i40e_update_pf_stats()
1103 &nsd->priority_xon_2_xoff[i]); in i40e_update_pf_stats()
1106 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), in i40e_update_pf_stats()
1107 I40E_GLPRT_PRC64L(hw->port), in i40e_update_pf_stats()
1108 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1109 &osd->rx_size_64, &nsd->rx_size_64); in i40e_update_pf_stats()
1110 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), in i40e_update_pf_stats()
1111 I40E_GLPRT_PRC127L(hw->port), in i40e_update_pf_stats()
1112 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1113 &osd->rx_size_127, &nsd->rx_size_127); in i40e_update_pf_stats()
1114 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), in i40e_update_pf_stats()
1115 I40E_GLPRT_PRC255L(hw->port), in i40e_update_pf_stats()
1116 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1117 &osd->rx_size_255, &nsd->rx_size_255); in i40e_update_pf_stats()
1118 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), in i40e_update_pf_stats()
1119 I40E_GLPRT_PRC511L(hw->port), in i40e_update_pf_stats()
1120 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1121 &osd->rx_size_511, &nsd->rx_size_511); in i40e_update_pf_stats()
1122 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), in i40e_update_pf_stats()
1123 I40E_GLPRT_PRC1023L(hw->port), in i40e_update_pf_stats()
1124 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1125 &osd->rx_size_1023, &nsd->rx_size_1023); in i40e_update_pf_stats()
1126 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), in i40e_update_pf_stats()
1127 I40E_GLPRT_PRC1522L(hw->port), in i40e_update_pf_stats()
1128 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1129 &osd->rx_size_1522, &nsd->rx_size_1522); in i40e_update_pf_stats()
1130 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), in i40e_update_pf_stats()
1131 I40E_GLPRT_PRC9522L(hw->port), in i40e_update_pf_stats()
1132 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1133 &osd->rx_size_big, &nsd->rx_size_big); in i40e_update_pf_stats()
1135 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), in i40e_update_pf_stats()
1136 I40E_GLPRT_PTC64L(hw->port), in i40e_update_pf_stats()
1137 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1138 &osd->tx_size_64, &nsd->tx_size_64); in i40e_update_pf_stats()
1139 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), in i40e_update_pf_stats()
1140 I40E_GLPRT_PTC127L(hw->port), in i40e_update_pf_stats()
1141 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1142 &osd->tx_size_127, &nsd->tx_size_127); in i40e_update_pf_stats()
1143 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), in i40e_update_pf_stats()
1144 I40E_GLPRT_PTC255L(hw->port), in i40e_update_pf_stats()
1145 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1146 &osd->tx_size_255, &nsd->tx_size_255); in i40e_update_pf_stats()
1147 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), in i40e_update_pf_stats()
1148 I40E_GLPRT_PTC511L(hw->port), in i40e_update_pf_stats()
1149 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1150 &osd->tx_size_511, &nsd->tx_size_511); in i40e_update_pf_stats()
1151 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), in i40e_update_pf_stats()
1152 I40E_GLPRT_PTC1023L(hw->port), in i40e_update_pf_stats()
1153 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1154 &osd->tx_size_1023, &nsd->tx_size_1023); in i40e_update_pf_stats()
1155 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), in i40e_update_pf_stats()
1156 I40E_GLPRT_PTC1522L(hw->port), in i40e_update_pf_stats()
1157 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1158 &osd->tx_size_1522, &nsd->tx_size_1522); in i40e_update_pf_stats()
1159 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), in i40e_update_pf_stats()
1160 I40E_GLPRT_PTC9522L(hw->port), in i40e_update_pf_stats()
1161 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1162 &osd->tx_size_big, &nsd->tx_size_big); in i40e_update_pf_stats()
1164 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), in i40e_update_pf_stats()
1165 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1166 &osd->rx_undersize, &nsd->rx_undersize); in i40e_update_pf_stats()
1167 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), in i40e_update_pf_stats()
1168 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1169 &osd->rx_fragments, &nsd->rx_fragments); in i40e_update_pf_stats()
1170 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), in i40e_update_pf_stats()
1171 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1172 &osd->rx_oversize, &nsd->rx_oversize); in i40e_update_pf_stats()
1173 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), in i40e_update_pf_stats()
1174 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1175 &osd->rx_jabber, &nsd->rx_jabber); in i40e_update_pf_stats()
1179 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)), in i40e_update_pf_stats()
1180 &nsd->fd_atr_match); in i40e_update_pf_stats()
1182 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)), in i40e_update_pf_stats()
1183 &nsd->fd_sb_match); in i40e_update_pf_stats()
1185 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)), in i40e_update_pf_stats()
1186 &nsd->fd_atr_tunnel_match); in i40e_update_pf_stats()
1189 nsd->tx_lpi_status = in i40e_update_pf_stats()
1192 nsd->rx_lpi_status = in i40e_update_pf_stats()
1196 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1197 &osd->tx_lpi_count, &nsd->tx_lpi_count); in i40e_update_pf_stats()
1199 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1200 &osd->rx_lpi_count, &nsd->rx_lpi_count); in i40e_update_pf_stats()
1202 if (pf->flags & I40E_FLAG_FD_SB_ENABLED && in i40e_update_pf_stats()
1203 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1204 nsd->fd_sb_status = true; in i40e_update_pf_stats()
1206 nsd->fd_sb_status = false; in i40e_update_pf_stats()
1208 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && in i40e_update_pf_stats()
1209 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1210 nsd->fd_atr_status = true; in i40e_update_pf_stats()
1212 nsd->fd_atr_status = false; in i40e_update_pf_stats()
1214 pf->stat_offsets_loaded = true; in i40e_update_pf_stats()
1218 * i40e_update_stats - Update the various statistics counters.
1225 struct i40e_pf *pf = vsi->back; in i40e_update_stats()
1227 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_update_stats()
1234 * i40e_count_filters - counts VSI mac filters
1246 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) in i40e_count_filters()
1253 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1270 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_filter()
1271 if ((ether_addr_equal(macaddr, f->macaddr)) && in i40e_find_filter()
1272 (vlan == f->vlan)) in i40e_find_filter()
1279 * i40e_find_mac - Find a mac addr in the macvlan filters list
1295 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_mac()
1296 if ((ether_addr_equal(macaddr, f->macaddr))) in i40e_find_mac()
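
Both lookups above walk a kernel hashtable keyed on the MAC address, so each search scans a single bucket rather than the whole filter list. A self-contained sketch of that structure; the my_* names are hypothetical, and the key helper stands in for the driver's i40e_addr_to_hkey(), which appears further down in this listing.

    #include <linux/etherdevice.h>
    #include <linux/hashtable.h>
    #include <linux/string.h>

    struct my_mac_filter {
            struct hlist_node hlist;
            u8 macaddr[ETH_ALEN];
            s16 vlan;        /* VLAN id, or -1 for "any VLAN" */
    };

    /* A 256-bucket table (8 hash bits), like the VSI's mac_filter_hash. */
    static DEFINE_HASHTABLE(my_filter_hash, 8);

    /* Fold the MAC into a hash key; a stand-in for i40e_addr_to_hkey(). */
    static u64 my_addr_to_hkey(const u8 *macaddr)
    {
            u64 key = 0;

            memcpy(&key, macaddr, ETH_ALEN);
            return key;
    }

    /* Scan only the bucket the key hashes to, not the whole table. */
    static struct my_mac_filter *my_find_filter(const u8 *macaddr, s16 vlan)
    {
            u64 key = my_addr_to_hkey(macaddr);
            struct my_mac_filter *f;

            hash_for_each_possible(my_filter_hash, f, hlist, key) {
                    if (ether_addr_equal(macaddr, f->macaddr) &&
                        vlan == f->vlan)
                            return f;
            }
            return NULL;
    }
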
1303 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1311 if (vsi->info.pvid) in i40e_is_vsi_in_vlan()
1334 return vsi->has_vlan_filter; in i40e_is_vsi_in_vlan()
1338 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1344 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1346 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1348 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1353 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1358 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1371 s16 pvid = le16_to_cpu(vsi->info.pvid); in i40e_correct_mac_vlan_filters()
1384 * which are marked as VLAN=-1 must be replaced with in i40e_correct_mac_vlan_filters()
1388 * marked as VLAN=-1 in i40e_correct_mac_vlan_filters()
1393 if (pvid && new->f->vlan != pvid) in i40e_correct_mac_vlan_filters()
1394 new->f->vlan = pvid; in i40e_correct_mac_vlan_filters()
1395 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) in i40e_correct_mac_vlan_filters()
1396 new->f->vlan = 0; in i40e_correct_mac_vlan_filters()
1397 else if (!vlan_filters && new->f->vlan == 0) in i40e_correct_mac_vlan_filters()
1398 new->f->vlan = I40E_VLAN_ANY; in i40e_correct_mac_vlan_filters()
1402 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_correct_mac_vlan_filters()
1408 if ((pvid && f->vlan != pvid) || in i40e_correct_mac_vlan_filters()
1409 (vlan_filters && f->vlan == I40E_VLAN_ANY) || in i40e_correct_mac_vlan_filters()
1410 (!vlan_filters && f->vlan == 0)) { in i40e_correct_mac_vlan_filters()
1420 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); in i40e_correct_mac_vlan_filters()
1422 return -ENOMEM; in i40e_correct_mac_vlan_filters()
1427 return -ENOMEM; in i40e_correct_mac_vlan_filters()
1429 new->f = add_head; in i40e_correct_mac_vlan_filters()
1430 new->state = add_head->state; in i40e_correct_mac_vlan_filters()
1433 hlist_add_head(&new->hlist, tmp_add_list); in i40e_correct_mac_vlan_filters()
1436 f->state = I40E_FILTER_REMOVE; in i40e_correct_mac_vlan_filters()
1437 hash_del(&f->hlist); in i40e_correct_mac_vlan_filters()
1438 hlist_add_head(&f->hlist, tmp_del_list); in i40e_correct_mac_vlan_filters()
1442 vsi->has_vlan_filter = !!vlan_filters; in i40e_correct_mac_vlan_filters()
1448 * i40e_get_vf_new_vlan - Get new vlan id on a vf
1451 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
1456 * and vf-vlan-prune-disable flag.
1467 s16 pvid = le16_to_cpu(vsi->info.pvid); in i40e_get_vf_new_vlan()
1468 struct i40e_pf *pf = vsi->back; in i40e_get_vf_new_vlan()
1472 f = new_mac->f; in i40e_get_vf_new_vlan()
1474 if (pvid && f->vlan != pvid) in i40e_get_vf_new_vlan()
1478 !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING)); in i40e_get_vf_new_vlan()
1480 if ((vlan_filters && f->vlan == I40E_VLAN_ANY) || in i40e_get_vf_new_vlan()
1481 (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) || in i40e_get_vf_new_vlan()
1482 (is_any && !vlan_filters && f->vlan == 0)) { in i40e_get_vf_new_vlan()
1489 return f->vlan; in i40e_get_vf_new_vlan()
1493 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
1501 * and vf-vlan-prune-disable flag.
1503 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1523 new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL, in i40e_correct_vf_mac_vlan_filters()
1527 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_correct_vf_mac_vlan_filters()
1530 if (new_vlan != f->vlan) { in i40e_correct_vf_mac_vlan_filters()
1531 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); in i40e_correct_vf_mac_vlan_filters()
1533 return -ENOMEM; in i40e_correct_vf_mac_vlan_filters()
1537 return -ENOMEM; in i40e_correct_vf_mac_vlan_filters()
1538 new_mac->f = add_head; in i40e_correct_vf_mac_vlan_filters()
1539 new_mac->state = add_head->state; in i40e_correct_vf_mac_vlan_filters()
1542 hlist_add_head(&new_mac->hlist, tmp_add_list); in i40e_correct_vf_mac_vlan_filters()
1545 f->state = I40E_FILTER_REMOVE; in i40e_correct_vf_mac_vlan_filters()
1546 hash_del(&f->hlist); in i40e_correct_vf_mac_vlan_filters()
1547 hlist_add_head(&f->hlist, tmp_del_list); in i40e_correct_vf_mac_vlan_filters()
1551 vsi->has_vlan_filter = !!vlan_filters; in i40e_correct_vf_mac_vlan_filters()
1556 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1557 * @vsi: the PF Main VSI - inappropriate for any other VSI
1566 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter()
1569 if (vsi->type != I40E_VSI_MAIN) in i40e_rm_default_mac_filter()
1577 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1585 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1589 * i40e_add_filter - Add a mac/vlan filter to the VSI
1618 vsi->has_vlan_filter = true; in i40e_add_filter()
1620 ether_addr_copy(f->macaddr, macaddr); in i40e_add_filter()
1621 f->vlan = vlan; in i40e_add_filter()
1622 f->state = I40E_FILTER_NEW; in i40e_add_filter()
1623 INIT_HLIST_NODE(&f->hlist); in i40e_add_filter()
1626 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_add_filter()
1628 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_filter()
1629 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_add_filter()
1640 if (f->state == I40E_FILTER_REMOVE) in i40e_add_filter()
1641 f->state = I40E_FILTER_ACTIVE; in i40e_add_filter()
1647 * __i40e_del_filter - Remove a specific filter from the VSI
1670 if ((f->state == I40E_FILTER_FAILED) || in __i40e_del_filter()
1671 (f->state == I40E_FILTER_NEW)) { in __i40e_del_filter()
1672 hash_del(&f->hlist); in __i40e_del_filter()
1675 f->state = I40E_FILTER_REMOVE; in __i40e_del_filter()
1678 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in __i40e_del_filter()
1679 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in __i40e_del_filter()
1683 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1706 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1724 if (vsi->info.pvid) in i40e_add_mac_filter()
1726 le16_to_cpu(vsi->info.pvid)); in i40e_add_mac_filter()
1731 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_mac_filter()
1732 if (f->state == I40E_FILTER_REMOVE) in i40e_add_mac_filter()
1734 add = i40e_add_filter(vsi, macaddr, f->vlan); in i40e_add_mac_filter()
1743 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1759 lockdep_assert_held(&vsi->mac_filter_hash_lock); in i40e_del_mac_filter()
1760 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_del_mac_filter()
1761 if (ether_addr_equal(macaddr, f->macaddr)) { in i40e_del_mac_filter()
1770 return -ENOENT; in i40e_del_mac_filter()
1774 * i40e_set_mac - NDO callback to set mac address
1783 struct i40e_vsi *vsi = np->vsi; in i40e_set_mac()
1784 struct i40e_pf *pf = vsi->back; in i40e_set_mac()
1785 struct i40e_hw *hw = &pf->hw; in i40e_set_mac()
1788 if (!is_valid_ether_addr(addr->sa_data)) in i40e_set_mac()
1789 return -EADDRNOTAVAIL; in i40e_set_mac()
1791 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { in i40e_set_mac()
1793 addr->sa_data); in i40e_set_mac()
1797 if (test_bit(__I40E_DOWN, pf->state) || in i40e_set_mac()
1798 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_set_mac()
1799 return -EADDRNOTAVAIL; in i40e_set_mac()
1801 if (ether_addr_equal(hw->mac.addr, addr->sa_data)) in i40e_set_mac()
1803 hw->mac.addr); in i40e_set_mac()
1805 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); in i40e_set_mac()
1809 * - Remove old address from MAC filter in i40e_set_mac()
1810 * - Copy new address in i40e_set_mac()
1811 * - Add new address to MAC filter in i40e_set_mac()
1813 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1814 i40e_del_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1815 eth_hw_addr_set(netdev, addr->sa_data); in i40e_set_mac()
1816 i40e_add_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1817 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1819 if (vsi->type == I40E_VSI_MAIN) { in i40e_set_mac()
1823 addr->sa_data, NULL); in i40e_set_mac()
1827 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_mac()
1838 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1847 struct i40e_pf *pf = vsi->back; in i40e_config_rss_aq()
1848 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_aq()
1854 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); in i40e_config_rss_aq()
1856 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1859 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_config_rss_aq()
1864 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_config_rss_aq()
1866 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_config_rss_aq()
1868 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1871 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_config_rss_aq()
1879 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1884 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_rss()
1889 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) in i40e_vsi_config_rss()
1891 if (!vsi->rss_size) in i40e_vsi_config_rss()
1892 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_vsi_config_rss()
1893 vsi->num_queue_pairs); in i40e_vsi_config_rss()
1894 if (!vsi->rss_size) in i40e_vsi_config_rss()
1895 return -EINVAL; in i40e_vsi_config_rss()
1896 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_config_rss()
1898 return -ENOMEM; in i40e_vsi_config_rss()
1903 if (vsi->rss_lut_user) in i40e_vsi_config_rss()
1904 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_vsi_config_rss()
1906 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_vsi_config_rss()
1907 if (vsi->rss_hkey_user) in i40e_vsi_config_rss()
1908 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_config_rss()
1911 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_config_rss()
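
When no user-supplied LUT exists, the driver stripes queue indices across the table modulo rss_size, which is all the i40e_fill_rss_lut() call above amounts to. A sketch of that fill, assuming the byte-wide LUT the AQ call consumes; my_fill_rss_lut is a hypothetical stand-in.

    #include <linux/types.h>

    /* Stripe queue indices across the lookup table: entry i steers to
     * queue (i % rss_size), giving an even spread with no user input.
     */
    static void my_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
    {
            u16 i;

            for (i = 0; i < rss_table_size; i++)
                    lut[i] = i % rss_size;
    }
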
1917 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1932 if (vsi->type != I40E_VSI_MAIN) in i40e_vsi_setup_queue_map_mqprio()
1933 return -EINVAL; in i40e_vsi_setup_queue_map_mqprio()
1936 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; in i40e_vsi_setup_queue_map_mqprio()
1937 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map_mqprio()
1938 num_qps = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1940 /* find the next higher power-of-2 of num queue pairs */ in i40e_vsi_setup_queue_map_mqprio()
1947 /* Setup queue offset/count for all TCs for given VSI */ in i40e_vsi_setup_queue_map_mqprio()
1948 max_qcount = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1951 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map_mqprio()
1952 offset = vsi->mqprio_qopt.qopt.offset[i]; in i40e_vsi_setup_queue_map_mqprio()
1953 qcount = vsi->mqprio_qopt.qopt.count[i]; in i40e_vsi_setup_queue_map_mqprio()
1956 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map_mqprio()
1957 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map_mqprio()
1958 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map_mqprio()
1964 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map_mqprio()
1965 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map_mqprio()
1966 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map_mqprio()
1971 vsi->num_queue_pairs = offset + qcount; in i40e_vsi_setup_queue_map_mqprio()
1974 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map_mqprio()
1975 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); in i40e_vsi_setup_queue_map_mqprio()
1976 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map_mqprio()
1977 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_vsi_setup_queue_map_mqprio()
1980 vsi->rss_size = max_qcount; in i40e_vsi_setup_queue_map_mqprio()
1983 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
1988 vsi->reconfig_rss = true; in i40e_vsi_setup_queue_map_mqprio()
1989 dev_dbg(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
1995 override_q = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1996 if (override_q && override_q < vsi->num_queue_pairs) { in i40e_vsi_setup_queue_map_mqprio()
1997 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; in i40e_vsi_setup_queue_map_mqprio()
1998 vsi->next_base_queue = override_q; in i40e_vsi_setup_queue_map_mqprio()
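
The "next higher power-of-2" comment above refers to how each TC's queue count is encoded: the VSI context word carries log2 of the count next to the queue offset, so a non-power-of-two count is rounded up before encoding. A sketch under that assumption; the shift constants are stand-ins for the driver's own I40E_AQ_VSI_TC_QUE_* definitions, which live in the elided lines.

    #include <linux/log2.h>
    #include <linux/types.h>

    /* Stand-ins for the driver's I40E_AQ_VSI_TC_QUE_*_SHIFT definitions. */
    #define MY_TC_QUE_OFFSET_SHIFT  0
    #define MY_TC_QUE_NUMBER_SHIFT  9

    /* Encode one TC's queue region as (offset, log2(count)); a count that
     * is not a power of two is rounded up first. Assumes num_qps >= 1.
     */
    static u16 my_build_tc_qmap(u16 offset, u16 num_qps)
    {
            u8 pow = ilog2(num_qps);

            if (!is_power_of_2(num_qps))
                    pow++;        /* next higher power-of-2 */

            return (offset << MY_TC_QUE_OFFSET_SHIFT) |
                   (pow << MY_TC_QUE_NUMBER_SHIFT);
    }
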
2004 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
2007 * @enabled_tc: Enabled TCs bitmap
2017 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_queue_map()
2030 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); in i40e_vsi_setup_queue_map()
2032 if (vsi->type == I40E_VSI_MAIN) { in i40e_vsi_setup_queue_map()
2036 * non-zero req_queue_pairs says that user requested a new in i40e_vsi_setup_queue_map()
2042 if (vsi->req_queue_pairs > 0) in i40e_vsi_setup_queue_map()
2043 vsi->num_queue_pairs = vsi->req_queue_pairs; in i40e_vsi_setup_queue_map()
2044 else if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
2045 vsi->num_queue_pairs = pf->num_lan_msix; in i40e_vsi_setup_queue_map()
2047 vsi->num_queue_pairs = 1; in i40e_vsi_setup_queue_map()
2051 if (vsi->type == I40E_VSI_MAIN || in i40e_vsi_setup_queue_map()
2052 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) in i40e_vsi_setup_queue_map()
2053 num_tc_qps = vsi->num_queue_pairs; in i40e_vsi_setup_queue_map()
2055 num_tc_qps = vsi->alloc_queue_pairs; in i40e_vsi_setup_queue_map()
2057 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_setup_queue_map()
2064 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); in i40e_vsi_setup_queue_map()
2072 vsi->tc_config.numtc = numtc; in i40e_vsi_setup_queue_map()
2073 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map()
2075 /* Do not allow more TC queue pairs than MSI-X vectors exist */ in i40e_vsi_setup_queue_map()
2076 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
2077 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); in i40e_vsi_setup_queue_map()
2079 /* Setup queue offset/count for all TCs for given VSI */ in i40e_vsi_setup_queue_map()
2082 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map()
2086 switch (vsi->type) { in i40e_vsi_setup_queue_map()
2088 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | in i40e_vsi_setup_queue_map()
2090 vsi->tc_config.enabled_tc != 1) { in i40e_vsi_setup_queue_map()
2091 qcount = min_t(int, pf->alloc_rss_size, in i40e_vsi_setup_queue_map()
2104 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map()
2105 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map()
2107 /* find the next higher power-of-2 of num queue pairs */ in i40e_vsi_setup_queue_map()
2115 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map()
2126 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map()
2127 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map()
2128 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map()
2132 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map()
2135 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || in i40e_vsi_setup_queue_map()
2136 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || in i40e_vsi_setup_queue_map()
2137 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) in i40e_vsi_setup_queue_map()
2138 vsi->num_queue_pairs = offset; in i40e_vsi_setup_queue_map()
2144 ctxt->info.up_enable_bits = enabled_tc; in i40e_vsi_setup_queue_map()
2146 if (vsi->type == I40E_VSI_SRIOV) { in i40e_vsi_setup_queue_map()
2147 ctxt->info.mapping_flags |= in i40e_vsi_setup_queue_map()
2149 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_setup_queue_map()
2150 ctxt->info.queue_mapping[i] = in i40e_vsi_setup_queue_map()
2151 cpu_to_le16(vsi->base_queue + i); in i40e_vsi_setup_queue_map()
2153 ctxt->info.mapping_flags |= in i40e_vsi_setup_queue_map()
2155 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map()
2157 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_vsi_setup_queue_map()
2161 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
2171 struct i40e_vsi *vsi = np->vsi; in i40e_addr_sync()
2176 return -ENOMEM; in i40e_addr_sync()
2180 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2190 struct i40e_vsi *vsi = np->vsi; in i40e_addr_unsync()
2197 if (ether_addr_equal(addr, netdev->dev_addr)) in i40e_addr_unsync()
2206 * i40e_set_rx_mode - NDO callback to set the netdev filters
2212 struct i40e_vsi *vsi = np->vsi; in i40e_set_rx_mode()
2214 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
2219 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
2222 if (vsi->current_netdev_flags != vsi->netdev->flags) { in i40e_set_rx_mode()
2223 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_set_rx_mode()
2224 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_set_rx_mode()
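
i40e_addr_sync()/i40e_addr_unsync() above are not called directly: the elided body of i40e_set_rx_mode() hands them to __dev_uc_sync()/__dev_mc_sync(), which diff the netdev's unicast/multicast lists against what was previously synced and invoke the callbacks only for the changes. A sketch of the wiring with hypothetical my_* names:

    #include <linux/netdevice.h>

    /* Called by __dev_{uc,mc}_sync() for each address newly added to the
     * netdev; a real handler would insert it into the driver filter table
     * and return -ENOMEM on failure so the core retries later.
     */
    static int my_addr_sync(struct net_device *netdev, const u8 *addr)
    {
            return 0;
    }

    /* Called for each address removed from the netdev's lists. */
    static int my_addr_unsync(struct net_device *netdev, const u8 *addr)
    {
            return 0;
    }

    static void my_set_rx_mode(struct net_device *netdev)
    {
            /* The core diffs the netdev's UC/MC lists against what was
             * previously synced and calls back only for the changes; in
             * the excerpt this runs under mac_filter_hash_lock.
             */
            __dev_uc_sync(netdev, my_addr_sync, my_addr_unsync);
            __dev_mc_sync(netdev, my_addr_sync, my_addr_unsync);
    }

Returning an error from the sync callback leaves that address unsynced, so the core offers it again on a later rx-mode pass.
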
2229 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2231 * @from: Pointer to list which contains MAC filter entries - changes to
2243 u64 key = i40e_addr_to_hkey(f->macaddr); in i40e_undo_del_filter_entries()
2246 hlist_del(&f->hlist); in i40e_undo_del_filter_entries()
2247 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_undo_del_filter_entries()
2252 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2254 * @from: Pointer to list which contains MAC filter entries - changes to
2267 hlist_del(&new->hlist); in i40e_undo_add_filter_entries()
2268 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); in i40e_undo_add_filter_entries()
2274 * i40e_next_filter - Get the next non-broadcast filter from a list
2277 * Returns the next non-broadcast filter in the list. Required so that we
2285 if (!is_broadcast_ether_addr(next->f->macaddr)) in i40e_next_filter()
2293 * i40e_update_filter_state - Update filter state based on return data
2312 * the firmware return status because we pre-set the filter in i40e_update_filter_state()
2318 add_head->state = I40E_FILTER_FAILED; in i40e_update_filter_state()
2320 add_head->state = I40E_FILTER_ACTIVE; in i40e_update_filter_state()
2333 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2338 * @retval: Set to -EIO on failure to delete
2350 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_del_filters()
2354 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL, in i40e_aqc_del_filters()
2359 *retval = -EIO; in i40e_aqc_del_filters()
2360 dev_info(&vsi->back->pdev->dev, in i40e_aqc_del_filters()
2368 * i40e_aqc_add_filters - Request firmware to add a set of filters
2376 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2385 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_add_filters()
2389 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status); in i40e_aqc_add_filters()
2393 if (vsi->type == I40E_VSI_MAIN) { in i40e_aqc_add_filters()
2394 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_add_filters()
2395 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2398 } else if (vsi->type == I40E_VSI_SRIOV || in i40e_aqc_add_filters()
2399 vsi->type == I40E_VSI_VMDQ1 || in i40e_aqc_add_filters()
2400 vsi->type == I40E_VSI_VMDQ2) { in i40e_aqc_add_filters()
2401 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2406 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2409 vsi->type); in i40e_aqc_add_filters()
2415 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2430 bool enable = f->state == I40E_FILTER_NEW; in i40e_aqc_broadcast_filter()
2431 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_broadcast_filter()
2434 if (f->vlan == I40E_VLAN_ANY) { in i40e_aqc_broadcast_filter()
2436 vsi->seid, in i40e_aqc_broadcast_filter()
2441 vsi->seid, in i40e_aqc_broadcast_filter()
2443 f->vlan, in i40e_aqc_broadcast_filter()
2448 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_broadcast_filter()
2449 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_broadcast_filter()
2451 i40e_aq_str(hw, hw->aq.asq_last_status), in i40e_aqc_broadcast_filter()
2459 * i40e_set_promiscuous - set promiscuous mode
2469 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_set_promiscuous()
2470 struct i40e_hw *hw = &pf->hw; in i40e_set_promiscuous()
2473 if (vsi->type == I40E_VSI_MAIN && in i40e_set_promiscuous()
2474 pf->lan_veb != I40E_NO_VEB && in i40e_set_promiscuous()
2475 !(pf->flags & I40E_FLAG_MFP_ENABLED)) { in i40e_set_promiscuous()
2483 vsi->seid, in i40e_set_promiscuous()
2487 vsi->seid, in i40e_set_promiscuous()
2490 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2493 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_promiscuous()
2498 vsi->seid, in i40e_set_promiscuous()
2502 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2505 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_promiscuous()
2509 vsi->seid, in i40e_set_promiscuous()
2512 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2515 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_promiscuous()
2520 pf->cur_promisc = promisc; in i40e_set_promiscuous()
2526 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2538 struct i40e_hw *hw = &vsi->back->hw; in i40e_sync_vsi_filters()
2559 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) in i40e_sync_vsi_filters()
2561 pf = vsi->back; in i40e_sync_vsi_filters()
2563 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2565 if (vsi->netdev) { in i40e_sync_vsi_filters()
2566 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in i40e_sync_vsi_filters()
2567 vsi->current_netdev_flags = vsi->netdev->flags; in i40e_sync_vsi_filters()
2573 if (vsi->type == I40E_VSI_SRIOV) in i40e_sync_vsi_filters()
2574 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); in i40e_sync_vsi_filters()
2575 else if (vsi->type != I40E_VSI_MAIN) in i40e_sync_vsi_filters()
2576 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); in i40e_sync_vsi_filters()
2578 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { in i40e_sync_vsi_filters()
2579 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2581 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2583 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_sync_vsi_filters()
2584 if (f->state == I40E_FILTER_REMOVE) { in i40e_sync_vsi_filters()
2586 hash_del(&f->hlist); in i40e_sync_vsi_filters()
2587 hlist_add_head(&f->hlist, &tmp_del_list); in i40e_sync_vsi_filters()
2592 if (f->state == I40E_FILTER_NEW) { in i40e_sync_vsi_filters()
2599 new->f = f; in i40e_sync_vsi_filters()
2600 new->state = f->state; in i40e_sync_vsi_filters()
2603 hlist_add_head(&new->hlist, &tmp_add_list); in i40e_sync_vsi_filters()
2610 if (f->vlan > 0) in i40e_sync_vsi_filters()
2614 if (vsi->type != I40E_VSI_SRIOV) in i40e_sync_vsi_filters()
2621 vlan_filters, pf->vf[vsi->vf_id].trusted); in i40e_sync_vsi_filters()
2624 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1); in i40e_sync_vsi_filters()
2629 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2634 filter_list_len = hw->aq.asq_buf_size / in i40e_sync_vsi_filters()
2648 if (is_broadcast_ether_addr(f->macaddr)) { in i40e_sync_vsi_filters()
2651 hlist_del(&f->hlist); in i40e_sync_vsi_filters()
2657 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); in i40e_sync_vsi_filters()
2658 if (f->vlan == I40E_VLAN_ANY) { in i40e_sync_vsi_filters()
2663 cpu_to_le16((u16)(f->vlan)); in i40e_sync_vsi_filters()
2680 hlist_del(&f->hlist); in i40e_sync_vsi_filters()
2695 filter_list_len = hw->aq.asq_buf_size / in i40e_sync_vsi_filters()
2708 if (is_broadcast_ether_addr(new->f->macaddr)) { in i40e_sync_vsi_filters()
2710 new->f)) in i40e_sync_vsi_filters()
2711 new->state = I40E_FILTER_FAILED; in i40e_sync_vsi_filters()
2713 new->state = I40E_FILTER_ACTIVE; in i40e_sync_vsi_filters()
2722 new->f->macaddr); in i40e_sync_vsi_filters()
2723 if (new->f->vlan == I40E_VLAN_ANY) { in i40e_sync_vsi_filters()
2728 cpu_to_le16((u16)(new->f->vlan)); in i40e_sync_vsi_filters()
2752 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2755 if (new->f->state == I40E_FILTER_NEW) in i40e_sync_vsi_filters()
2756 new->f->state = new->state; in i40e_sync_vsi_filters()
2757 hlist_del(&new->hlist); in i40e_sync_vsi_filters()
2758 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); in i40e_sync_vsi_filters()
2761 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2767 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2768 vsi->active_filters = 0; in i40e_sync_vsi_filters()
2769 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { in i40e_sync_vsi_filters()
2770 if (f->state == I40E_FILTER_ACTIVE) in i40e_sync_vsi_filters()
2771 vsi->active_filters++; in i40e_sync_vsi_filters()
2772 else if (f->state == I40E_FILTER_FAILED) in i40e_sync_vsi_filters()
2775 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2782 vsi->active_filters < vsi->promisc_threshold) { in i40e_sync_vsi_filters()
2783 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2786 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2787 vsi->promisc_threshold = 0; in i40e_sync_vsi_filters()
2791 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { in i40e_sync_vsi_filters()
2792 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2796 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2802 vsi->promisc_threshold = (vsi->active_filters * 3) / 4; in i40e_sync_vsi_filters()
2808 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); in i40e_sync_vsi_filters()
2809 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, in i40e_sync_vsi_filters()
2810 vsi->seid, in i40e_sync_vsi_filters()
2815 hw->aq.asq_last_status); in i40e_sync_vsi_filters()
2816 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2820 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_sync_vsi_filters()
2822 dev_info(&pf->pdev->dev, "%s allmulti mode.\n", in i40e_sync_vsi_filters()
2830 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || in i40e_sync_vsi_filters()
2835 hw->aq.asq_last_status); in i40e_sync_vsi_filters()
2836 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2841 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_sync_vsi_filters()
2847 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2849 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2854 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2858 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2860 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2861 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2862 return -ENOMEM; in i40e_sync_vsi_filters()
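/* Illustrative summary, not from the source: the sync above is two-phase.
 * Filters marked I40E_FILTER_REMOVE are unhooked onto tmp_del_list and
 * batched into admin-queue remove commands; I40E_FILTER_NEW entries are
 * cloned onto tmp_add_list and batched into add commands, with the filter
 * hash lock dropped while the admin-queue commands (which may sleep) run:
 *
 *	hash_del(&f->hlist);
 *	hlist_add_head(&f->hlist, &tmp_del_list);	// later: AQ remove
 */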
2866 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2875 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) in i40e_sync_filters_subtask()
2877 if (test_bit(__I40E_VF_DISABLE, pf->state)) { in i40e_sync_filters_subtask()
2878 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_sync_filters_subtask()
2882 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_sync_filters_subtask()
2883 if (pf->vsi[v] && in i40e_sync_filters_subtask()
2884 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && in i40e_sync_filters_subtask()
2885 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { in i40e_sync_filters_subtask()
2886 int ret = i40e_sync_vsi_filters(pf->vsi[v]); in i40e_sync_filters_subtask()
2891 pf->state); in i40e_sync_filters_subtask()
2899 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2904 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) in i40e_max_xdp_frame_size()
2911 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2920 struct i40e_vsi *vsi = np->vsi; in i40e_change_mtu()
2921 struct i40e_pf *pf = vsi->back; in i40e_change_mtu()
2927 return -EINVAL; in i40e_change_mtu()
2931 netdev->mtu, new_mtu); in i40e_change_mtu()
2932 netdev->mtu = new_mtu; in i40e_change_mtu()
2935 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_change_mtu()
2936 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_change_mtu()
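/* Hedged sketch of the elided validation behind the return -EINVAL above,
 * assuming the usual i40e check and the I40E_PACKET_HDR_PAD constant: with
 * XDP attached, the new MTU must fit a single Rx buffer, so anything above
 * i40e_max_xdp_frame_size() is rejected before the MTU is committed:
 *
 *	if (i40e_enabled_xdp_vsi(vsi)) {
 *		int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
 *
 *		if (frame_size > i40e_max_xdp_frame_size(vsi))
 *			return -EINVAL;
 *	}
 */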
2941 * i40e_ioctl - Access the hwtstamp interface
2949 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl()
2957 return -EOPNOTSUPP; in i40e_ioctl()
2962 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2971 if (vsi->info.pvid) in i40e_vlan_stripping_enable()
2974 if ((vsi->info.valid_sections & in i40e_vlan_stripping_enable()
2976 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) in i40e_vlan_stripping_enable()
2979 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_enable()
2980 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_enable()
2983 ctxt.seid = vsi->seid; in i40e_vlan_stripping_enable()
2984 ctxt.info = vsi->info; in i40e_vlan_stripping_enable()
2985 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_enable()
2987 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_enable()
2989 i40e_stat_str(&vsi->back->hw, ret), in i40e_vlan_stripping_enable()
2990 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_enable()
2991 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_enable()
2996 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
3005 if (vsi->info.pvid) in i40e_vlan_stripping_disable()
3008 if ((vsi->info.valid_sections & in i40e_vlan_stripping_disable()
3010 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == in i40e_vlan_stripping_disable()
3014 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_disable()
3015 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_disable()
3018 ctxt.seid = vsi->seid; in i40e_vlan_stripping_disable()
3019 ctxt.info = vsi->info; in i40e_vlan_stripping_disable()
3020 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_disable()
3022 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_disable()
3024 i40e_stat_str(&vsi->back->hw, ret), in i40e_vlan_stripping_disable()
3025 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_disable()
3026 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_disable()
3031 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
3033 * @vid: vlan id to be added (0 = untagged only, -1 = any)
3049 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vlan_all_mac()
3059 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) { in i40e_add_vlan_all_mac()
3060 f->state = I40E_FILTER_ACTIVE; in i40e_add_vlan_all_mac()
3062 } else if (f->state == I40E_FILTER_REMOVE) { in i40e_add_vlan_all_mac()
3065 add_f = i40e_add_filter(vsi, f->macaddr, vid); in i40e_add_vlan_all_mac()
3067 dev_info(&vsi->back->pdev->dev, in i40e_add_vlan_all_mac()
3069 vid, f->macaddr); in i40e_add_vlan_all_mac()
3070 return -ENOMEM; in i40e_add_vlan_all_mac()
3078 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
3086 if (vsi->info.pvid) in i40e_vsi_add_vlan()
3087 return -EINVAL; in i40e_vsi_add_vlan()
3101 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3103 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3110 i40e_service_event_schedule(vsi->back); in i40e_vsi_add_vlan()
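/* Usage sketch, not from the source: i40e_vsi_add_vlan() takes the filter
 * hash lock around the per-MAC fan-out and then kicks the service task so
 * i40e_sync_vsi_filters() pushes the result to HW; the ndo_vlan_rx_add_vid
 * handler further below calls it roughly as:
 *
 *	ret = i40e_vsi_add_vlan(vsi, vid);
 *	if (!ret)
 *		set_bit(vid, vsi->active_vlans);
 */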
3115 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
3117 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
3133 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_rm_vlan_all_mac()
3134 if (f->vlan == vid) in i40e_rm_vlan_all_mac()
3140 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
3146 if (!vid || vsi->info.pvid) in i40e_vsi_kill_vlan()
3149 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3151 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3156 i40e_service_event_schedule(vsi->back); in i40e_vsi_kill_vlan()
3160 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
3171 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid()
3175 return -EINVAL; in i40e_vlan_rx_add_vid()
3179 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid()
3185 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
3194 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid_up()
3198 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid_up()
3202 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3213 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_kill_vid()
3221 clear_bit(vid, vsi->active_vlans); in i40e_vlan_rx_kill_vid()
3227 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3234 if (!vsi->netdev) in i40e_restore_vlan()
3237 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in i40e_restore_vlan()
3242 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) in i40e_restore_vlan()
3243 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), in i40e_restore_vlan()
3248 * i40e_vsi_add_pvid - Add pvid for the VSI
3257 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vsi_add_pvid()
3258 vsi->info.pvid = cpu_to_le16(vid); in i40e_vsi_add_pvid()
3259 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | in i40e_vsi_add_pvid()
3263 ctxt.seid = vsi->seid; in i40e_vsi_add_pvid()
3264 ctxt.info = vsi->info; in i40e_vsi_add_pvid()
3265 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vsi_add_pvid()
3267 dev_info(&vsi->back->pdev->dev, in i40e_vsi_add_pvid()
3269 i40e_stat_str(&vsi->back->hw, ret), in i40e_vsi_add_pvid()
3270 i40e_aq_str(&vsi->back->hw, in i40e_vsi_add_pvid()
3271 vsi->back->hw.aq.asq_last_status)); in i40e_vsi_add_pvid()
3272 return -ENOENT; in i40e_vsi_add_pvid()
3279 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3286 vsi->info.pvid = 0; in i40e_vsi_remove_pvid()
3292 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3305 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3306 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); in i40e_vsi_setup_tx_resources()
3311 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3312 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); in i40e_vsi_setup_tx_resources()
3318 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3327 if (vsi->tx_rings) { in i40e_vsi_free_tx_resources()
3328 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3329 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in i40e_vsi_free_tx_resources()
3330 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_vsi_free_tx_resources()
3333 if (vsi->xdp_rings) { in i40e_vsi_free_tx_resources()
3334 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3335 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in i40e_vsi_free_tx_resources()
3336 i40e_free_tx_resources(vsi->xdp_rings[i]); in i40e_vsi_free_tx_resources()
3341 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
3354 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_rx_resources()
3355 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); in i40e_vsi_setup_rx_resources()
3360 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
3369 if (!vsi->rx_rings) in i40e_vsi_free_rx_resources()
3372 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_rx_resources()
3373 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in i40e_vsi_free_rx_resources()
3374 i40e_free_rx_resources(vsi->rx_rings[i]); in i40e_vsi_free_rx_resources()
3378 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3382 * based on the TCs enabled for the VSI that the ring belongs to.
3388 if (!ring->q_vector || !ring->netdev || ring->ch) in i40e_config_xps_tx_ring()
3392 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) in i40e_config_xps_tx_ring()
3395 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); in i40e_config_xps_tx_ring()
3396 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), in i40e_config_xps_tx_ring()
3397 ring->queue_index); in i40e_config_xps_tx_ring()
3401 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC are enabled
3408 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); in i40e_xsk_pool()
3409 int qid = ring->queue_index; in i40e_xsk_pool()
3412 qid -= ring->vsi->alloc_queue_pairs; in i40e_xsk_pool()
3414 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) in i40e_xsk_pool()
3417 return xsk_get_pool_from_qid(ring->vsi->netdev, qid); in i40e_xsk_pool()
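/* Worked example (illustrative): XDP Tx rings are stacked after the regular
 * queue pairs, so a ring at queue_index alloc_queue_pairs + n maps back to
 * AF_XDP queue id n before the zero-copy bitmap lookup:
 *
 *	qid = ring->queue_index;		// e.g. 68
 *	qid -= ring->vsi->alloc_queue_pairs;	// with 64 pairs -> qid 4
 */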
3421 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of its resources
3428 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_tx_ring()
3429 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_tx_ring()
3430 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_tx_ring()
3436 ring->xsk_pool = i40e_xsk_pool(ring); in i40e_configure_tx_ring()
3439 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { in i40e_configure_tx_ring()
3440 ring->atr_sample_rate = vsi->back->atr_sample_rate; in i40e_configure_tx_ring()
3441 ring->atr_count = 0; in i40e_configure_tx_ring()
3443 ring->atr_sample_rate = 0; in i40e_configure_tx_ring()
3453 tx_ctx.base = (ring->dma / 128); in i40e_configure_tx_ring()
3454 tx_ctx.qlen = ring->count; in i40e_configure_tx_ring()
3455 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | in i40e_configure_tx_ring()
3457 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); in i40e_configure_tx_ring()
3459 if (vsi->type != I40E_VSI_FDIR) in i40e_configure_tx_ring()
3461 tx_ctx.head_wb_addr = ring->dma + in i40e_configure_tx_ring()
3462 (ring->count * sizeof(struct i40e_tx_desc)); in i40e_configure_tx_ring()
3475 if (ring->ch) in i40e_configure_tx_ring()
3477 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3480 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3487 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3489 ring->queue_index, pf_q, err); in i40e_configure_tx_ring()
3490 return -ENOMEM; in i40e_configure_tx_ring()
3496 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3498 ring->queue_index, pf_q, err); in i40e_configure_tx_ring()
3499 return -ENOMEM; in i40e_configure_tx_ring()
3503 if (ring->ch) { in i40e_configure_tx_ring()
3504 if (ring->ch->type == I40E_VSI_VMDQ2) in i40e_configure_tx_ring()
3507 return -EINVAL; in i40e_configure_tx_ring()
3509 qtx_ctl |= (ring->ch->vsi_number << in i40e_configure_tx_ring()
3513 if (vsi->type == I40E_VSI_VMDQ2) { in i40e_configure_tx_ring()
3515 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & in i40e_configure_tx_ring()
3522 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & in i40e_configure_tx_ring()
3528 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); in i40e_configure_tx_ring()
3534 * i40e_rx_offset - Return expected offset into page to access data
3545 * i40e_configure_rx_ring - Configure a receive ring context
3552 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_rx_ring()
3553 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; in i40e_configure_rx_ring()
3554 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_rx_ring()
3555 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_rx_ring()
3561 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); in i40e_configure_rx_ring()
3566 if (ring->vsi->type == I40E_VSI_MAIN) in i40e_configure_rx_ring()
3567 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in i40e_configure_rx_ring()
3569 ring->xsk_pool = i40e_xsk_pool(ring); in i40e_configure_rx_ring()
3570 if (ring->xsk_pool) { in i40e_configure_rx_ring()
3571 ring->rx_buf_len = in i40e_configure_rx_ring()
3572 xsk_pool_get_rx_frame_size(ring->xsk_pool); in i40e_configure_rx_ring()
3575 * handling in the fast-path. in i40e_configure_rx_ring()
3578 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in i40e_configure_rx_ring()
3583 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3585 ring->queue_index); in i40e_configure_rx_ring()
3588 ring->rx_buf_len = vsi->rx_buf_len; in i40e_configure_rx_ring()
3589 if (ring->vsi->type == I40E_VSI_MAIN) { in i40e_configure_rx_ring()
3590 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in i40e_configure_rx_ring()
3598 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, in i40e_configure_rx_ring()
3601 rx_ctx.base = (ring->dma / 128); in i40e_configure_rx_ring()
3602 rx_ctx.qlen = ring->count; in i40e_configure_rx_ring()
3612 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); in i40e_configure_rx_ring()
3613 if (hw->revision_id == 0) in i40e_configure_rx_ring()
3627 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3629 ring->queue_index, pf_q, err); in i40e_configure_rx_ring()
3630 return -ENOMEM; in i40e_configure_rx_ring()
3636 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3638 ring->queue_index, pf_q, err); in i40e_configure_rx_ring()
3639 return -ENOMEM; in i40e_configure_rx_ring()
3643 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) in i40e_configure_rx_ring()
3648 ring->rx_offset = i40e_rx_offset(ring); in i40e_configure_rx_ring()
3651 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); in i40e_configure_rx_ring()
3652 writel(0, ring->tail); in i40e_configure_rx_ring()
3654 if (ring->xsk_pool) { in i40e_configure_rx_ring()
3655 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in i40e_configure_rx_ring()
3664 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3666 ring->xsk_pool ? "AF_XDP ZC enabled " : "", in i40e_configure_rx_ring()
3667 ring->queue_index, pf_q); in i40e_configure_rx_ring()
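/* Note (illustrative): the HMC Rx queue context works in hardware units,
 * not bytes; the ring base address is programmed in 128-byte units and the
 * data buffer length in BIT(I40E_RXQ_CTX_DBUFF_SHIFT)-byte units, which is
 * why the code above divides:
 *
 *	rx_ctx.base  = ring->dma / 128;
 *	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
 *				    BIT(I40E_RXQ_CTX_DBUFF_SHIFT));
 */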
3674 * i40e_vsi_configure_tx - Configure the VSI for Tx
3684 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3685 err = i40e_configure_tx_ring(vsi->tx_rings[i]); in i40e_vsi_configure_tx()
3690 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3691 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); in i40e_vsi_configure_tx()
3697 * i40e_vsi_configure_rx - Configure the VSI for Rx
3707 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { in i40e_vsi_configure_rx()
3708 vsi->max_frame = I40E_MAX_RXBUFFER; in i40e_vsi_configure_rx()
3709 vsi->rx_buf_len = I40E_RXBUFFER_2048; in i40e_vsi_configure_rx()
3712 (vsi->netdev->mtu <= ETH_DATA_LEN)) { in i40e_vsi_configure_rx()
3713 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; in i40e_vsi_configure_rx()
3714 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; in i40e_vsi_configure_rx()
3717 vsi->max_frame = I40E_MAX_RXBUFFER; in i40e_vsi_configure_rx()
3718 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 : in i40e_vsi_configure_rx()
3723 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_configure_rx()
3724 err = i40e_configure_rx_ring(vsi->rx_rings[i]); in i40e_vsi_configure_rx()
3730 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3739 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_config_dcb_rings()
3741 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_config_dcb_rings()
3742 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3743 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3744 rx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3745 tx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3751 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) in i40e_vsi_config_dcb_rings()
3754 qoffset = vsi->tc_config.tc_info[n].qoffset; in i40e_vsi_config_dcb_rings()
3755 qcount = vsi->tc_config.tc_info[n].qcount; in i40e_vsi_config_dcb_rings()
3757 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3758 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3759 rx_ring->dcb_tc = n; in i40e_vsi_config_dcb_rings()
3760 tx_ring->dcb_tc = n; in i40e_vsi_config_dcb_rings()
3766 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3771 if (vsi->netdev) in i40e_set_vsi_rx_mode()
3772 i40e_set_rx_mode(vsi->netdev); in i40e_set_vsi_rx_mode()
3776 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3783 pf->fd_tcp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3784 pf->fd_udp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3785 pf->fd_sctp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3786 pf->fd_ip4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3787 pf->fd_tcp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3788 pf->fd_udp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3789 pf->fd_sctp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3790 pf->fd_ip6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3794 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3803 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore()
3806 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_filter_restore()
3813 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_restore()
3819 * i40e_vsi_configure - Set up the VSI for action
3837 * i40e_vsi_configure_msix - MSI-X mode interrupt config in the HW
3843 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix()
3844 struct i40e_hw *hw = &pf->hw; in i40e_vsi_configure_msix()
3851 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) in i40e_vsi_configure_msix()
3853 qp = vsi->base_queue; in i40e_vsi_configure_msix()
3854 vector = vsi->base_vector; in i40e_vsi_configure_msix()
3855 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { in i40e_vsi_configure_msix()
3856 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; in i40e_vsi_configure_msix()
3858 q_vector->rx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3859 q_vector->rx.target_itr = in i40e_vsi_configure_msix()
3860 ITR_TO_REG(vsi->rx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3861 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), in i40e_vsi_configure_msix()
3862 q_vector->rx.target_itr >> 1); in i40e_vsi_configure_msix()
3863 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_vsi_configure_msix()
3865 q_vector->tx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3866 q_vector->tx.target_itr = in i40e_vsi_configure_msix()
3867 ITR_TO_REG(vsi->tx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3868 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), in i40e_vsi_configure_msix()
3869 q_vector->tx.target_itr >> 1); in i40e_vsi_configure_msix()
3870 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_vsi_configure_msix()
3872 wr32(hw, I40E_PFINT_RATEN(vector - 1), in i40e_vsi_configure_msix()
3873 i40e_intrl_usec_to_reg(vsi->int_rate_limit)); in i40e_vsi_configure_msix()
3876 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); in i40e_vsi_configure_msix()
3877 for (q = 0; q < q_vector->num_ringpairs; q++) { in i40e_vsi_configure_msix()
3878 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; in i40e_vsi_configure_msix()
3910 if (q == (q_vector->num_ringpairs - 1)) in i40e_vsi_configure_msix()
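/* Note (illustrative): the ITR registers count in 2-usec units, hence the
 * ">> 1" when programming them above; e.g. an itr_setting of 50 usec:
 *
 *	wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
 *	     q_vector->rx.target_itr >> 1);	// 50 usec -> 25 register units
 */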
3923 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3928 struct i40e_hw *hw = &pf->hw; in i40e_enable_misc_int_causes()
3944 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_enable_misc_int_causes()
3947 if (pf->flags & I40E_FLAG_PTP) in i40e_enable_misc_int_causes()
3961 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3966 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0; in i40e_configure_msi_and_legacy()
3967 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_configure_msi_and_legacy()
3968 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy()
3969 struct i40e_hw *hw = &pf->hw; in i40e_configure_msi_and_legacy()
3972 q_vector->rx.next_update = jiffies + 1; in i40e_configure_msi_and_legacy()
3973 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
3974 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1); in i40e_configure_msi_and_legacy()
3975 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_configure_msi_and_legacy()
3976 q_vector->tx.next_update = jiffies + 1; in i40e_configure_msi_and_legacy()
3977 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
3978 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1); in i40e_configure_msi_and_legacy()
3979 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_configure_msi_and_legacy()
4004 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
4009 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_disable_icr0()
4017 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
4022 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_enable_icr0()
4034 * i40e_msix_clean_rings - MSI-X mode interrupt handler
4042 if (!q_vector->tx.ring && !q_vector->rx.ring) in i40e_msix_clean_rings()
4045 napi_schedule_irqoff(&q_vector->napi); in i40e_msix_clean_rings()
4051 * i40e_irq_affinity_notify - Callback for affinity changes
4064 cpumask_copy(&q_vector->affinity_mask, mask); in i40e_irq_affinity_notify()
4068 * i40e_irq_affinity_release - Callback for affinity notifier release
4078 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
4082 * Allocates MSI-X vectors and requests interrupts from the kernel.
4086 int q_vectors = vsi->num_q_vectors; in i40e_vsi_request_irq_msix()
4087 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix()
4088 int base = vsi->base_vector; in i40e_vsi_request_irq_msix()
4096 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; in i40e_vsi_request_irq_msix()
4098 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4100 if (q_vector->tx.ring && q_vector->rx.ring) { in i40e_vsi_request_irq_msix()
4101 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
4102 "%s-%s-%d", basename, "TxRx", rx_int_idx++); in i40e_vsi_request_irq_msix()
4104 } else if (q_vector->rx.ring) { in i40e_vsi_request_irq_msix()
4105 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
4106 "%s-%s-%d", basename, "rx", rx_int_idx++); in i40e_vsi_request_irq_msix()
4107 } else if (q_vector->tx.ring) { in i40e_vsi_request_irq_msix()
4108 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
4109 "%s-%s-%d", basename, "tx", tx_int_idx++); in i40e_vsi_request_irq_msix()
4115 vsi->irq_handler, in i40e_vsi_request_irq_msix()
4117 q_vector->name, in i40e_vsi_request_irq_msix()
4120 dev_info(&pf->pdev->dev, in i40e_vsi_request_irq_msix()
4126 q_vector->affinity_notify.notify = i40e_irq_affinity_notify; in i40e_vsi_request_irq_msix()
4127 q_vector->affinity_notify.release = i40e_irq_affinity_release; in i40e_vsi_request_irq_msix()
4128 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); in i40e_vsi_request_irq_msix()
4135 cpu = cpumask_local_spread(q_vector->v_idx, -1); in i40e_vsi_request_irq_msix()
4139 vsi->irqs_ready = true; in i40e_vsi_request_irq_msix()
4144 vector--; in i40e_vsi_request_irq_msix()
4145 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4148 free_irq(irq_num, &vsi->q_vectors[vector]); in i40e_vsi_request_irq_msix()
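/* Example (illustrative): with a basename such as "i40e-eth0", the
 * snprintf() calls above yield per-vector names that show up in
 * /proc/interrupts as:
 *
 *	i40e-eth0-TxRx-0
 *	i40e-eth0-TxRx-1
 *	...
 */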
4154 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
4155 * @vsi: the VSI being un-configured
4159 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq()
4160 struct i40e_hw *hw = &pf->hw; in i40e_vsi_disable_irq()
4161 int base = vsi->base_vector; in i40e_vsi_disable_irq()
4165 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_disable_irq()
4168 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4170 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4172 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4174 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4178 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
4182 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_disable_irq()
4183 for (i = vsi->base_vector; in i40e_vsi_disable_irq()
4184 i < (vsi->num_q_vectors + vsi->base_vector); i++) in i40e_vsi_disable_irq()
4185 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); in i40e_vsi_disable_irq()
4188 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_disable_irq()
4189 synchronize_irq(pf->msix_entries[i + base].vector); in i40e_vsi_disable_irq()
4191 /* Legacy and MSI mode - this stops all interrupt handling */ in i40e_vsi_disable_irq()
4195 synchronize_irq(pf->pdev->irq); in i40e_vsi_disable_irq()
4200 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4205 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq()
4208 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_enable_irq()
4209 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_enable_irq()
4215 i40e_flush(&pf->hw); in i40e_vsi_enable_irq()
4220 * i40e_free_misc_vector - Free the vector that handles non-queue events
4226 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); in i40e_free_misc_vector()
4227 i40e_flush(&pf->hw); in i40e_free_misc_vector()
4229 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { in i40e_free_misc_vector()
4230 free_irq(pf->msix_entries[0].vector, pf); in i40e_free_misc_vector()
4231 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_free_misc_vector()
4236 * i40e_intr - MSI/Legacy and non-queue interrupt handler
4241 * with both queue and non-queue interrupts. This is also used in
4242 * MSIX mode to handle the non-queue interrupts.
4247 struct i40e_hw *hw = &pf->hw; in i40e_intr()
4262 pf->sw_int_count++; in i40e_intr()
4264 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_intr()
4267 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); in i40e_intr()
4268 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_intr()
4273 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_intr()
4274 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_intr()
4282 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_intr()
4283 napi_schedule_irqoff(&q_vector->napi); in i40e_intr()
4288 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_intr()
4289 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); in i40e_intr()
4294 set_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_intr()
4299 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { in i40e_intr()
4306 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); in i40e_intr()
4311 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_intr()
4312 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4318 pf->corer_count++; in i40e_intr()
4320 pf->globr_count++; in i40e_intr()
4322 pf->empr_count++; in i40e_intr()
4323 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4329 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); in i40e_intr()
4330 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", in i40e_intr()
4339 schedule_work(&pf->ptp_extts0_work); in i40e_intr()
4353 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", in i40e_intr()
4358 dev_info(&pf->pdev->dev, "device will be reset\n"); in i40e_intr()
4359 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_intr()
4367 /* re-enable interrupt causes */ in i40e_intr()
4369 if (!test_bit(__I40E_DOWN, pf->state) || in i40e_intr()
4370 test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_intr()
4379 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4387 struct i40e_vsi *vsi = tx_ring->vsi; in i40e_clean_fdir_tx_irq()
4388 u16 i = tx_ring->next_to_clean; in i40e_clean_fdir_tx_irq()
4392 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_fdir_tx_irq()
4394 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4397 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; in i40e_clean_fdir_tx_irq()
4407 if (!(eop_desc->cmd_type_offset_bsz & in i40e_clean_fdir_tx_irq()
4412 tx_buf->next_to_watch = NULL; in i40e_clean_fdir_tx_irq()
4414 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4415 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4421 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4422 tx_buf = tx_ring->tx_bi; in i40e_clean_fdir_tx_irq()
4426 dma_unmap_single(tx_ring->dev, in i40e_clean_fdir_tx_irq()
4430 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) in i40e_clean_fdir_tx_irq()
4431 kfree(tx_buf->raw_buf); in i40e_clean_fdir_tx_irq()
4433 tx_buf->raw_buf = NULL; in i40e_clean_fdir_tx_irq()
4434 tx_buf->tx_flags = 0; in i40e_clean_fdir_tx_irq()
4435 tx_buf->next_to_watch = NULL; in i40e_clean_fdir_tx_irq()
4437 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4438 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4445 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4446 tx_buf = tx_ring->tx_bi; in i40e_clean_fdir_tx_irq()
4451 budget--; in i40e_clean_fdir_tx_irq()
4454 i += tx_ring->count; in i40e_clean_fdir_tx_irq()
4455 tx_ring->next_to_clean = i; in i40e_clean_fdir_tx_irq()
4457 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) in i40e_clean_fdir_tx_irq()
4458 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); in i40e_clean_fdir_tx_irq()
4464 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4473 if (!q_vector->tx.ring) in i40e_fdir_clean_ring()
4476 vsi = q_vector->tx.ring->vsi; in i40e_fdir_clean_ring()
4477 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); in i40e_fdir_clean_ring()
4483 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4490 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_map_vector_to_qp()
4491 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; in i40e_map_vector_to_qp()
4492 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; in i40e_map_vector_to_qp()
4494 tx_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4495 tx_ring->next = q_vector->tx.ring; in i40e_map_vector_to_qp()
4496 q_vector->tx.ring = tx_ring; in i40e_map_vector_to_qp()
4497 q_vector->tx.count++; in i40e_map_vector_to_qp()
4501 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; in i40e_map_vector_to_qp()
4503 xdp_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4504 xdp_ring->next = q_vector->tx.ring; in i40e_map_vector_to_qp()
4505 q_vector->tx.ring = xdp_ring; in i40e_map_vector_to_qp()
4506 q_vector->tx.count++; in i40e_map_vector_to_qp()
4509 rx_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4510 rx_ring->next = q_vector->rx.ring; in i40e_map_vector_to_qp()
4511 q_vector->rx.ring = rx_ring; in i40e_map_vector_to_qp()
4512 q_vector->rx.count++; in i40e_map_vector_to_qp()
4516 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4519 * This function maps descriptor rings to the queue-specific vectors
4520 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4526 int qp_remaining = vsi->num_queue_pairs; in i40e_vsi_map_rings_to_vectors()
4527 int q_vectors = vsi->num_q_vectors; in i40e_vsi_map_rings_to_vectors()
4532 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to in i40e_vsi_map_rings_to_vectors()
4540 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; in i40e_vsi_map_rings_to_vectors()
4542 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); in i40e_vsi_map_rings_to_vectors()
4544 q_vector->num_ringpairs = num_ringpairs; in i40e_vsi_map_rings_to_vectors()
4545 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; in i40e_vsi_map_rings_to_vectors()
4547 q_vector->rx.count = 0; in i40e_vsi_map_rings_to_vectors()
4548 q_vector->tx.count = 0; in i40e_vsi_map_rings_to_vectors()
4549 q_vector->rx.ring = NULL; in i40e_vsi_map_rings_to_vectors()
4550 q_vector->tx.ring = NULL; in i40e_vsi_map_rings_to_vectors()
4552 while (num_ringpairs--) { in i40e_vsi_map_rings_to_vectors()
4555 qp_remaining--; in i40e_vsi_map_rings_to_vectors()
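/* Worked example (illustrative): with 10 queue pairs on 4 vectors, the
 * DIV_ROUND_UP() split above assigns 3, 3, 2, 2 ring pairs:
 *
 *	v0: DIV_ROUND_UP(10, 4) = 3	(7 pairs left)
 *	v1: DIV_ROUND_UP(7, 3)  = 3	(4 pairs left)
 *	v2: DIV_ROUND_UP(4, 2)  = 2	(2 pairs left)
 *	v3: DIV_ROUND_UP(2, 1)  = 2
 */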
4561 * i40e_vsi_request_irq - Request IRQ from the OS
4567 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq()
4570 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_request_irq()
4572 else if (pf->flags & I40E_FLAG_MSI_ENABLED) in i40e_vsi_request_irq()
4573 err = request_irq(pf->pdev->irq, i40e_intr, 0, in i40e_vsi_request_irq()
4574 pf->int_name, pf); in i40e_vsi_request_irq()
4576 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, in i40e_vsi_request_irq()
4577 pf->int_name, pf); in i40e_vsi_request_irq()
4580 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); in i40e_vsi_request_irq()
4587 * i40e_netpoll - A Polling 'interrupt' handler
4590 * This is used by netconsole to send skbs without having to re-enable
4596 struct i40e_vsi *vsi = np->vsi; in i40e_netpoll()
4597 struct i40e_pf *pf = vsi->back; in i40e_netpoll()
4601 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_netpoll()
4604 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_netpoll()
4605 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_netpoll()
4606 i40e_msix_clean_rings(0, vsi->q_vectors[i]); in i40e_netpoll()
4608 i40e_intr(pf->pdev->irq, netdev); in i40e_netpoll()
4616 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4621 * This routine will wait for the given Tx queue of the PF to reach the
4623 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4632 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); in i40e_pf_txq_wait()
4639 return -ETIMEDOUT; in i40e_pf_txq_wait()
4645 * i40e_control_tx_q - Start or stop a particular Tx queue
4656 struct i40e_hw *hw = &pf->hw; in i40e_control_tx_q()
4661 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); in i40e_control_tx_q()
4689 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4703 /* wait for the change to finish */ in i40e_control_wait_tx_q()
4706 dev_info(&pf->pdev->dev, in i40e_control_wait_tx_q()
4716 * i40e_vsi_enable_tx - Start a VSI's Tx rings
4721 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_tx()
4724 pf_q = vsi->base_queue; in i40e_vsi_enable_tx()
4725 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_tx()
4726 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4735 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4736 pf_q + vsi->alloc_queue_pairs, in i40e_vsi_enable_tx()
4745 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4750 * This routine will wait for the given Rx queue of the PF to reach the
4752 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4761 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); in i40e_pf_rxq_wait()
4768 return -ETIMEDOUT; in i40e_pf_rxq_wait()
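/* Sketch of the wait loop (assuming the driver's usual retry constant,
 * I40E_QUEUE_WAIT_RETRY_LIMIT): both i40e_pf_txq_wait() and
 * i40e_pf_rxq_wait() poll the queue-enable register until the status bit
 * matches the requested state, then give up with -ETIMEDOUT:
 *
 *	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
 *		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
 *		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
 *			break;
 *		usleep_range(10, 20);
 *	}
 */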
4774 * i40e_control_rx_q - Start or stop a particular Rx queue
4785 struct i40e_hw *hw = &pf->hw; in i40e_control_rx_q()
4826 /* wait for the change to finish */ in i40e_control_wait_rx_q()
4835 * i40e_vsi_enable_rx - Start a VSI's Rx rings
4840 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_rx()
4843 pf_q = vsi->base_queue; in i40e_vsi_enable_rx()
4844 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_rx()
4847 dev_info(&pf->pdev->dev, in i40e_vsi_enable_rx()
4849 vsi->seid, pf_q); in i40e_vsi_enable_rx()
4858 * i40e_vsi_start_rings - Start a VSI's rings
4877 * i40e_vsi_stop_rings - Stop a VSI's rings
4882 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings()
4885 /* When port TX is suspended, don't wait */ in i40e_vsi_stop_rings()
4886 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) in i40e_vsi_stop_rings()
4889 q_end = vsi->base_queue + vsi->num_queue_pairs; in i40e_vsi_stop_rings()
4890 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) in i40e_vsi_stop_rings()
4891 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); in i40e_vsi_stop_rings()
4893 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { in i40e_vsi_stop_rings()
4896 dev_info(&pf->pdev->dev, in i40e_vsi_stop_rings()
4898 vsi->seid, pf_q); in i40e_vsi_stop_rings()
4902 pf_q = vsi->base_queue; in i40e_vsi_stop_rings()
4903 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) in i40e_vsi_stop_rings()
4904 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); in i40e_vsi_stop_rings()
4910 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4922 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait()
4925 pf_q = vsi->base_queue; in i40e_vsi_stop_rings_no_wait()
4926 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_stop_rings_no_wait()
4933 * i40e_vsi_free_irq - Free the irq association with the OS
4938 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq()
4939 struct i40e_hw *hw = &pf->hw; in i40e_vsi_free_irq()
4940 int base = vsi->base_vector; in i40e_vsi_free_irq()
4944 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_free_irq()
4945 if (!vsi->q_vectors) in i40e_vsi_free_irq()
4948 if (!vsi->irqs_ready) in i40e_vsi_free_irq()
4951 vsi->irqs_ready = false; in i40e_vsi_free_irq()
4952 for (i = 0; i < vsi->num_q_vectors; i++) { in i40e_vsi_free_irq()
4957 irq_num = pf->msix_entries[vector].vector; in i40e_vsi_free_irq()
4960 if (!vsi->q_vectors[i] || in i40e_vsi_free_irq()
4961 !vsi->q_vectors[i]->num_ringpairs) in i40e_vsi_free_irq()
4968 free_irq(irq_num, vsi->q_vectors[i]); in i40e_vsi_free_irq()
4977 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); in i40e_vsi_free_irq()
4982 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); in i40e_vsi_free_irq()
5017 free_irq(pf->pdev->irq, pf); in i40e_vsi_free_irq()
5052 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
5062 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_free_q_vector()
5069 i40e_for_each_ring(ring, q_vector->tx) in i40e_free_q_vector()
5070 ring->q_vector = NULL; in i40e_free_q_vector()
5072 i40e_for_each_ring(ring, q_vector->rx) in i40e_free_q_vector()
5073 ring->q_vector = NULL; in i40e_free_q_vector()
5076 if (vsi->netdev) in i40e_free_q_vector()
5077 netif_napi_del(&q_vector->napi); in i40e_free_q_vector()
5079 vsi->q_vectors[v_idx] = NULL; in i40e_free_q_vector()
5085 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
5086 * @vsi: the VSI being un-configured
5095 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) in i40e_vsi_free_q_vectors()
5100 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
5106 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_reset_interrupt_capability()
5107 pci_disable_msix(pf->pdev); in i40e_reset_interrupt_capability()
5108 kfree(pf->msix_entries); in i40e_reset_interrupt_capability()
5109 pf->msix_entries = NULL; in i40e_reset_interrupt_capability()
5110 kfree(pf->irq_pile); in i40e_reset_interrupt_capability()
5111 pf->irq_pile = NULL; in i40e_reset_interrupt_capability()
5112 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { in i40e_reset_interrupt_capability()
5113 pci_disable_msi(pf->pdev); in i40e_reset_interrupt_capability()
5115 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_reset_interrupt_capability()
5119 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
5123 * to pre-load conditions
5129 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) in i40e_clear_interrupt_scheme()
5132 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, in i40e_clear_interrupt_scheme()
5135 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); in i40e_clear_interrupt_scheme()
5136 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_clear_interrupt_scheme()
5137 if (pf->vsi[i]) in i40e_clear_interrupt_scheme()
5138 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_clear_interrupt_scheme()
5143 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5150 if (!vsi->netdev) in i40e_napi_enable_all()
5153 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_enable_all()
5154 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_enable_all()
5156 if (q_vector->rx.ring || q_vector->tx.ring) in i40e_napi_enable_all()
5157 napi_enable(&q_vector->napi); in i40e_napi_enable_all()
5162 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5169 if (!vsi->netdev) in i40e_napi_disable_all()
5172 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_disable_all()
5173 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_disable_all()
5175 if (q_vector->rx.ring || q_vector->tx.ring) in i40e_napi_disable_all()
5176 napi_disable(&q_vector->napi); in i40e_napi_disable_all()
5181 * i40e_vsi_close - Shut down a VSI
5186 struct i40e_pf *pf = vsi->back; in i40e_vsi_close()
5187 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_close()
5192 vsi->current_netdev_flags = 0; in i40e_vsi_close()
5193 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_vsi_close()
5194 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_vsi_close()
5195 set_bit(__I40E_CLIENT_RESET, pf->state); in i40e_vsi_close()
5199 * i40e_quiesce_vsi - Pause a given VSI
5204 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_quiesce_vsi()
5207 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); in i40e_quiesce_vsi()
5208 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_quiesce_vsi()
5209 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); in i40e_quiesce_vsi()
5215 * i40e_unquiesce_vsi - Resume a given VSI
5220 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) in i40e_unquiesce_vsi()
5223 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_unquiesce_vsi()
5224 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); in i40e_unquiesce_vsi()
5230 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5237 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_quiesce_all_vsi()
5238 if (pf->vsi[v]) in i40e_pf_quiesce_all_vsi()
5239 i40e_quiesce_vsi(pf->vsi[v]); in i40e_pf_quiesce_all_vsi()
5244 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5251 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_unquiesce_all_vsi()
5252 if (pf->vsi[v]) in i40e_pf_unquiesce_all_vsi()
5253 i40e_unquiesce_vsi(pf->vsi[v]); in i40e_pf_unquiesce_all_vsi()
5258 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5261 * Wait until all queues on a given VSI have been disabled.
5265 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled()
5268 pf_q = vsi->base_queue; in i40e_vsi_wait_queues_disabled()
5269 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_wait_queues_disabled()
5270 /* Check and wait for the Tx queue */ in i40e_vsi_wait_queues_disabled()
5273 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5275 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5282 /* Check and wait for the XDP Tx queue */ in i40e_vsi_wait_queues_disabled()
5283 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
5286 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5288 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5292 /* Check and wait for the Rx queue */ in i40e_vsi_wait_queues_disabled()
5295 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5297 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5307 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5317 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { in i40e_pf_wait_queues_disabled()
5318 if (pf->vsi[v]) { in i40e_pf_wait_queues_disabled()
5319 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); in i40e_pf_wait_queues_disabled()
5331 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5340 struct i40e_hw *hw = &pf->hw; in i40e_get_iscsi_tc_map()
5344 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_get_iscsi_tc_map()
5346 for (i = 0; i < dcbcfg->numapps; i++) { in i40e_get_iscsi_tc_map()
5347 app = dcbcfg->app[i]; in i40e_get_iscsi_tc_map()
5350 tc = dcbcfg->etscfg.prioritytable[app.priority]; in i40e_get_iscsi_tc_map()
5360 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5363 * Return the number of TCs from given DCBx configuration
5373 * and create a bitmask of enabled TCs in i40e_dcb_get_num_tc()
5376 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); in i40e_dcb_get_num_tc()
5379 * contiguous TCs starting with TC0 in i40e_dcb_get_num_tc()
5386 pr_err("Non-contiguous TC - Disabling DCB\n"); in i40e_dcb_get_num_tc()
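/* Worked example (illustrative): the ETS priority table maps the eight user
 * priorities to TCs; OR-ing BIT(tc) for each entry yields the enabled-TC
 * bitmask, which must then be contiguous starting at TC0:
 *
 *	prioritytable = {0, 0, 1, 1, 2, 2, 0, 0}
 *	bitmask = BIT(0) | BIT(1) | BIT(2) = 0x7	-> 3 TCs, OK
 *	a bitmask of 0x5 (TC0 and TC2) trips the pr_err() above.
 */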
5402 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5421 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5429 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_mqprio_get_enabled_tc()
5430 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; in i40e_mqprio_get_enabled_tc()
5439 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for the PF
5446 struct i40e_hw *hw = &pf->hw; in i40e_pf_get_num_tc()
5449 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_pf_get_num_tc()
5452 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5455 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_num_tc()
5458 /* SFP mode will be enabled for all TCs on port */ in i40e_pf_get_num_tc()
5459 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_num_tc()
5462 /* MFP mode return count of enabled TCs for this PF */ in i40e_pf_get_num_tc()
5463 if (pf->hw.func_caps.iscsi) in i40e_pf_get_num_tc()
5476 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5489 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_tc_map()
5492 /* SFP mode we want PF to be enabled for all TCs */ in i40e_pf_get_tc_map()
5493 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_tc_map()
5494 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); in i40e_pf_get_tc_map()
5497 if (pf->hw.func_caps.iscsi) in i40e_pf_get_tc_map()
5504 * i40e_vsi_get_bw_info - Query VSI BW Information
5513 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info()
5514 struct i40e_hw *hw = &pf->hw; in i40e_vsi_get_bw_info()
5520 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); in i40e_vsi_get_bw_info()
5522 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5524 i40e_stat_str(&pf->hw, ret), in i40e_vsi_get_bw_info()
5525 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5526 return -EINVAL; in i40e_vsi_get_bw_info()
5530 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, in i40e_vsi_get_bw_info()
5533 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5535 i40e_stat_str(&pf->hw, ret), in i40e_vsi_get_bw_info()
5536 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5537 return -EINVAL; in i40e_vsi_get_bw_info()
5541 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5542 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", in i40e_vsi_get_bw_info()
5548 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); in i40e_vsi_get_bw_info()
5549 vsi->bw_max_quanta = bw_config.max_bw; in i40e_vsi_get_bw_info()
5553 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; in i40e_vsi_get_bw_info()
5554 vsi->bw_ets_limit_credits[i] = in i40e_vsi_get_bw_info()
5557 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); in i40e_vsi_get_bw_info()
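/* Note (illustrative): tc_bw_max packs one small per-TC "max quanta" field
 * into the word pair returned by the ETS SLA query, so the entry for TC i
 * is extracted with a 4-bit shift as above:
 *
 *	vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
 */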
5564 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5575 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc()
5582 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_configure_bw_alloc()
5583 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); in i40e_vsi_configure_bw_alloc()
5585 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5586 "Failed to reset tx rate for vsi->seid %u\n", in i40e_vsi_configure_bw_alloc()
5587 vsi->seid); in i40e_vsi_configure_bw_alloc()
5595 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5597 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5599 pf->hw.aq.asq_last_status); in i40e_vsi_configure_bw_alloc()
5600 return -EINVAL; in i40e_vsi_configure_bw_alloc()
5604 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_vsi_configure_bw_alloc()
5610 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5617 struct net_device *netdev = vsi->netdev; in i40e_vsi_config_netdev_tc()
5618 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc()
5619 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_netdev_tc()
5622 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_vsi_config_netdev_tc()
5632 /* Set up actual enabled TCs on the VSI */ in i40e_vsi_config_netdev_tc()
5633 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) in i40e_vsi_config_netdev_tc()
5638 /* Only set TC queues for enabled tcs in i40e_vsi_config_netdev_tc()
5645 if (vsi->tc_config.enabled_tc & BIT(i)) in i40e_vsi_config_netdev_tc()
5647 vsi->tc_config.tc_info[i].netdev_tc, in i40e_vsi_config_netdev_tc()
5648 vsi->tc_config.tc_info[i].qcount, in i40e_vsi_config_netdev_tc()
5649 vsi->tc_config.tc_info[i].qoffset); in i40e_vsi_config_netdev_tc()
5658 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; in i40e_vsi_config_netdev_tc()
5660 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; in i40e_vsi_config_netdev_tc()
5666 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5677 vsi->info.mapping_flags = ctxt->info.mapping_flags; in i40e_vsi_update_queue_map()
5678 memcpy(&vsi->info.queue_mapping, in i40e_vsi_update_queue_map()
5679 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); in i40e_vsi_update_queue_map()
5680 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, in i40e_vsi_update_queue_map()
5681 sizeof(vsi->info.tc_mapping)); in i40e_vsi_update_queue_map()
5685 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5698 pf = vsi->back; in i40e_update_adq_vsi_queues()
5699 hw = &pf->hw; in i40e_update_adq_vsi_queues()
5701 ctxt.seid = vsi->seid; in i40e_update_adq_vsi_queues()
5702 ctxt.pf_num = hw->pf_id; in i40e_update_adq_vsi_queues()
5703 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; in i40e_update_adq_vsi_queues()
5704 ctxt.uplink_seid = vsi->uplink_seid; in i40e_update_adq_vsi_queues()
5707 ctxt.info = vsi->info; in i40e_update_adq_vsi_queues()
5709 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, in i40e_update_adq_vsi_queues()
5711 if (vsi->reconfig_rss) { in i40e_update_adq_vsi_queues()
5712 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_update_adq_vsi_queues()
5713 vsi->num_queue_pairs); in i40e_update_adq_vsi_queues()
5716 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); in i40e_update_adq_vsi_queues()
5719 vsi->reconfig_rss = false; in i40e_update_adq_vsi_queues()
5724 dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n", in i40e_update_adq_vsi_queues()
5726 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_update_adq_vsi_queues()
5731 vsi->info.valid_sections = 0; in i40e_update_adq_vsi_queues()
5737 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5741 * This configures a particular VSI for TCs that are mapped to the
5742 * given TC bitmap. It uses default bandwidth share for TCs across
5752 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc()
5753 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_tc()
5758 /* Check if enabled_tc is same as existing or new TCs */ in i40e_vsi_config_tc()
5759 if (vsi->tc_config.enabled_tc == enabled_tc && in i40e_vsi_config_tc()
5760 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in i40e_vsi_config_tc()
5763 /* Enable ETS TCs with equal BW Share for now across all VSIs */ in i40e_vsi_config_tc()
5773 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5775 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5776 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, in i40e_vsi_config_tc()
5779 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5782 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5792 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5800 dev_err(&pf->pdev->dev, in i40e_vsi_config_tc()
5802 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5808 ctxt.seid = vsi->seid; in i40e_vsi_config_tc()
5809 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_vsi_config_tc()
5811 ctxt.uplink_seid = vsi->uplink_seid; in i40e_vsi_config_tc()
5812 ctxt.info = vsi->info; in i40e_vsi_config_tc()
5821 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled in i40e_vsi_config_tc()
5824 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { in i40e_vsi_config_tc()
5825 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, in i40e_vsi_config_tc()
5826 vsi->num_queue_pairs); in i40e_vsi_config_tc()
5829 dev_info(&vsi->back->pdev->dev, in i40e_vsi_config_tc()
5833 vsi->reconfig_rss = false; in i40e_vsi_config_tc()
5835 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_vsi_config_tc()
5841 /* Update the VSI after updating the VSI queue-mapping in i40e_vsi_config_tc()
5846 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5849 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5854 vsi->info.valid_sections = 0; in i40e_vsi_config_tc()
5859 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5862 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5873 * i40e_get_link_speed - Returns link speed for the interface
5879 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed()
5881 switch (pf->hw.phy.link_info.link_speed) { in i40e_get_link_speed()
5893 return -EINVAL; in i40e_get_link_speed()
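/*
 * The switch above reduces to a table from the I40E_LINK_SPEED_* enum
 * to Mbps.  Condensed sketch of the mapping (same return convention,
 * -EINVAL for anything unrecognized):
 */
static int example_link_speed_mbps(u16 link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_40GB: return 40000;
	case I40E_LINK_SPEED_25GB: return 25000;
	case I40E_LINK_SPEED_20GB: return 20000;
	case I40E_LINK_SPEED_10GB: return 10000;
	case I40E_LINK_SPEED_1GB:  return 1000;
	default:                   return -EINVAL;
	}
}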
5898 * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes/sec to Mbits/sec
5907 dev_warn(&vsi->back->pdev->dev, in i40e_bw_bytes_to_mbits()
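/*
 * mqprio hands rates down from the stack in bytes per second, while
 * the firmware interfaces below want Mbps.  125000 bytes/s is exactly
 * 1 Mbit/s, which is what I40E_BW_MBPS_DIVISOR captures.  Sketch:
 */
static u64 example_bytes_per_sec_to_mbps(u64 bytes_per_sec)
{
	return bytes_per_sec / 125000;	/* == rate * 8 / 1000000 */
}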
5918 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5927 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit()
5934 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5937 return -EINVAL; in i40e_set_bw_limit()
5940 dev_warn(&pf->pdev->dev, in i40e_set_bw_limit()
5948 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, in i40e_set_bw_limit()
5951 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5952 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", in i40e_set_bw_limit()
5953 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), in i40e_set_bw_limit()
5954 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_set_bw_limit()
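/*
 * The scheduler meters Tx bandwidth in 50 Mbps credits
 * (I40E_BW_CREDIT_DIVISOR), so a requested rate is rounded down to a
 * whole number of credits before i40e_aq_config_vsi_bw_limit() is
 * called.  Hedged arithmetic sketch:
 */
static u64 example_rate_to_credits(u64 max_tx_rate_mbps)
{
	return max_tx_rate_mbps / 50;	/* e.g. 930 Mbps -> 18 credits (900 Mbps) */
}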
5959 * i40e_remove_queue_channels - Remove queue channels for the TCs
5962 * Remove queue channels for the TCs
5969 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels()
5974 * channel VSIs with non-power-of-2 queue count. in i40e_remove_queue_channels()
5976 vsi->current_rss_size = 0; in i40e_remove_queue_channels()
5979 if (list_empty(&vsi->ch_list)) in i40e_remove_queue_channels()
5982 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_remove_queue_channels()
5985 list_del(&ch->list); in i40e_remove_queue_channels()
5986 p_vsi = ch->parent_vsi; in i40e_remove_queue_channels()
5987 if (!p_vsi || !ch->initialized) { in i40e_remove_queue_channels()
5992 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_remove_queue_channels()
5996 pf_q = ch->base_queue + i; in i40e_remove_queue_channels()
5997 tx_ring = vsi->tx_rings[pf_q]; in i40e_remove_queue_channels()
5998 tx_ring->ch = NULL; in i40e_remove_queue_channels()
6000 rx_ring = vsi->rx_rings[pf_q]; in i40e_remove_queue_channels()
6001 rx_ring->ch = NULL; in i40e_remove_queue_channels()
6005 ret = i40e_set_bw_limit(vsi, ch->seid, 0); in i40e_remove_queue_channels()
6007 dev_info(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6008 "Failed to reset tx rate for ch->seid %u\n", in i40e_remove_queue_channels()
6009 ch->seid); in i40e_remove_queue_channels()
6013 &pf->cloud_filter_list, cloud_node) { in i40e_remove_queue_channels()
6014 if (cfilter->seid != ch->seid) in i40e_remove_queue_channels()
6017 hash_del(&cfilter->cloud_node); in i40e_remove_queue_channels()
6018 if (cfilter->dst_port) in i40e_remove_queue_channels()
6025 last_aq_status = pf->hw.aq.asq_last_status; in i40e_remove_queue_channels()
6027 dev_info(&pf->pdev->dev, in i40e_remove_queue_channels()
6029 i40e_stat_str(&pf->hw, ret), in i40e_remove_queue_channels()
6030 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_remove_queue_channels()
6035 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_remove_queue_channels()
6038 dev_err(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6040 ch->seid, p_vsi->seid); in i40e_remove_queue_channels()
6043 INIT_LIST_HEAD(&vsi->ch_list); in i40e_remove_queue_channels()
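/*
 * The walk above uses the _safe iterator so each channel can be
 * unlinked and freed while the traversal is still in flight.  Minimal
 * pattern, with a hypothetical element type for illustration:
 */
struct example_ch {
	struct list_head list;
};

static void example_drain(struct list_head *head)
{
	struct example_ch *ch, *tmp;

	list_for_each_entry_safe(ch, tmp, head, list) {
		list_del(&ch->list);	/* safe: 'tmp' already points past 'ch' */
		kfree(ch);
	}
}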
6051 * channels/TCs created.
6058 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_get_max_queues_for_channel()
6059 if (!ch->initialized) in i40e_get_max_queues_for_channel()
6061 if (ch->num_queue_pairs > max) in i40e_get_max_queues_for_channel()
6062 max = ch->num_queue_pairs; in i40e_get_max_queues_for_channel()
6069 * i40e_validate_num_queues - validate num_queues w.r.t. the channel
6085 return -EINVAL; in i40e_validate_num_queues()
6088 if (vsi->current_rss_size) { in i40e_validate_num_queues()
6089 if (num_queues > vsi->current_rss_size) { in i40e_validate_num_queues()
6090 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6092 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6093 return -EINVAL; in i40e_validate_num_queues()
6094 } else if ((num_queues < vsi->current_rss_size) && in i40e_validate_num_queues()
6096 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6098 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6099 return -EINVAL; in i40e_validate_num_queues()
6111 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6114 return -EINVAL; in i40e_validate_num_queues()
6123 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
6131 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss()
6133 struct i40e_hw *hw = &pf->hw; in i40e_vsi_reconfig_rss()
6138 if (!vsi->rss_size) in i40e_vsi_reconfig_rss()
6139 return -EINVAL; in i40e_vsi_reconfig_rss()
6141 if (rss_size > vsi->rss_size) in i40e_vsi_reconfig_rss()
6142 return -EINVAL; in i40e_vsi_reconfig_rss()
6144 local_rss_size = min_t(int, vsi->rss_size, rss_size); in i40e_vsi_reconfig_rss()
6145 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_reconfig_rss()
6147 return -ENOMEM; in i40e_vsi_reconfig_rss()
6150 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
6155 if (vsi->rss_hkey_user) in i40e_vsi_reconfig_rss()
6156 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_reconfig_rss()
6160 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_reconfig_rss()
6162 dev_info(&pf->pdev->dev, in i40e_vsi_reconfig_rss()
6165 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_reconfig_rss()
6172 if (!vsi->orig_rss_size) in i40e_vsi_reconfig_rss()
6173 vsi->orig_rss_size = vsi->rss_size; in i40e_vsi_reconfig_rss()
6174 vsi->current_rss_size = local_rss_size; in i40e_vsi_reconfig_rss()
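/*
 * i40e_fill_rss_lut() stripes the reduced queue set across the whole
 * lookup table, so every hash bucket still lands on a valid queue.
 * Equivalent sketch of the modulo fill:
 */
static void example_fill_lut(u8 *lut, u16 table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < table_size; i++)
		lut[i] = i % rss_size;	/* bucket i -> queue (i mod rss_size) */
}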
6180 * i40e_channel_setup_queue_map - Set up a channel queue map
6198 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); in i40e_channel_setup_queue_map()
6199 ch->num_queue_pairs = qcount; in i40e_channel_setup_queue_map()
6201 /* find the next higher power-of-2 of num queue pairs */ in i40e_channel_setup_queue_map()
6210 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_channel_setup_queue_map()
6212 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ in i40e_channel_setup_queue_map()
6213 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); in i40e_channel_setup_queue_map()
6214 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); in i40e_channel_setup_queue_map()
6215 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_channel_setup_queue_map()
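/*
 * The contiguous TC queue map packs a base queue offset together with
 * the queue count expressed as a power-of-two exponent.  Sketch of the
 * packing, assuming the I40E_AQ_VSI_TC_QUE_* shifts from
 * i40e_adminq_cmd.h:
 */
static u16 example_pack_qmap(u16 offset, u16 qcount)
{
	u8 pow = ilog2(qcount);

	if (!is_power_of_2(qcount))
		pow++;	/* round the count up to the next power of two */

	return (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
}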
6219 * i40e_add_channel - add a channel by adding VSI
6229 struct i40e_hw *hw = &pf->hw; in i40e_add_channel()
6234 if (ch->type != I40E_VSI_VMDQ2) { in i40e_add_channel()
6235 dev_info(&pf->pdev->dev, in i40e_add_channel()
6236 "add new vsi failed, ch->type %d\n", ch->type); in i40e_add_channel()
6237 return -EINVAL; in i40e_add_channel()
6241 ctxt.pf_num = hw->pf_id; in i40e_add_channel()
6245 if (ch->type == I40E_VSI_VMDQ2) in i40e_add_channel()
6248 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { in i40e_add_channel()
6261 dev_info(&pf->pdev->dev, in i40e_add_channel()
6263 i40e_stat_str(&pf->hw, ret), in i40e_add_channel()
6264 i40e_aq_str(&pf->hw, in i40e_add_channel()
6265 pf->hw.aq.asq_last_status)); in i40e_add_channel()
6266 return -ENOENT; in i40e_add_channel()
6272 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; in i40e_add_channel()
6273 ch->seid = ctxt.seid; in i40e_add_channel()
6274 ch->vsi_number = ctxt.vsi_number; in i40e_add_channel()
6275 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx); in i40e_add_channel()
6281 ch->info.mapping_flags = ctxt.info.mapping_flags; in i40e_add_channel()
6282 memcpy(&ch->info.queue_mapping, in i40e_add_channel()
6284 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping, in i40e_add_channel()
6298 bw_data.tc_valid_bits = ch->enabled_tc; in i40e_channel_config_bw()
6302 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, in i40e_channel_config_bw()
6305 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_bw()
6306 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n", in i40e_channel_config_bw()
6307 vsi->back->hw.aq.asq_last_status, ch->seid); in i40e_channel_config_bw()
6308 return -EINVAL; in i40e_channel_config_bw()
6312 ch->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_channel_config_bw()
6318 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6334 /* Enable ETS TCs with equal BW Share for now across all VSIs */ in i40e_channel_config_tx_ring()
6336 if (ch->enabled_tc & BIT(i)) in i40e_channel_config_tx_ring()
6343 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_tx_ring()
6345 ch->enabled_tc, ch->seid); in i40e_channel_config_tx_ring()
6349 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_channel_config_tx_ring()
6353 pf_q = ch->base_queue + i; in i40e_channel_config_tx_ring()
6355 /* Get the Tx ring ptr of the main VSI, to re-set up the Tx queue in i40e_channel_config_tx_ring()
6358 tx_ring = vsi->tx_rings[pf_q]; in i40e_channel_config_tx_ring()
6359 tx_ring->ch = ch; in i40e_channel_config_tx_ring()
6362 rx_ring = vsi->rx_rings[pf_q]; in i40e_channel_config_tx_ring()
6363 rx_ring->ch = ch; in i40e_channel_config_tx_ring()
6370 * i40e_setup_hw_channel - set up a new channel
6387 ch->initialized = false; in i40e_setup_hw_channel()
6388 ch->base_queue = vsi->next_base_queue; in i40e_setup_hw_channel()
6389 ch->type = type; in i40e_setup_hw_channel()
6394 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6401 ch->initialized = true; in i40e_setup_hw_channel()
6406 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6408 ch->seid); in i40e_setup_hw_channel()
6413 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; in i40e_setup_hw_channel()
6414 dev_dbg(&pf->pdev->dev, in i40e_setup_hw_channel()
6415 …"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base… in i40e_setup_hw_channel()
6416 ch->seid, ch->vsi_number, ch->stat_counter_idx, in i40e_setup_hw_channel()
6417 ch->num_queue_pairs, in i40e_setup_hw_channel()
6418 vsi->next_base_queue); in i40e_setup_hw_channel()
6423 * i40e_setup_channel - set up a new channel using an uplink element
6438 if (vsi->type == I40E_VSI_MAIN) { in i40e_setup_channel()
6441 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", in i40e_setup_channel()
6442 vsi->type); in i40e_setup_channel()
6447 seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_setup_channel()
6452 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); in i40e_setup_channel()
6456 return ch->initialized; in i40e_setup_channel()
6460 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6469 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode()
6470 struct i40e_hw *hw = &pf->hw; in i40e_validate_and_set_switch_mode()
6475 return -EINVAL; in i40e_validate_and_set_switch_mode()
6477 if (hw->dev_caps.switch_mode) { in i40e_validate_and_set_switch_mode()
6478 /* if switch mode is set, support mode2 (non-tunneled for in i40e_validate_and_set_switch_mode()
6481 u32 switch_mode = hw->dev_caps.switch_mode & in i40e_validate_and_set_switch_mode()
6486 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6487 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", in i40e_validate_and_set_switch_mode()
6488 hw->dev_caps.switch_mode); in i40e_validate_and_set_switch_mode()
6489 return -EINVAL; in i40e_validate_and_set_switch_mode()
6503 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, in i40e_validate_and_set_switch_mode()
6504 pf->last_sw_conf_valid_flags, in i40e_validate_and_set_switch_mode()
6506 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) in i40e_validate_and_set_switch_mode()
6507 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6511 hw->aq.asq_last_status)); in i40e_validate_and_set_switch_mode()
6517 * i40e_create_queue_channel - function to create channel
6527 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel()
6532 return -EINVAL; in i40e_create_queue_channel()
6534 if (!ch->num_queue_pairs) { in i40e_create_queue_channel()
6535 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", in i40e_create_queue_channel()
6536 ch->num_queue_pairs); in i40e_create_queue_channel()
6537 return -EINVAL; in i40e_create_queue_channel()
6541 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6544 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", in i40e_create_queue_channel()
6545 ch->num_queue_pairs); in i40e_create_queue_channel()
6546 return -EINVAL; in i40e_create_queue_channel()
6553 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_create_queue_channel()
6554 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_create_queue_channel()
6556 if (vsi->type == I40E_VSI_MAIN) { in i40e_create_queue_channel()
6567 /* By this time, vsi->cnt_q_avail shall be set to non-zero and in i40e_create_queue_channel()
6570 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { in i40e_create_queue_channel()
6571 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6573 vsi->cnt_q_avail, ch->num_queue_pairs); in i40e_create_queue_channel()
6574 return -EINVAL; in i40e_create_queue_channel()
6578 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { in i40e_create_queue_channel()
6579 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); in i40e_create_queue_channel()
6581 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6583 ch->num_queue_pairs); in i40e_create_queue_channel()
6584 return -EINVAL; in i40e_create_queue_channel()
6589 dev_info(&pf->pdev->dev, "Failed to setup channel\n"); in i40e_create_queue_channel()
6590 return -EINVAL; in i40e_create_queue_channel()
6593 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6595 ch->seid, ch->num_queue_pairs); in i40e_create_queue_channel()
6598 if (ch->max_tx_rate) { in i40e_create_queue_channel()
6599 u64 credits = ch->max_tx_rate; in i40e_create_queue_channel()
6601 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) in i40e_create_queue_channel()
6602 return -EINVAL; in i40e_create_queue_channel()
6605 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6606 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_create_queue_channel()
6607 ch->max_tx_rate, in i40e_create_queue_channel()
6609 ch->seid); in i40e_create_queue_channel()
6613 ch->parent_vsi = vsi; in i40e_create_queue_channel()
6616 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_create_queue_channel()
6622 * i40e_configure_queue_channels - Add queue channel for the given TCs
6625 * Configures queue channel mapping to the given TCs
6633 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */ in i40e_configure_queue_channels()
6634 vsi->tc_seid_map[0] = vsi->seid; in i40e_configure_queue_channels()
6636 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_configure_queue_channels()
6639 ret = -ENOMEM; in i40e_configure_queue_channels()
6643 INIT_LIST_HEAD(&ch->list); in i40e_configure_queue_channels()
6644 ch->num_queue_pairs = in i40e_configure_queue_channels()
6645 vsi->tc_config.tc_info[i].qcount; in i40e_configure_queue_channels()
6646 ch->base_queue = in i40e_configure_queue_channels()
6647 vsi->tc_config.tc_info[i].qoffset; in i40e_configure_queue_channels()
6652 max_rate = vsi->mqprio_qopt.max_rate[i]; in i40e_configure_queue_channels()
6654 ch->max_tx_rate = max_rate; in i40e_configure_queue_channels()
6656 list_add_tail(&ch->list, &vsi->ch_list); in i40e_configure_queue_channels()
6660 dev_err(&vsi->back->pdev->dev, in i40e_configure_queue_channels()
6662 i, ch->num_queue_pairs); in i40e_configure_queue_channels()
6665 vsi->tc_seid_map[i] = ch->seid; in i40e_configure_queue_channels()
6670 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); in i40e_configure_queue_channels()
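/*
 * enabled_tc is a plain bitmap: bit i set means TC i is active, and
 * only those bits get a channel (TC0 stays on the main VSI).  Sketch
 * of the walk:
 */
static int example_count_enabled_tcs(u8 enabled_tc)
{
	int i, cnt = 0;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		if (enabled_tc & BIT(i))
			cnt++;
	return cnt;
}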
6679 * i40e_veb_config_tc - Configure TCs for given VEB
6688 struct i40e_pf *pf = veb->pf; in i40e_veb_config_tc()
6692 /* No TCs or already enabled TCs just return */ in i40e_veb_config_tc()
6693 if (!enabled_tc || veb->enabled_tc == enabled_tc) in i40e_veb_config_tc()
6699 /* Enable ETS TCs with equal BW Share for now */ in i40e_veb_config_tc()
6705 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, in i40e_veb_config_tc()
6708 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6710 i40e_stat_str(&pf->hw, ret), in i40e_veb_config_tc()
6711 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6718 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6720 i40e_stat_str(&pf->hw, ret), in i40e_veb_config_tc()
6721 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6730 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6743 /* Enable the TCs available on PF to all VEBs */ in i40e_dcb_reconfigure()
6749 if (!pf->veb[v]) in i40e_dcb_reconfigure()
6751 ret = i40e_veb_config_tc(pf->veb[v], tc_map); in i40e_dcb_reconfigure()
6753 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6755 pf->veb[v]->seid); in i40e_dcb_reconfigure()
6761 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_dcb_reconfigure()
6762 if (!pf->vsi[v]) in i40e_dcb_reconfigure()
6765 /* - Enable all TCs for the LAN VSI in i40e_dcb_reconfigure()
6766 * - For all others keep them at TC0 for now in i40e_dcb_reconfigure()
6768 if (v == pf->lan_vsi) in i40e_dcb_reconfigure()
6773 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); in i40e_dcb_reconfigure()
6775 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6777 pf->vsi[v]->seid); in i40e_dcb_reconfigure()
6780 /* Re-configure VSI vectors based on updated TC map */ in i40e_dcb_reconfigure()
6781 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); in i40e_dcb_reconfigure()
6782 if (pf->vsi[v]->netdev) in i40e_dcb_reconfigure()
6783 i40e_dcbnl_set_all(pf->vsi[v]); in i40e_dcb_reconfigure()
6789 * i40e_resume_port_tx - Resume port Tx
6797 struct i40e_hw *hw = &pf->hw; in i40e_resume_port_tx()
6802 dev_info(&pf->pdev->dev, in i40e_resume_port_tx()
6804 i40e_stat_str(&pf->hw, ret), in i40e_resume_port_tx()
6805 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_resume_port_tx()
6807 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_resume_port_tx()
6815 * i40e_suspend_port_tx - Suspend port Tx
6822 struct i40e_hw *hw = &pf->hw; in i40e_suspend_port_tx()
6825 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); in i40e_suspend_port_tx()
6827 dev_info(&pf->pdev->dev, in i40e_suspend_port_tx()
6829 i40e_stat_str(&pf->hw, ret), in i40e_suspend_port_tx()
6830 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_suspend_port_tx()
6832 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_suspend_port_tx()
6840 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6850 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config; in i40e_hw_set_dcb_config()
6855 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n"); in i40e_hw_set_dcb_config()
6864 old_cfg->etsrec = old_cfg->etscfg; in i40e_hw_set_dcb_config()
6865 ret = i40e_set_dcb_config(&pf->hw); in i40e_hw_set_dcb_config()
6867 dev_info(&pf->pdev->dev, in i40e_hw_set_dcb_config()
6869 i40e_stat_str(&pf->hw, ret), in i40e_hw_set_dcb_config()
6870 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_set_dcb_config()
6878 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { in i40e_hw_set_dcb_config()
6879 /* Re-start the VSIs if disabled */ in i40e_hw_set_dcb_config()
6891 * i40e_hw_dcb_config - Program new DCBX settings into HW
6906 struct i40e_hw *hw = &pf->hw; in i40e_hw_dcb_config()
6907 u8 num_ports = hw->num_ports; in i40e_hw_dcb_config()
6909 int ret = -EINVAL; in i40e_hw_dcb_config()
6915 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n"); in i40e_hw_dcb_config()
6916 /* Unpack information to program the ETS HW via the shared API in i40e_hw_dcb_config()
6919 * ETS/NON-ETS arbiter mode in i40e_hw_dcb_config()
6922 * PFC priority bit-map in i40e_hw_dcb_config()
6926 * TSA table (ETS or non-ETS) in i40e_hw_dcb_config()
6936 switch (new_cfg->etscfg.tsatable[i]) { in i40e_hw_dcb_config()
6940 new_cfg->etscfg.tcbwtable[i]; in i40e_hw_dcb_config()
6955 old_cfg = &hw->local_dcbx_config; in i40e_hw_dcb_config()
6965 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_hw_dcb_config()
6967 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_hw_dcb_config()
6969 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
6981 (hw, pf->mac_seid, &ets_data, in i40e_hw_dcb_config()
6984 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
6986 i40e_stat_str(&pf->hw, ret), in i40e_hw_dcb_config()
6987 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
6999 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode, in i40e_hw_dcb_config()
7001 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable, in i40e_hw_dcb_config()
7002 new_cfg->etscfg.prioritytable); in i40e_hw_dcb_config()
7003 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable); in i40e_hw_dcb_config()
7007 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; in i40e_hw_dcb_config()
7012 false, new_cfg->pfc.pfcenable, in i40e_hw_dcb_config()
7014 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg); in i40e_hw_dcb_config()
7017 pf->pb_cfg = pb_cfg; in i40e_hw_dcb_config()
7020 ret = i40e_aq_dcb_updated(&pf->hw, NULL); in i40e_hw_dcb_config()
7022 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
7024 i40e_stat_str(&pf->hw, ret), in i40e_hw_dcb_config()
7025 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
7035 /* Re-start the VSIs if disabled */ in i40e_hw_dcb_config()
7039 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
7044 /* Wait for the PF's queues to be disabled */ in i40e_hw_dcb_config()
7048 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_hw_dcb_config()
7053 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_hw_dcb_config()
7054 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_hw_dcb_config()
7057 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) in i40e_hw_dcb_config()
7066 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB is in SW
7073 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config; in i40e_dcb_sw_default_config()
7075 struct i40e_hw *hw = &pf->hw; in i40e_dcb_sw_default_config()
7078 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) { in i40e_dcb_sw_default_config()
7080 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); in i40e_dcb_sw_default_config()
7081 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
7082 pf->tmp_cfg.etscfg.maxtcs = 0; in i40e_dcb_sw_default_config()
7083 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7084 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; in i40e_dcb_sw_default_config()
7085 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING; in i40e_dcb_sw_default_config()
7086 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
7088 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS; in i40e_dcb_sw_default_config()
7089 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE; in i40e_dcb_sw_default_config()
7090 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO; in i40e_dcb_sw_default_config()
7091 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE; in i40e_dcb_sw_default_config()
7093 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg); in i40e_dcb_sw_default_config()
7103 (hw, pf->mac_seid, &ets_data, in i40e_dcb_sw_default_config()
7106 dev_info(&pf->pdev->dev, in i40e_dcb_sw_default_config()
7108 i40e_stat_str(&pf->hw, err), in i40e_dcb_sw_default_config()
7109 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_dcb_sw_default_config()
7110 err = -ENOENT; in i40e_dcb_sw_default_config()
7115 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
7116 dcb_cfg->etscfg.cbs = 0; in i40e_dcb_sw_default_config()
7117 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
7118 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7125 * i40e_init_pf_dcb - Initialize DCB configuration
7133 struct i40e_hw *hw = &pf->hw; in i40e_init_pf_dcb()
7139 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { in i40e_init_pf_dcb()
7140 dev_info(&pf->pdev->dev, "DCB is not supported.\n"); in i40e_init_pf_dcb()
7144 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { in i40e_init_pf_dcb()
7145 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); in i40e_init_pf_dcb()
7148 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n"); in i40e_init_pf_dcb()
7151 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n"); in i40e_init_pf_dcb()
7152 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_init_pf_dcb()
7155 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_init_pf_dcb()
7156 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
7162 if ((!hw->func_caps.dcb) || in i40e_init_pf_dcb()
7163 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { in i40e_init_pf_dcb()
7164 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7168 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | in i40e_init_pf_dcb()
7171 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_init_pf_dcb()
7175 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) in i40e_init_pf_dcb()
7176 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
7178 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
7179 dev_dbg(&pf->pdev->dev, in i40e_init_pf_dcb()
7182 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_init_pf_dcb()
7183 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); in i40e_init_pf_dcb()
7184 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; in i40e_init_pf_dcb()
7186 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7188 i40e_stat_str(&pf->hw, err), in i40e_init_pf_dcb()
7189 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_init_pf_dcb()
7198 * i40e_print_link_message - print link up or down
7205 struct i40e_pf *pf = vsi->back; in i40e_print_link_message()
7213 new_speed = pf->hw.phy.link_info.link_speed; in i40e_print_link_message()
7217 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) in i40e_print_link_message()
7219 vsi->current_isup = isup; in i40e_print_link_message()
7220 vsi->current_speed = new_speed; in i40e_print_link_message()
7222 netdev_info(vsi->netdev, "NIC Link is Down\n"); in i40e_print_link_message()
7229 if (pf->hw.func_caps.npar_enable && in i40e_print_link_message()
7230 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || in i40e_print_link_message()
7231 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) in i40e_print_link_message()
7232 netdev_warn(vsi->netdev, in i40e_print_link_message()
7235 switch (pf->hw.phy.link_info.link_speed) { in i40e_print_link_message()
7264 switch (pf->hw.fc.current_mode) { in i40e_print_link_message()
7279 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { in i40e_print_link_message()
7284 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7287 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7289 fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7290 else if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7292 fec = "CL108 RS-FEC"; in i40e_print_link_message()
7294 /* 'CL108 RS-FEC' should be displayed when RS is requested, or in i40e_print_link_message()
7297 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7299 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7301 req_fec = "CL108 RS-FEC"; in i40e_print_link_message()
7303 req_fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7305 netdev_info(vsi->netdev, in i40e_print_link_message()
7308 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { in i40e_print_link_message()
7313 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7316 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7318 fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7320 if (pf->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7322 req_fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7324 netdev_info(vsi->netdev, in i40e_print_link_message()
7328 netdev_info(vsi->netdev, in i40e_print_link_message()
7336 * i40e_up_complete - Finish the last steps of bringing up a connection
7341 struct i40e_pf *pf = vsi->back; in i40e_up_complete()
7344 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_up_complete()
7354 clear_bit(__I40E_VSI_DOWN, vsi->state); in i40e_up_complete()
7358 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && in i40e_up_complete()
7359 (vsi->netdev)) { in i40e_up_complete()
7361 netif_tx_start_all_queues(vsi->netdev); in i40e_up_complete()
7362 netif_carrier_on(vsi->netdev); in i40e_up_complete()
7366 if (vsi->type == I40E_VSI_FDIR) { in i40e_up_complete()
7368 pf->fd_add_err = 0; in i40e_up_complete()
7369 pf->fd_atr_cnt = 0; in i40e_up_complete()
7376 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_up_complete()
7383 * i40e_vsi_reinit_locked - Reset the VSI
7391 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked()
7393 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_vsi_reinit_locked()
7398 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_vsi_reinit_locked()
7402 * i40e_force_link_state - Force the link status
7411 struct i40e_hw *hw = &pf->hw; in i40e_force_link_state()
7425 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7428 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_force_link_state()
7437 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7440 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_force_link_state()
7448 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) in i40e_force_link_state()
7464 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { in i40e_force_link_state()
7482 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7484 i40e_stat_str(&pf->hw, err), in i40e_force_link_state()
7485 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_force_link_state()
7492 /* Wait a little bit (on 40G cards it sometimes takes a really in i40e_force_link_state()
7506 * i40e_up - Bring the connection back up after being down
7513 if (vsi->type == I40E_VSI_MAIN && in i40e_up()
7514 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || in i40e_up()
7515 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) in i40e_up()
7516 i40e_force_link_state(vsi->back, true); in i40e_up()
7526 * i40e_down - Shutdown the connection processing
7534 * sets the vsi->state __I40E_VSI_DOWN bit. in i40e_down()
7536 if (vsi->netdev) { in i40e_down()
7537 netif_carrier_off(vsi->netdev); in i40e_down()
7538 netif_tx_disable(vsi->netdev); in i40e_down()
7542 if (vsi->type == I40E_VSI_MAIN && in i40e_down()
7543 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || in i40e_down()
7544 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) in i40e_down()
7545 i40e_force_link_state(vsi->back, false); in i40e_down()
7548 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_down()
7549 i40e_clean_tx_ring(vsi->tx_rings[i]); in i40e_down()
7551 /* Make sure that in-progress ndo_xdp_xmit and in i40e_down()
7555 i40e_clean_tx_ring(vsi->xdp_rings[i]); in i40e_down()
7557 i40e_clean_rx_ring(vsi->rx_rings[i]); in i40e_down()
7563 * i40e_validate_mqprio_qopt - validate queue mapping info
7574 if (mqprio_qopt->qopt.offset[0] != 0 || in i40e_validate_mqprio_qopt()
7575 mqprio_qopt->qopt.num_tc < 1 || in i40e_validate_mqprio_qopt()
7576 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS) in i40e_validate_mqprio_qopt()
7577 return -EINVAL; in i40e_validate_mqprio_qopt()
7579 if (!mqprio_qopt->qopt.count[i]) in i40e_validate_mqprio_qopt()
7580 return -EINVAL; in i40e_validate_mqprio_qopt()
7581 if (mqprio_qopt->min_rate[i]) { in i40e_validate_mqprio_qopt()
7582 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7584 return -EINVAL; in i40e_validate_mqprio_qopt()
7586 max_rate = mqprio_qopt->max_rate[i]; in i40e_validate_mqprio_qopt()
7590 if (i >= mqprio_qopt->qopt.num_tc - 1) in i40e_validate_mqprio_qopt()
7592 if (mqprio_qopt->qopt.offset[i + 1] != in i40e_validate_mqprio_qopt()
7593 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) in i40e_validate_mqprio_qopt()
7594 return -EINVAL; in i40e_validate_mqprio_qopt()
7596 if (vsi->num_queue_pairs < in i40e_validate_mqprio_qopt()
7597 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { in i40e_validate_mqprio_qopt()
7598 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7600 return -EINVAL; in i40e_validate_mqprio_qopt()
7603 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7605 return -EINVAL; in i40e_validate_mqprio_qopt()
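/*
 * The mqprio checks above reduce to one invariant: TC 0 starts at
 * queue 0, every TC owns at least one queue, and each TC begins
 * exactly where the previous one ended.  Condensed sketch:
 */
static bool example_offsets_contiguous(const u16 *offset, const u16 *count,
				       int num_tc)
{
	int i;

	if (num_tc < 1 || offset[0] != 0)
		return false;
	for (i = 0; i < num_tc - 1; i++)
		if (!count[i] || offset[i + 1] != offset[i] + count[i])
			return false;
	return count[num_tc - 1] != 0;
}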
7611 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7620 vsi->tc_config.numtc = 1; in i40e_vsi_set_default_tc_config()
7621 vsi->tc_config.enabled_tc = 1; in i40e_vsi_set_default_tc_config()
7622 qcount = min_t(int, vsi->alloc_queue_pairs, in i40e_vsi_set_default_tc_config()
7623 i40e_pf_get_max_q_per_tc(vsi->back)); in i40e_vsi_set_default_tc_config()
7628 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_set_default_tc_config()
7630 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_set_default_tc_config()
7632 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_set_default_tc_config()
7633 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_set_default_tc_config()
7658 *aq_err = hw->aq.asq_last_status; in i40e_del_macvlan_filter()
7687 *aq_err = hw->aq.asq_last_status; in i40e_add_macvlan_filter()
7693 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7703 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_reset_ch_rings()
7704 pf_q = ch->base_queue + i; in i40e_reset_ch_rings()
7705 tx_ring = vsi->tx_rings[pf_q]; in i40e_reset_ch_rings()
7706 tx_ring->ch = NULL; in i40e_reset_ch_rings()
7707 rx_ring = vsi->rx_rings[pf_q]; in i40e_reset_ch_rings()
7708 rx_ring->ch = NULL; in i40e_reset_ch_rings()
7725 if (list_empty(&vsi->macvlan_list)) in i40e_free_macvlan_channels()
7728 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_free_macvlan_channels()
7733 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_free_macvlan_channels()
7734 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); in i40e_free_macvlan_channels()
7735 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_free_macvlan_channels()
7736 kfree(ch->fwd); in i40e_free_macvlan_channels()
7737 ch->fwd = NULL; in i40e_free_macvlan_channels()
7740 list_del(&ch->list); in i40e_free_macvlan_channels()
7741 parent_vsi = ch->parent_vsi; in i40e_free_macvlan_channels()
7742 if (!parent_vsi || !ch->initialized) { in i40e_free_macvlan_channels()
7748 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_free_macvlan_channels()
7751 dev_err(&vsi->back->pdev->dev, in i40e_free_macvlan_channels()
7753 ch->seid, parent_vsi->seid); in i40e_free_macvlan_channels()
7756 vsi->macvlan_cnt = 0; in i40e_free_macvlan_channels()
7760 * i40e_fwd_ring_up - bring the macvlan device up
7770 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up()
7771 struct i40e_hw *hw = &pf->hw; in i40e_fwd_ring_up()
7774 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_ring_up()
7776 iter->fwd = fwd; in i40e_fwd_ring_up()
7779 netdev_bind_sb_channel_queue(vsi->netdev, vdev, in i40e_fwd_ring_up()
7781 iter->num_queue_pairs, in i40e_fwd_ring_up()
7782 iter->base_queue); in i40e_fwd_ring_up()
7783 for (i = 0; i < iter->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7787 pf_q = iter->base_queue + i; in i40e_fwd_ring_up()
7790 tx_ring = vsi->tx_rings[pf_q]; in i40e_fwd_ring_up()
7791 tx_ring->ch = iter; in i40e_fwd_ring_up()
7794 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7795 rx_ring->ch = iter; in i40e_fwd_ring_up()
7803 return -EINVAL; in i40e_fwd_ring_up()
7811 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); in i40e_fwd_ring_up()
7815 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7819 pf_q = ch->base_queue + i; in i40e_fwd_ring_up()
7820 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7821 rx_ring->netdev = NULL; in i40e_fwd_ring_up()
7823 dev_info(&pf->pdev->dev, in i40e_fwd_ring_up()
7834 * i40e_setup_macvlans - create the channels which will be macvlans
7843 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans()
7844 struct i40e_hw *hw = &pf->hw; in i40e_setup_macvlans()
7851 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) in i40e_setup_macvlans()
7852 return -EINVAL; in i40e_setup_macvlans()
7854 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); in i40e_setup_macvlans()
7856 /* find the next higher power-of-2 of num queue pairs */ in i40e_setup_macvlans()
7857 pow = fls(roundup_pow_of_two(num_qps) - 1); in i40e_setup_macvlans()
7866 ctxt.seid = vsi->seid; in i40e_setup_macvlans()
7867 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_setup_macvlans()
7869 ctxt.uplink_seid = vsi->uplink_seid; in i40e_setup_macvlans()
7870 ctxt.info = vsi->info; in i40e_setup_macvlans()
7873 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_setup_macvlans()
7877 vsi->rss_size = max_t(u16, num_qps, qcnt); in i40e_setup_macvlans()
7880 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7882 vsi->rss_size); in i40e_setup_macvlans()
7885 vsi->reconfig_rss = true; in i40e_setup_macvlans()
7886 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_macvlans()
7887 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); in i40e_setup_macvlans()
7888 vsi->next_base_queue = num_qps; in i40e_setup_macvlans()
7889 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; in i40e_setup_macvlans()
7891 /* Update the VSI after updating the VSI queue-mapping in i40e_setup_macvlans()
7896 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7899 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_setup_macvlans()
7904 vsi->info.valid_sections = 0; in i40e_setup_macvlans()
7907 INIT_LIST_HEAD(&vsi->macvlan_list); in i40e_setup_macvlans()
7911 ret = -ENOMEM; in i40e_setup_macvlans()
7914 INIT_LIST_HEAD(&ch->list); in i40e_setup_macvlans()
7915 ch->num_queue_pairs = qcnt; in i40e_setup_macvlans()
7917 ret = -EINVAL; in i40e_setup_macvlans()
7921 ch->parent_vsi = vsi; in i40e_setup_macvlans()
7922 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_setup_macvlans()
7923 vsi->macvlan_cnt++; in i40e_setup_macvlans()
7924 list_add_tail(&ch->list, &vsi->macvlan_list); in i40e_setup_macvlans()
7930 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); in i40e_setup_macvlans()
7937 * i40e_fwd_add - configure macvlans
7945 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_add()
7946 struct i40e_pf *pf = vsi->back; in i40e_fwd_add()
7950 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_fwd_add()
7952 return ERR_PTR(-EINVAL); in i40e_fwd_add()
7956 return ERR_PTR(-EINVAL); in i40e_fwd_add()
7958 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { in i40e_fwd_add()
7960 return ERR_PTR(-EINVAL); in i40e_fwd_add()
7967 return ERR_PTR(-ERANGE); in i40e_fwd_add()
7969 if (!vsi->macvlan_cnt) { in i40e_fwd_add()
7971 set_bit(0, vsi->fwd_bitmask); in i40e_fwd_add()
7977 vectors = pf->num_lan_msix; in i40e_fwd_add()
7981 macvlan_cnt = (vectors - 32) / 4; in i40e_fwd_add()
7985 macvlan_cnt = (vectors - 16) / 2; in i40e_fwd_add()
7989 macvlan_cnt = vectors - 16; in i40e_fwd_add()
7993 macvlan_cnt = vectors - 8; in i40e_fwd_add()
7997 macvlan_cnt = vectors - 1; in i40e_fwd_add()
8001 return ERR_PTR(-EBUSY); in i40e_fwd_add()
8015 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, in i40e_fwd_add()
8016 vsi->macvlan_cnt); in i40e_fwd_add()
8018 return ERR_PTR(-EBUSY); in i40e_fwd_add()
8023 return ERR_PTR(-ENOMEM); in i40e_fwd_add()
8025 set_bit(avail_macvlan, vsi->fwd_bitmask); in i40e_fwd_add()
8026 fwd->bit_no = avail_macvlan; in i40e_fwd_add()
8028 fwd->netdev = vdev; in i40e_fwd_add()
8041 return ERR_PTR(-EINVAL); in i40e_fwd_add()
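/*
 * Rough sketch of the vector budgeting above: the more MSI-X vectors
 * the PF owns, the more queues each macvlan channel may claim, with a
 * reserve held back for the default queues.  The tier boundaries here
 * are illustrative assumptions matching the visible arithmetic, not a
 * quote of the driver's exact thresholds:
 */
static int example_macvlan_budget(int vectors, int *qcnt)
{
	if (vectors > 64) {
		*qcnt = 4;
		return (vectors - 32) / 4;
	} else if (vectors > 32) {
		*qcnt = 2;
		return (vectors - 16) / 2;
	} else if (vectors > 16) {
		*qcnt = 1;
		return vectors - 16;
	} else if (vectors > 8) {
		*qcnt = 1;
		return vectors - 8;
	}
	*qcnt = 1;
	return vectors - 1;	/* one vector stays with the default queue */
}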
8048 * i40e_del_all_macvlans - Delete all the mac filters on the channels
8054 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans()
8055 struct i40e_hw *hw = &pf->hw; in i40e_del_all_macvlans()
8058 if (list_empty(&vsi->macvlan_list)) in i40e_del_all_macvlans()
8061 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_del_all_macvlans()
8063 ret = i40e_del_macvlan_filter(hw, ch->seid, in i40e_del_all_macvlans()
8069 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_del_all_macvlans()
8070 netdev_unbind_sb_channel(vsi->netdev, in i40e_del_all_macvlans()
8071 ch->fwd->netdev); in i40e_del_all_macvlans()
8072 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_del_all_macvlans()
8073 kfree(ch->fwd); in i40e_del_all_macvlans()
8074 ch->fwd = NULL; in i40e_del_all_macvlans()
8081 * i40e_fwd_del - delete macvlan interfaces
8090 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_del()
8091 struct i40e_pf *pf = vsi->back; in i40e_fwd_del()
8092 struct i40e_hw *hw = &pf->hw; in i40e_fwd_del()
8096 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_del()
8099 fwd->netdev->dev_addr)) { in i40e_fwd_del()
8100 ret = i40e_del_macvlan_filter(hw, ch->seid, in i40e_fwd_del()
8106 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_fwd_del()
8107 netdev_unbind_sb_channel(netdev, fwd->netdev); in i40e_fwd_del()
8108 netdev_set_sb_channel(fwd->netdev, 0); in i40e_fwd_del()
8109 kfree(ch->fwd); in i40e_fwd_del()
8110 ch->fwd = NULL; in i40e_fwd_del()
8112 dev_info(&pf->pdev->dev, in i40e_fwd_del()
8123 * i40e_setup_tc - configure multiple traffic classes
8131 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc()
8132 struct i40e_pf *pf = vsi->back; in i40e_setup_tc()
8136 int ret = -EINVAL; in i40e_setup_tc()
8140 old_queue_pairs = vsi->num_queue_pairs; in i40e_setup_tc()
8141 num_tc = mqprio_qopt->qopt.num_tc; in i40e_setup_tc()
8142 hw = mqprio_qopt->qopt.hw; in i40e_setup_tc()
8143 mode = mqprio_qopt->mode; in i40e_setup_tc()
8145 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
8146 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in i40e_setup_tc()
8151 if (pf->flags & I40E_FLAG_MFP_ENABLED) { in i40e_setup_tc()
8158 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
8161 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_setup_tc()
8175 if (pf->flags & I40E_FLAG_DCB_ENABLED) { in i40e_setup_tc()
8180 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_setup_tc()
8185 memcpy(&vsi->mqprio_qopt, mqprio_qopt, in i40e_setup_tc()
8187 pf->flags |= I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
8188 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_setup_tc()
8191 return -EINVAL; in i40e_setup_tc()
8200 if (enabled_tc == vsi->tc_config.enabled_tc && in i40e_setup_tc()
8210 /* Configure VSI for enabled TCs */ in i40e_setup_tc()
8214 vsi->seid); in i40e_setup_tc()
8218 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { in i40e_setup_tc()
8221 vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8222 ret = -EINVAL; in i40e_setup_tc()
8227 dev_info(&vsi->back->pdev->dev, in i40e_setup_tc()
8229 vsi->seid, vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8232 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_setup_tc()
8234 vsi->mqprio_qopt.max_rate[0]); in i40e_setup_tc()
8236 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_setup_tc()
8241 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_tc()
8242 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_setup_tc()
8245 vsi->seid); in i40e_setup_tc()
8253 vsi->num_queue_pairs = old_queue_pairs; in i40e_setup_tc()
8274 * i40e_set_cld_element - sets cloud filter element data
8288 ether_addr_copy(cld->outer_mac, filter->dst_mac); in i40e_set_cld_element()
8289 ether_addr_copy(cld->inner_mac, filter->src_mac); in i40e_set_cld_element()
8291 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) in i40e_set_cld_element()
8294 if (filter->n_proto == ETH_P_IPV6) { in i40e_set_cld_element()
8295 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) in i40e_set_cld_element()
8296 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) { in i40e_set_cld_element()
8297 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); in i40e_set_cld_element()
8299 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa); in i40e_set_cld_element()
8302 ipa = be32_to_cpu(filter->dst_ipv4); in i40e_set_cld_element()
8304 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); in i40e_set_cld_element()
8307 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id)); in i40e_set_cld_element()
8310 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id) in i40e_set_cld_element()
8312 if (filter->tenant_id) in i40e_set_cld_element()
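/*
 * The IPv6 loop above converts between the stack's big-endian address
 * words and the admin queue's little-endian layout, walking the four
 * 32-bit words in reverse order.  Stand-alone sketch of the transform:
 */
static void example_be_to_le_reversed(const __be32 src[4], __le32 dst[4])
{
	int i;

	for (i = 0; i < 4; i++)
		dst[i] = cpu_to_le32(be32_to_cpu(src[3 - i]));
}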
8317 * i40e_add_del_cloud_filter - Add/del cloud filter
8329 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter()
8348 if (filter->flags >= ARRAY_SIZE(flag_table)) in i40e_add_del_cloud_filter()
8356 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) in i40e_add_del_cloud_filter()
8357 cld_filter.flags = cpu_to_le16(filter->tunnel_type << in i40e_add_del_cloud_filter()
8360 if (filter->n_proto == ETH_P_IPV6) in i40e_add_del_cloud_filter()
8361 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | in i40e_add_del_cloud_filter()
8364 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | in i40e_add_del_cloud_filter()
8368 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8371 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8374 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8376 add ? "add" : "delete", filter->dst_port, ret, in i40e_add_del_cloud_filter()
8377 pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter()
8379 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8381 add ? "Added" : "Deleted", filter->seid); in i40e_add_del_cloud_filter()
8386 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8399 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf()
8403 if ((is_valid_ether_addr(filter->dst_mac) && in i40e_add_del_cloud_filter_big_buf()
8404 is_valid_ether_addr(filter->src_mac)) || in i40e_add_del_cloud_filter_big_buf()
8405 (is_multicast_ether_addr(filter->dst_mac) && in i40e_add_del_cloud_filter_big_buf()
8406 is_multicast_ether_addr(filter->src_mac))) in i40e_add_del_cloud_filter_big_buf()
8407 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8409 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP in i40e_add_del_cloud_filter_big_buf()
8412 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) in i40e_add_del_cloud_filter_big_buf()
8413 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8416 if (filter->src_port || in i40e_add_del_cloud_filter_big_buf()
8417 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) || in i40e_add_del_cloud_filter_big_buf()
8418 !ipv6_addr_any(&filter->ip.v6.src_ip6)) in i40e_add_del_cloud_filter_big_buf()
8419 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8426 if (is_valid_ether_addr(filter->dst_mac) || in i40e_add_del_cloud_filter_big_buf()
8427 is_valid_ether_addr(filter->src_mac) || in i40e_add_del_cloud_filter_big_buf()
8428 is_multicast_ether_addr(filter->dst_mac) || in i40e_add_del_cloud_filter_big_buf()
8429 is_multicast_ether_addr(filter->src_mac)) { in i40e_add_del_cloud_filter_big_buf()
8431 if (filter->dst_ipv4) in i40e_add_del_cloud_filter_big_buf()
8432 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8441 if (filter->vlan_id) { in i40e_add_del_cloud_filter_big_buf()
8446 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) || in i40e_add_del_cloud_filter_big_buf()
8447 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { in i40e_add_del_cloud_filter_big_buf()
8450 if (filter->n_proto == ETH_P_IPV6) in i40e_add_del_cloud_filter_big_buf()
8457 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8459 return -EINVAL; in i40e_add_del_cloud_filter_big_buf()
8464 be16_to_cpu(filter->dst_port); in i40e_add_del_cloud_filter_big_buf()
8470 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8476 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8479 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8484 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8486 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter_big_buf()
8488 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8490 add ? "add" : "delete", filter->seid, in i40e_add_del_cloud_filter_big_buf()
8491 ntohs(filter->dst_port)); in i40e_add_del_cloud_filter_big_buf()
8496 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8507 struct flow_dissector *dissector = rule->match.dissector; in i40e_parse_cls_flower()
8509 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower()
8512 if (dissector->used_keys & in i40e_parse_cls_flower()
8521 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", in i40e_parse_cls_flower()
8522 dissector->used_keys); in i40e_parse_cls_flower()
8523 return -EOPNOTSUPP; in i40e_parse_cls_flower()
8530 if (match.mask->keyid != 0) in i40e_parse_cls_flower()
8533 filter->tenant_id = be32_to_cpu(match.key->keyid); in i40e_parse_cls_flower()
8540 n_proto_key = ntohs(match.key->n_proto); in i40e_parse_cls_flower()
8541 n_proto_mask = ntohs(match.mask->n_proto); in i40e_parse_cls_flower()
8547 filter->n_proto = n_proto_key & n_proto_mask; in i40e_parse_cls_flower()
8548 filter->ip_proto = match.key->ip_proto; in i40e_parse_cls_flower()
8557 if (!is_zero_ether_addr(match.mask->dst)) { in i40e_parse_cls_flower()
8558 if (is_broadcast_ether_addr(match.mask->dst)) { in i40e_parse_cls_flower()
8561 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", in i40e_parse_cls_flower()
8562 match.mask->dst); in i40e_parse_cls_flower()
8567 if (!is_zero_ether_addr(match.mask->src)) { in i40e_parse_cls_flower()
8568 if (is_broadcast_ether_addr(match.mask->src)) { in i40e_parse_cls_flower()
8571 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", in i40e_parse_cls_flower()
8572 match.mask->src); in i40e_parse_cls_flower()
8576 ether_addr_copy(filter->dst_mac, match.key->dst); in i40e_parse_cls_flower()
8577 ether_addr_copy(filter->src_mac, match.key->src); in i40e_parse_cls_flower()
8584 if (match.mask->vlan_id) { in i40e_parse_cls_flower()
8585 if (match.mask->vlan_id == VLAN_VID_MASK) { in i40e_parse_cls_flower()
8589 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", in i40e_parse_cls_flower()
8590 match.mask->vlan_id); in i40e_parse_cls_flower()
8595 filter->vlan_id = cpu_to_be16(match.key->vlan_id); in i40e_parse_cls_flower()
8602 addr_type = match.key->addr_type; in i40e_parse_cls_flower()
8609 if (match.mask->dst) { in i40e_parse_cls_flower()
8610 if (match.mask->dst == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8613 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", in i40e_parse_cls_flower()
8614 &match.mask->dst); in i40e_parse_cls_flower()
8619 if (match.mask->src) { in i40e_parse_cls_flower()
8620 if (match.mask->src == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8623 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", in i40e_parse_cls_flower()
8624 &match.mask->src); in i40e_parse_cls_flower()
8630 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); in i40e_parse_cls_flower()
8633 filter->dst_ipv4 = match.key->dst; in i40e_parse_cls_flower()
8634 filter->src_ipv4 = match.key->src; in i40e_parse_cls_flower()
8645 if (ipv6_addr_loopback(&match.key->dst) || in i40e_parse_cls_flower()
8646 ipv6_addr_loopback(&match.key->src)) { in i40e_parse_cls_flower()
8647 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8651 if (!ipv6_addr_any(&match.mask->dst) || in i40e_parse_cls_flower()
8652 !ipv6_addr_any(&match.mask->src)) in i40e_parse_cls_flower()
8655 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, in i40e_parse_cls_flower()
8656 sizeof(filter->src_ipv6)); in i40e_parse_cls_flower()
8657 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, in i40e_parse_cls_flower()
8658 sizeof(filter->dst_ipv6)); in i40e_parse_cls_flower()
8665 if (match.mask->src) { in i40e_parse_cls_flower()
8666 if (match.mask->src == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8669 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", in i40e_parse_cls_flower()
8670 be16_to_cpu(match.mask->src)); in i40e_parse_cls_flower()
8675 if (match.mask->dst) { in i40e_parse_cls_flower()
8676 if (match.mask->dst == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8679 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", in i40e_parse_cls_flower()
8680 be16_to_cpu(match.mask->dst)); in i40e_parse_cls_flower()
8685 filter->dst_port = match.key->dst; in i40e_parse_cls_flower()
8686 filter->src_port = match.key->src; in i40e_parse_cls_flower()
8688 switch (filter->ip_proto) { in i40e_parse_cls_flower()
8693 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8695 return -EINVAL; in i40e_parse_cls_flower()
8698 filter->flags = field_flags; in i40e_parse_cls_flower()
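/*
 * Every FLOW_DISSECTOR_KEY_* block above follows the same key/mask
 * idiom from <net/flow_offload.h>: test for the key, pull the match,
 * then reject masks the hardware cannot honour.  Minimal sketch for
 * the basic key:
 */
static int example_parse_basic(struct flow_rule *rule, u16 *n_proto)
{
	struct flow_match_basic match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		return -EOPNOTSUPP;

	flow_rule_match_basic(rule, &match);
	*n_proto = ntohs(match.key->n_proto) & ntohs(match.mask->n_proto);
	return 0;
}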
8716 filter->seid = vsi->seid; in i40e_handle_tclass()
8718 } else if (vsi->tc_config.enabled_tc & BIT(tc)) { in i40e_handle_tclass()
8719 if (!filter->dst_port) { in i40e_handle_tclass()
8720 dev_err(&vsi->back->pdev->dev, in i40e_handle_tclass()
8722 return -EINVAL; in i40e_handle_tclass()
8724 if (list_empty(&vsi->ch_list)) in i40e_handle_tclass()
8725 return -EINVAL; in i40e_handle_tclass()
8726 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, in i40e_handle_tclass()
8728 if (ch->seid == vsi->tc_seid_map[tc]) in i40e_handle_tclass()
8729 filter->seid = ch->seid; in i40e_handle_tclass()
8733 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); in i40e_handle_tclass()
8734 return -EINVAL; in i40e_handle_tclass()
8738 * i40e_configure_clsflower - Configure tc flower filters
8746 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); in i40e_configure_clsflower()
8748 struct i40e_pf *pf = vsi->back; in i40e_configure_clsflower()
8752 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); in i40e_configure_clsflower()
8753 return -EOPNOTSUPP; in i40e_configure_clsflower()
8757 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination\n"); in i40e_configure_clsflower()
8758 return -EINVAL; in i40e_configure_clsflower()
8761 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_configure_clsflower()
8762 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_configure_clsflower()
8763 return -EBUSY; in i40e_configure_clsflower()
8765 if (pf->fdir_pf_active_filters || in i40e_configure_clsflower()
8766 (!hlist_empty(&pf->fdir_filter_list))) { in i40e_configure_clsflower()
8767 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8769 return -EINVAL; in i40e_configure_clsflower()
8772 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_configure_clsflower()
8773 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8774 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n"); in i40e_configure_clsflower()
8775 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_configure_clsflower()
8776 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_configure_clsflower()
8781 return -ENOMEM; in i40e_configure_clsflower()
8783 filter->cookie = cls_flower->cookie; in i40e_configure_clsflower()
8794 if (filter->dst_port) in i40e_configure_clsflower()
8800 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n", in i40e_configure_clsflower()
8806 INIT_HLIST_NODE(&filter->cloud_node); in i40e_configure_clsflower()
8808 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); in i40e_configure_clsflower()
8810 pf->num_cloud_filters++; in i40e_configure_clsflower()
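
The lines above also show the mutual exclusion between Flow Director sideband and tc-flower cloud filters: adding the first cloud filter clears FD_SB_ENABLED and sets a marker flag so sideband can be restored once the last filter is deleted. A small sketch of that flag juggling, with hypothetical flag values:

/* Illustrative sketch: park one feature behind a marker flag while a
 * mutually exclusive feature is in use.
 */
#include <stdint.h>
#include <stdio.h>

#define FLAG_FD_SB_ENABLED          (1u << 0)
#define FLAG_FD_SB_TO_CLOUD_FILTER  (1u << 1)

static void enter_cloud_filter_mode(uint32_t *flags)
{
	if (*flags & FLAG_FD_SB_ENABLED) {
		*flags &= ~FLAG_FD_SB_ENABLED;	   /* disable sideband */
		*flags |= FLAG_FD_SB_TO_CLOUD_FILTER; /* remember to restore */
	}
}

int main(void)
{
	uint32_t flags = FLAG_FD_SB_ENABLED;

	enter_cloud_filter_mode(&flags);
	printf("flags after: 0x%x\n", flags);	/* prints 0x2 */
	return 0;
}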
8819 * i40e_find_cloud_filter - Find the cloud filter in the list in i40e_find_cloud_filter()
8831 &vsi->back->cloud_filter_list, cloud_node) in i40e_find_cloud_filter()
8832 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) in i40e_find_cloud_filter()
8838 * i40e_delete_clsflower - Remove tc flower filters
8847 struct i40e_pf *pf = vsi->back; in i40e_delete_clsflower()
8850 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie); in i40e_delete_clsflower()
8853 return -EINVAL; in i40e_delete_clsflower()
8855 hash_del(&filter->cloud_node); in i40e_delete_clsflower()
8857 if (filter->dst_port) in i40e_delete_clsflower()
8864 dev_err(&pf->pdev->dev, in i40e_delete_clsflower()
8866 i40e_stat_str(&pf->hw, err)); in i40e_delete_clsflower()
8867 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); in i40e_delete_clsflower()
8870 pf->num_cloud_filters--; in i40e_delete_clsflower()
8871 if (!pf->num_cloud_filters) in i40e_delete_clsflower()
8872 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_delete_clsflower()
8873 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_delete_clsflower()
8874 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_delete_clsflower()
8875 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_delete_clsflower()
8876 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_delete_clsflower()
8882 * i40e_setup_tc_cls_flower - flower classifier offloads
8889 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc_cls_flower()
8891 switch (cls_flower->command) { in i40e_setup_tc_cls_flower()
8897 return -EOPNOTSUPP; in i40e_setup_tc_cls_flower()
8899 return -EOPNOTSUPP; in i40e_setup_tc_cls_flower()
8908 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data)) in i40e_setup_tc_block_cb()
8909 return -EOPNOTSUPP; in i40e_setup_tc_block_cb()
8916 return -EOPNOTSUPP; in i40e_setup_tc_block_cb()
8936 return -EOPNOTSUPP; in __i40e_setup_tc()
8941 * i40e_open - Called when a network interface is made active
8955 struct i40e_vsi *vsi = np->vsi; in i40e_open()
8956 struct i40e_pf *pf = vsi->back; in i40e_open()
8960 if (test_bit(__I40E_TESTING, pf->state) || in i40e_open()
8961 test_bit(__I40E_BAD_EEPROM, pf->state)) in i40e_open()
8962 return -EBUSY; in i40e_open()
8967 return -EAGAIN; in i40e_open()
8974 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
8976 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
8979 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); in i40e_open()
8986 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
8997 ret = netif_set_real_num_rx_queues(vsi->netdev, in i40e_netif_set_realnum_tx_rx_queues()
8998 vsi->num_queue_pairs); in i40e_netif_set_realnum_tx_rx_queues()
9002 return netif_set_real_num_tx_queues(vsi->netdev, in i40e_netif_set_realnum_tx_rx_queues()
9003 vsi->num_queue_pairs); in i40e_netif_set_realnum_tx_rx_queues()
9007 * i40e_vsi_open - set up a VSI and make it operational
9018 struct i40e_pf *pf = vsi->back; in i40e_vsi_open()
9034 if (vsi->netdev) { in i40e_vsi_open()
9035 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", in i40e_vsi_open()
9036 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); in i40e_vsi_open()
9046 } else if (vsi->type == I40E_VSI_FDIR) { in i40e_vsi_open()
9047 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", in i40e_vsi_open()
9048 dev_driver_string(&pf->pdev->dev), in i40e_vsi_open()
9049 dev_name(&pf->pdev->dev)); in i40e_vsi_open()
9055 err = -EINVAL; in i40e_vsi_open()
9073 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_open()
9080 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
9093 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_exit()
9094 hlist_del(&filter->fdir_node); in i40e_fdir_filter_exit()
9098 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { in i40e_fdir_filter_exit()
9099 list_del(&pit_entry->list); in i40e_fdir_filter_exit()
9102 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_fdir_filter_exit()
9104 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { in i40e_fdir_filter_exit()
9105 list_del(&pit_entry->list); in i40e_fdir_filter_exit()
9108 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_fdir_filter_exit()
9110 pf->fdir_pf_active_filters = 0; in i40e_fdir_filter_exit()
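
i40e_fdir_filter_exit() above relies on the "safe" list iterators (hlist_for_each_entry_safe, list_for_each_entry_safe), which cache the next node so the current one can be freed mid-walk. A standalone sketch of the same pattern on a plain singly linked list (not driver code):

/* Illustrative sketch: tear down a list while freeing nodes by saving
 * the next pointer before each free, the userspace analogue of
 * list_for_each_entry_safe().
 */
#include <stdio.h>
#include <stdlib.h>

struct filter {
	int id;
	struct filter *next;
};

static void filter_list_exit(struct filter **head)
{
	struct filter *f = *head, *tmp;

	while (f) {
		tmp = f->next;	/* save next before freeing this node */
		free(f);
		f = tmp;
	}
	*head = NULL;		/* analogous to re-initializing the head */
}

int main(void)
{
	struct filter *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct filter *f = malloc(sizeof(*f));

		if (!f)
			return 1;
		f->id = i;
		f->next = head;
		head = f;
	}
	filter_list_exit(&head);
	printf("list empty: %d\n", head == NULL);
	return 0;
}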
9159 * i40e_cloud_filter_exit - Cleans up the cloud filters
9171 &pf->cloud_filter_list, cloud_node) { in i40e_cloud_filter_exit()
9172 hlist_del(&cfilter->cloud_node); in i40e_cloud_filter_exit()
9175 pf->num_cloud_filters = 0; in i40e_cloud_filter_exit()
9177 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_cloud_filter_exit()
9178 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_cloud_filter_exit()
9179 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_cloud_filter_exit()
9180 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_cloud_filter_exit()
9181 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_cloud_filter_exit()
9186 * i40e_close - Disables a network interface
9189 * The close entry point is called when an interface is deactivated
9198 struct i40e_vsi *vsi = np->vsi; in i40e_close()
9206 * i40e_do_reset - Start a PF or Core Reset sequence
9231 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); in i40e_do_reset()
9232 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
9234 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
9242 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); in i40e_do_reset()
9243 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
9245 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
9246 i40e_flush(&pf->hw); in i40e_do_reset()
9252 * Resets only the PF-specific registers in i40e_do_reset()
9254 * This goes directly to the tear-down and rebuild of in i40e_do_reset()
9258 dev_dbg(&pf->pdev->dev, "PFR requested\n"); in i40e_do_reset()
9268 dev_info(&pf->pdev->dev, in i40e_do_reset()
9269 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? in i40e_do_reset()
9276 /* Find the VSI(s) that requested a re-init */ in i40e_do_reset()
9277 dev_info(&pf->pdev->dev, in i40e_do_reset()
9279 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9280 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
9284 vsi->state)) in i40e_do_reset()
9285 i40e_vsi_reinit_locked(pf->vsi[v]); in i40e_do_reset()
9291 dev_info(&pf->pdev->dev, "VSI down requested\n"); in i40e_do_reset()
9292 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9293 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
9297 vsi->state)) { in i40e_do_reset()
9298 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_do_reset()
9303 dev_info(&pf->pdev->dev, in i40e_do_reset()
9310 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9322 if (memcmp(&new_cfg->etscfg, in i40e_dcb_need_reconfig()
9323 &old_cfg->etscfg, in i40e_dcb_need_reconfig()
9324 sizeof(new_cfg->etscfg))) { in i40e_dcb_need_reconfig()
9326 if (memcmp(&new_cfg->etscfg.prioritytable, in i40e_dcb_need_reconfig()
9327 &old_cfg->etscfg.prioritytable, in i40e_dcb_need_reconfig()
9328 sizeof(new_cfg->etscfg.prioritytable))) { in i40e_dcb_need_reconfig()
9330 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); in i40e_dcb_need_reconfig()
9333 if (memcmp(&new_cfg->etscfg.tcbwtable, in i40e_dcb_need_reconfig()
9334 &old_cfg->etscfg.tcbwtable, in i40e_dcb_need_reconfig()
9335 sizeof(new_cfg->etscfg.tcbwtable))) in i40e_dcb_need_reconfig()
9336 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); in i40e_dcb_need_reconfig()
9338 if (memcmp(&new_cfg->etscfg.tsatable, in i40e_dcb_need_reconfig()
9339 &old_cfg->etscfg.tsatable, in i40e_dcb_need_reconfig()
9340 sizeof(new_cfg->etscfg.tsatable))) in i40e_dcb_need_reconfig()
9341 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); in i40e_dcb_need_reconfig()
9345 if (memcmp(&new_cfg->pfc, in i40e_dcb_need_reconfig()
9346 &old_cfg->pfc, in i40e_dcb_need_reconfig()
9347 sizeof(new_cfg->pfc))) { in i40e_dcb_need_reconfig()
9349 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); in i40e_dcb_need_reconfig()
9353 if (memcmp(&new_cfg->app, in i40e_dcb_need_reconfig()
9354 &old_cfg->app, in i40e_dcb_need_reconfig()
9355 sizeof(new_cfg->app))) { in i40e_dcb_need_reconfig()
9357 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); in i40e_dcb_need_reconfig()
9360 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); in i40e_dcb_need_reconfig()
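
i40e_dcb_need_reconfig() above is a plain memcmp()-based diff: each section of the old and new DCB config is compared and any mismatch accumulates into a single "need reconfig" verdict. A compact sketch of the idiom, with a hypothetical struct layout:

/* Illustrative sketch: detect configuration changes section by
 * section with memcmp() and accumulate one boolean verdict.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dcb_cfg {
	unsigned char prio_table[8];
	unsigned char bw_table[8];
};

static bool need_reconfig(const struct dcb_cfg *o, const struct dcb_cfg *n)
{
	bool reconfig = false;

	if (memcmp(n->prio_table, o->prio_table, sizeof(n->prio_table)))
		reconfig = true;	/* priority mapping changed */
	if (memcmp(n->bw_table, o->bw_table, sizeof(n->bw_table)))
		reconfig = true;	/* bandwidth table changed */
	return reconfig;
}

int main(void)
{
	struct dcb_cfg a = { .prio_table = {0} }, b = a;

	b.prio_table[0] = 1;
	printf("reconfig: %d\n", need_reconfig(&a, &b));	/* prints 1 */
	return 0;
}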
9365 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9373 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; in i40e_handle_lldp_event()
9374 struct i40e_hw *hw = &pf->hw; in i40e_handle_lldp_event()
9380 /* X710-T*L 2.5G and 5G speeds don't support DCB */ in i40e_handle_lldp_event()
9381 if (I40E_IS_X710TL_DEVICE(hw->device_id) && in i40e_handle_lldp_event()
9382 (hw->phy.link_info.link_speed & in i40e_handle_lldp_event()
9384 !(pf->flags & I40E_FLAG_DCB_CAPABLE)) in i40e_handle_lldp_event()
9386 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_handle_lldp_event()
9389 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) in i40e_handle_lldp_event()
9393 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) in i40e_handle_lldp_event()
9395 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); in i40e_handle_lldp_event()
9400 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; in i40e_handle_lldp_event()
9401 dev_dbg(&pf->pdev->dev, in i40e_handle_lldp_event()
9407 &hw->remote_dcbx_config); in i40e_handle_lldp_event()
9412 tmp_dcbx_cfg = hw->local_dcbx_config; in i40e_handle_lldp_event()
9415 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); in i40e_handle_lldp_event()
9417 ret = i40e_get_dcb_config(&pf->hw); in i40e_handle_lldp_event()
9419 /* X710-T*L 2.5G and 5G speeds don't support DCB */ in i40e_handle_lldp_event()
9420 if (I40E_IS_X710TL_DEVICE(hw->device_id) && in i40e_handle_lldp_event()
9421 (hw->phy.link_info.link_speed & in i40e_handle_lldp_event()
9423 dev_warn(&pf->pdev->dev, in i40e_handle_lldp_event()
9424 "DCB is not supported for X710-T*L 2.5/5G speeds\n"); in i40e_handle_lldp_event()
9425 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_handle_lldp_event()
9427 dev_info(&pf->pdev->dev, in i40e_handle_lldp_event()
9429 i40e_stat_str(&pf->hw, ret), in i40e_handle_lldp_event()
9430 i40e_aq_str(&pf->hw, in i40e_handle_lldp_event()
9431 pf->hw.aq.asq_last_status)); in i40e_handle_lldp_event()
9437 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, in i40e_handle_lldp_event()
9439 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); in i40e_handle_lldp_event()
9444 &hw->local_dcbx_config); in i40e_handle_lldp_event()
9446 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); in i40e_handle_lldp_event()
9452 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) in i40e_handle_lldp_event()
9453 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
9455 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
9457 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9466 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9471 /* Wait for the PF's queues to be disabled */ in i40e_handle_lldp_event()
9475 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_handle_lldp_event()
9479 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_handle_lldp_event()
9480 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_handle_lldp_event()
9489 * i40e_do_reset_safe - Protected reset path for userland calls.
9502 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9513 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; in i40e_handle_lan_overflow_event()
9514 u32 queue = le32_to_cpu(data->prtdcb_rupto); in i40e_handle_lan_overflow_event()
9515 u32 qtx_ctl = le32_to_cpu(data->otx_ctl); in i40e_handle_lan_overflow_event()
9516 struct i40e_hw *hw = &pf->hw; in i40e_handle_lan_overflow_event()
9520 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", in i40e_handle_lan_overflow_event()
9528 vf_id -= hw->func_caps.vf_base_id; in i40e_handle_lan_overflow_event()
9529 vf = &pf->vf[vf_id]; in i40e_handle_lan_overflow_event()
9538 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9545 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_cur_guaranteed_fd_count()
9551 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9558 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_current_fd_count()
9566 * i40e_get_global_fd_count - Get total FD filters programmed on device
9573 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); in i40e_get_global_fd_count()
9581 * i40e_reenable_fdir_sb - Restore FDir SB capability
9586 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_reenable_fdir_sb()
9587 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_reenable_fdir_sb()
9588 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_sb()
9589 …dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now… in i40e_reenable_fdir_sb()
9593 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9598 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { in i40e_reenable_fdir_atr()
9608 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_reenable_fdir_atr()
9609 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_atr()
9610 …dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no c… in i40e_reenable_fdir_atr()
9615 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9623 pf->fdir_pf_active_filters--; in i40e_delete_invalid_filter()
9624 pf->fd_inv = 0; in i40e_delete_invalid_filter()
9626 switch (filter->flow_type) { in i40e_delete_invalid_filter()
9628 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9631 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9634 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9637 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9640 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9643 pf->fd_sctp6_filter_cnt--; in i40e_delete_invalid_filter()
9646 switch (filter->ipl4_proto) { in i40e_delete_invalid_filter()
9648 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9651 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9654 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9657 pf->fd_ip4_filter_cnt--; in i40e_delete_invalid_filter()
9662 switch (filter->ipl4_proto) { in i40e_delete_invalid_filter()
9664 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9667 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9670 pf->fd_sctp6_filter_cnt--; in i40e_delete_invalid_filter()
9673 pf->fd_ip6_filter_cnt--; in i40e_delete_invalid_filter()
9680 hlist_del(&filter->fdir_node); in i40e_delete_invalid_filter()
9685 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9694 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_check_and_reenable()
9697 /* Check if we have enough room to re-enable FDir SB capability. */ in i40e_fdir_check_and_reenable()
9699 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fdir_check_and_reenable()
9700 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || in i40e_fdir_check_and_reenable()
9701 (pf->fd_add_err == 0) || in i40e_fdir_check_and_reenable()
9702 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) in i40e_fdir_check_and_reenable()
9705 /* We should wait for even more space before re-enabling ATR. in i40e_fdir_check_and_reenable()
9709 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) && in i40e_fdir_check_and_reenable()
9710 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0) in i40e_fdir_check_and_reenable()
9714 if (pf->fd_inv > 0) { in i40e_fdir_check_and_reenable()
9716 &pf->fdir_filter_list, fdir_node) in i40e_fdir_check_and_reenable()
9717 if (filter->fd_id == pf->fd_inv) in i40e_fdir_check_and_reenable()
9725 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9736 if (!time_after(jiffies, pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9741 * should not re-enable ATR for some time. in i40e_fdir_flush_and_replay()
9743 min_flush_time = pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9745 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; in i40e_fdir_flush_and_replay()
9749 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9750 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); in i40e_fdir_flush_and_replay()
9754 pf->fd_flush_timestamp = jiffies; in i40e_fdir_flush_and_replay()
9755 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9757 wr32(&pf->hw, I40E_PFQF_CTL_1, in i40e_fdir_flush_and_replay()
9759 i40e_flush(&pf->hw); in i40e_fdir_flush_and_replay()
9760 pf->fd_flush_cnt++; in i40e_fdir_flush_and_replay()
9761 pf->fd_add_err = 0; in i40e_fdir_flush_and_replay()
9763 /* Check FD flush status every 5-6msec */ in i40e_fdir_flush_and_replay()
9765 reg = rd32(&pf->hw, I40E_PFQF_CTL_1); in i40e_fdir_flush_and_replay()
9768 } while (flush_wait_retry--); in i40e_fdir_flush_and_replay()
9770 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); in i40e_fdir_flush_and_replay()
9773 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); in i40e_fdir_flush_and_replay()
9774 if (!disable_atr && !pf->fd_tcp4_filter_cnt) in i40e_fdir_flush_and_replay()
9775 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9776 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fdir_flush_and_replay()
9777 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9778 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); in i40e_fdir_flush_and_replay()
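
The flush path above polls a status register every few milliseconds with a bounded retry budget, warning instead of spinning forever if the hardware never reports completion. A standalone sketch of that bounded poll (read_flush_status() is a stand-in for the register read):

/* Illustrative sketch: poll a completion flag with a sleep between
 * attempts and a hard retry limit.
 */
#include <stdio.h>
#include <time.h>

static int read_flush_status(void)
{
	static int countdown = 3;	/* pretend hw finishes on poll 3 */
	return --countdown > 0;		/* nonzero means still flushing */
}

static int wait_for_flush(int retries)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };

	do {
		if (!read_flush_status())
			return 0;		/* flush complete */
		nanosleep(&delay, NULL);	/* check every ~5 ms */
	} while (retries--);
	return -1;	/* "table did not flush, needs more time" */
}

int main(void)
{
	printf("flush result: %d\n", wait_for_flush(50));
	return 0;
}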
9783 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9788 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; in i40e_get_current_atr_cnt()
9792 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9799 if (test_bit(__I40E_DOWN, pf->state)) in i40e_fdir_reinit_subtask()
9802 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_reinit_subtask()
9810 * i40e_vsi_link_event - notify VSI of a link event
9816 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_link_event()
9819 switch (vsi->type) { in i40e_vsi_link_event()
9821 if (!vsi->netdev || !vsi->netdev_registered) in i40e_vsi_link_event()
9825 netif_carrier_on(vsi->netdev); in i40e_vsi_link_event()
9826 netif_tx_wake_all_queues(vsi->netdev); in i40e_vsi_link_event()
9828 netif_carrier_off(vsi->netdev); in i40e_vsi_link_event()
9829 netif_tx_stop_all_queues(vsi->netdev); in i40e_vsi_link_event()
9845 * i40e_veb_link_event - notify elements on the veb of a link event
9854 if (!veb || !veb->pf) in i40e_veb_link_event()
9856 pf = veb->pf; in i40e_veb_link_event()
9860 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9861 i40e_veb_link_event(pf->veb[i], link_up); in i40e_veb_link_event()
9864 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_veb_link_event()
9865 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9866 i40e_vsi_link_event(pf->vsi[i], link_up); in i40e_veb_link_event()
9870 * i40e_link_event - Update netif_carrier status
9875 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_link_event()
9884 pf->hw.phy.get_link_info = true; in i40e_link_event()
9885 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); in i40e_link_event()
9886 status = i40e_get_link_status(&pf->hw, &new_link); in i40e_link_event()
9890 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9895 set_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9896 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", in i40e_link_event()
9901 old_link_speed = pf->hw.phy.link_info_old.link_speed; in i40e_link_event()
9902 new_link_speed = pf->hw.phy.link_info.link_speed; in i40e_link_event()
9906 (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_link_event()
9907 new_link == netif_carrier_ok(vsi->netdev))) in i40e_link_event()
9915 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_link_event()
9916 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); in i40e_link_event()
9920 if (pf->vf) in i40e_link_event()
9923 if (pf->flags & I40E_FLAG_PTP) in i40e_link_event()
9929 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) in i40e_link_event()
9936 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n"); in i40e_link_event()
9937 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); in i40e_link_event()
9940 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | in i40e_link_event()
9943 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_link_event()
9945 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_link_event()
9946 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_link_event()
9953 * i40e_watchdog_subtask - periodic checks not using event driven response
9961 if (test_bit(__I40E_DOWN, pf->state) || in i40e_watchdog_subtask()
9962 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_watchdog_subtask()
9966 if (time_before(jiffies, (pf->service_timer_previous + in i40e_watchdog_subtask()
9967 pf->service_timer_period))) in i40e_watchdog_subtask()
9969 pf->service_timer_previous = jiffies; in i40e_watchdog_subtask()
9971 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || in i40e_watchdog_subtask()
9972 test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) in i40e_watchdog_subtask()
9978 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_watchdog_subtask()
9979 if (pf->vsi[i] && pf->vsi[i]->netdev) in i40e_watchdog_subtask()
9980 i40e_update_stats(pf->vsi[i]); in i40e_watchdog_subtask()
9982 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { in i40e_watchdog_subtask()
9985 if (pf->veb[i]) in i40e_watchdog_subtask()
9986 i40e_update_veb_stats(pf->veb[i]); in i40e_watchdog_subtask()
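
The watchdog above rate-limits itself with time_before(jiffies, previous + period), which stays correct when the jiffies counter wraps because it compares the signed difference rather than the raw values. A standalone sketch of that trick:

/* Illustrative sketch: wraparound-safe "run at most once per period"
 * check on a free-running 32-bit counter, the idea behind the
 * kernel's time_before().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool time_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* safe across counter wrap */
}

static uint32_t now, last_run;

static bool watchdog_should_run(uint32_t period)
{
	if (time_before(now, last_run + period))
		return false;		/* too soon, skip this tick */
	last_run = now;
	return true;
}

int main(void)
{
	now = 0xfffffff0u;		/* near the wrap point on purpose */
	last_run = now;
	now += 5;
	printf("at +5:  %d\n", watchdog_should_run(16));	/* 0 */
	now += 20;
	printf("at +25: %d\n", watchdog_should_run(16));	/* 1 */
	return 0;
}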
9994 * i40e_reset_subtask - Set up for resetting the device and driver
10001 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { in i40e_reset_subtask()
10003 clear_bit(__I40E_REINIT_REQUESTED, pf->state); in i40e_reset_subtask()
10005 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10007 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10009 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10011 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10013 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
10015 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
10017 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { in i40e_reset_subtask()
10019 clear_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_reset_subtask()
10025 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { in i40e_reset_subtask()
10033 !test_bit(__I40E_DOWN, pf->state) && in i40e_reset_subtask()
10034 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_reset_subtask()
10040 * i40e_handle_link_event - Handle link event
10048 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; in i40e_handle_link_event()
10050 /* Do a new status request to re-enable LSE reporting in i40e_handle_link_event()
10059 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) { in i40e_handle_link_event()
10060 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10062 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10068 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && in i40e_handle_link_event()
10069 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && in i40e_handle_link_event()
10070 (!(status->link_info & I40E_AQ_LINK_UP)) && in i40e_handle_link_event()
10071 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { in i40e_handle_link_event()
10072 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10074 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
10081 * i40e_clean_adminq_subtask - Clean the AdminQ rings
10087 struct i40e_hw *hw = &pf->hw; in i40e_clean_adminq_subtask()
10095 if (test_bit(__I40E_RESET_FAILED, pf->state)) in i40e_clean_adminq_subtask()
10099 val = rd32(&pf->hw, pf->hw.aq.arq.len); in i40e_clean_adminq_subtask()
10102 if (hw->debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10103 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); in i40e_clean_adminq_subtask()
10107 if (hw->debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10108 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
10110 pf->arq_overflows++; in i40e_clean_adminq_subtask()
10113 if (hw->debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10114 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
10118 wr32(&pf->hw, pf->hw.aq.arq.len, val); in i40e_clean_adminq_subtask()
10120 val = rd32(&pf->hw, pf->hw.aq.asq.len); in i40e_clean_adminq_subtask()
10123 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10124 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); in i40e_clean_adminq_subtask()
10128 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10129 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
10133 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
10134 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
10138 wr32(&pf->hw, pf->hw.aq.asq.len, val); in i40e_clean_adminq_subtask()
10150 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); in i40e_clean_adminq_subtask()
10171 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); in i40e_clean_adminq_subtask()
10179 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); in i40e_clean_adminq_subtask()
10183 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); in i40e_clean_adminq_subtask()
10188 i40e_debug(&pf->hw, I40E_DEBUG_NVM, in i40e_clean_adminq_subtask()
10193 dev_info(&pf->pdev->dev, in i40e_clean_adminq_subtask()
10198 } while (i++ < pf->adminq_work_limit); in i40e_clean_adminq_subtask()
10200 if (i < pf->adminq_work_limit) in i40e_clean_adminq_subtask()
10201 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_clean_adminq_subtask()
10203 /* re-enable Admin queue interrupt cause */ in i40e_clean_adminq_subtask()
10213 * i40e_verify_eeprom - make sure eeprom is good to use
10220 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
10223 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
10225 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", in i40e_verify_eeprom()
10227 set_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
10231 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_verify_eeprom()
10232 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); in i40e_verify_eeprom()
10233 clear_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
10241 * enable switch loop back or die - no point in a return value
10245 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_enable_pf_switch_lb()
10249 ctxt.seid = pf->main_vsi_seid; in i40e_enable_pf_switch_lb()
10250 ctxt.pf_num = pf->hw.pf_id; in i40e_enable_pf_switch_lb()
10252 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
10254 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
10256 i40e_stat_str(&pf->hw, ret), in i40e_enable_pf_switch_lb()
10257 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
10264 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
10266 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
10268 i40e_stat_str(&pf->hw, ret), in i40e_enable_pf_switch_lb()
10269 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
10277 * disable switch loop back or die - no point in a return value
10281 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_disable_pf_switch_lb()
10285 ctxt.seid = pf->main_vsi_seid; in i40e_disable_pf_switch_lb()
10286 ctxt.pf_num = pf->hw.pf_id; in i40e_disable_pf_switch_lb()
10288 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
10290 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
10292 i40e_stat_str(&pf->hw, ret), in i40e_disable_pf_switch_lb()
10293 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
10300 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
10302 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
10304 i40e_stat_str(&pf->hw, ret), in i40e_disable_pf_switch_lb()
10305 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
10310 * i40e_config_bridge_mode - Configure the HW bridge mode
10319 struct i40e_pf *pf = veb->pf; in i40e_config_bridge_mode()
10321 if (pf->hw.debug_mask & I40E_DEBUG_LAN) in i40e_config_bridge_mode()
10322 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", in i40e_config_bridge_mode()
10323 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); in i40e_config_bridge_mode()
10324 if (veb->bridge_mode & BRIDGE_MODE_VEPA) in i40e_config_bridge_mode()
10331 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10342 struct i40e_pf *pf = veb->pf; in i40e_reconstitute_veb()
10347 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { in i40e_reconstitute_veb()
10348 if (pf->vsi[v] && in i40e_reconstitute_veb()
10349 pf->vsi[v]->veb_idx == veb->idx && in i40e_reconstitute_veb()
10350 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_reconstitute_veb()
10351 ctl_vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10356 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10357 "missing owner VSI for veb_idx %d\n", veb->idx); in i40e_reconstitute_veb()
10358 ret = -ENOENT; in i40e_reconstitute_veb()
10361 if (ctl_vsi != pf->vsi[pf->lan_vsi]) in i40e_reconstitute_veb()
10362 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_reconstitute_veb()
10365 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10367 veb->idx, ret); in i40e_reconstitute_veb()
10377 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_reconstitute_veb()
10378 veb->bridge_mode = BRIDGE_MODE_VEB; in i40e_reconstitute_veb()
10380 veb->bridge_mode = BRIDGE_MODE_VEPA; in i40e_reconstitute_veb()
10384 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_reconstitute_veb()
10385 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) in i40e_reconstitute_veb()
10388 if (pf->vsi[v]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10389 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10391 vsi->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10394 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10403 /* create any VEBs attached to this VEB - RECURSION */ in i40e_reconstitute_veb()
10405 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10406 pf->veb[veb_idx]->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10407 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); in i40e_reconstitute_veb()
10418 * i40e_get_capabilities - get info about the HW
10434 return -ENOMEM; in i40e_get_capabilities()
10437 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, in i40e_get_capabilities()
10443 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { in i40e_get_capabilities()
10446 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { in i40e_get_capabilities()
10447 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10449 i40e_stat_str(&pf->hw, err), in i40e_get_capabilities()
10450 i40e_aq_str(&pf->hw, in i40e_get_capabilities()
10451 pf->hw.aq.asq_last_status)); in i40e_get_capabilities()
10452 return -ENODEV; in i40e_get_capabilities()
10456 if (pf->hw.debug_mask & I40E_DEBUG_USER) { in i40e_get_capabilities()
10458 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10460 pf->hw.pf_id, pf->hw.func_caps.num_vfs, in i40e_get_capabilities()
10461 pf->hw.func_caps.num_msix_vectors, in i40e_get_capabilities()
10462 pf->hw.func_caps.num_msix_vectors_vf, in i40e_get_capabilities()
10463 pf->hw.func_caps.fd_filters_guaranteed, in i40e_get_capabilities()
10464 pf->hw.func_caps.fd_filters_best_effort, in i40e_get_capabilities()
10465 pf->hw.func_caps.num_tx_qp, in i40e_get_capabilities()
10466 pf->hw.func_caps.num_vsis); in i40e_get_capabilities()
10468 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10470 pf->hw.dev_caps.switch_mode, in i40e_get_capabilities()
10471 pf->hw.dev_caps.valid_functions); in i40e_get_capabilities()
10472 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10473 "SR-IOV=%d, num_vfs for all function=%u\n", in i40e_get_capabilities()
10474 pf->hw.dev_caps.sr_iov_1_1, in i40e_get_capabilities()
10475 pf->hw.dev_caps.num_vfs); in i40e_get_capabilities()
10476 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10478 pf->hw.dev_caps.num_vsis, in i40e_get_capabilities()
10479 pf->hw.dev_caps.num_rx_qp, in i40e_get_capabilities()
10480 pf->hw.dev_caps.num_tx_qp); in i40e_get_capabilities()
10484 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ in i40e_get_capabilities()
10485 + pf->hw.func_caps.num_vfs) in i40e_get_capabilities()
10486 if (pf->hw.revision_id == 0 && in i40e_get_capabilities()
10487 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { in i40e_get_capabilities()
10488 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10490 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); in i40e_get_capabilities()
10491 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; in i40e_get_capabilities()
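
i40e_get_capabilities() above uses a grow-and-retry loop: it queries firmware with a guessed buffer, and when the AdminQ answers ENOMEM it reallocates at the size the firmware reported and asks again. A standalone sketch of that pattern (query_caps() is a stand-in for the AdminQ call):

/* Illustrative sketch: retry a query with a larger buffer whenever
 * the callee reports the size it actually needs.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int query_caps(void *buf, size_t len, size_t *needed)
{
	const char data[] = "capability blob";

	if (len < sizeof(data)) {
		*needed = sizeof(data);	/* report the required size */
		return -ENOMEM;
	}
	memcpy(buf, data, sizeof(data));
	return 0;
}

static void *get_caps(size_t *len)
{
	size_t need = 8;	/* deliberately small first guess */
	void *buf;

	for (;;) {
		buf = malloc(need);
		if (!buf)
			return NULL;
		if (query_caps(buf, need, &need) == 0) {
			*len = need;
			return buf;
		}
		free(buf);	/* too small: retry with reported size */
	}
}

int main(void)
{
	size_t len;
	char *caps = get_caps(&len);

	printf("caps: %s\n", caps ? caps : "(none)");
	free(caps);
	return 0;
}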
10500 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10510 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { in i40e_fdir_sb_setup()
10519 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); in i40e_fdir_sb_setup()
10522 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_sb_setup()
10531 pf->vsi[pf->lan_vsi]->seid, 0); in i40e_fdir_sb_setup()
10533 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); in i40e_fdir_sb_setup()
10534 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_fdir_sb_setup()
10535 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_fdir_sb_setup()
10544 * i40e_fdir_teardown - release the Flow Director resources
10558 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10568 struct i40e_pf *pf = vsi->back; in i40e_rebuild_cloud_filters()
10573 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, in i40e_rebuild_cloud_filters()
10575 if (cfilter->seid != seid) in i40e_rebuild_cloud_filters()
10578 if (cfilter->dst_port) in i40e_rebuild_cloud_filters()
10585 dev_dbg(&pf->pdev->dev, in i40e_rebuild_cloud_filters()
10587 i40e_stat_str(&pf->hw, ret), in i40e_rebuild_cloud_filters()
10588 i40e_aq_str(&pf->hw, in i40e_rebuild_cloud_filters()
10589 pf->hw.aq.asq_last_status)); in i40e_rebuild_cloud_filters()
10597 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10607 if (list_empty(&vsi->ch_list)) in i40e_rebuild_channels()
10610 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_rebuild_channels()
10611 if (!ch->initialized) in i40e_rebuild_channels()
10614 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); in i40e_rebuild_channels()
10616 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10618 vsi->uplink_seid); in i40e_rebuild_channels()
10622 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); in i40e_rebuild_channels()
10624 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10626 ch->seid); in i40e_rebuild_channels()
10630 vsi->next_base_queue = vsi->next_base_queue + in i40e_rebuild_channels()
10631 ch->num_queue_pairs; in i40e_rebuild_channels()
10632 if (ch->max_tx_rate) { in i40e_rebuild_channels()
10633 u64 credits = ch->max_tx_rate; in i40e_rebuild_channels()
10635 if (i40e_set_bw_limit(vsi, ch->seid, in i40e_rebuild_channels()
10636 ch->max_tx_rate)) in i40e_rebuild_channels()
10637 return -EINVAL; in i40e_rebuild_channels()
10640 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10641 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_rebuild_channels()
10642 ch->max_tx_rate, in i40e_rebuild_channels()
10644 ch->seid); in i40e_rebuild_channels()
10646 ret = i40e_rebuild_cloud_filters(vsi, ch->seid); in i40e_rebuild_channels()
10648 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10650 ch->seid); in i40e_rebuild_channels()
10658 * i40e_clean_xps_state - clean xps state for every tx_ring
10665 if (vsi->tx_rings) in i40e_clean_xps_state()
10666 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_clean_xps_state()
10667 if (vsi->tx_rings[i]) in i40e_clean_xps_state()
10669 vsi->tx_rings[i]->state); in i40e_clean_xps_state()
10673 * i40e_prep_for_reset - prep for the core to reset
10680 struct i40e_hw *hw = &pf->hw; in i40e_prep_for_reset()
10684 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_prep_for_reset()
10685 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_prep_for_reset()
10687 if (i40e_check_asq_alive(&pf->hw)) in i40e_prep_for_reset()
10690 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); in i40e_prep_for_reset()
10695 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_prep_for_reset()
10696 if (pf->vsi[v]) { in i40e_prep_for_reset()
10697 i40e_clean_xps_state(pf->vsi[v]); in i40e_prep_for_reset()
10698 pf->vsi[v]->seid = 0; in i40e_prep_for_reset()
10702 i40e_shutdown_adminq(&pf->hw); in i40e_prep_for_reset()
10705 if (hw->hmc.hmc_obj) { in i40e_prep_for_reset()
10708 dev_warn(&pf->pdev->dev, in i40e_prep_for_reset()
10719 * i40e_send_version - update firmware with driver version
10731 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); in i40e_send_version()
10735 * i40e_get_oem_version - get OEM specific version information
10775 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release; in i40e_get_oem_version()
10776 hw->nvm.eetrack = I40E_OEM_EETRACK_ID; in i40e_get_oem_version()
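
The OEM version lines above pack two sub-fields into one word with a shift-and-or. A tiny sketch of that packing and its inverse (the 16-bit shift width here is an assumption for illustration):

/* Illustrative sketch: pack and unpack version sub-fields in one
 * 32-bit word.
 */
#include <stdint.h>
#include <stdio.h>

#define SNAP_SHIFT 16

static uint32_t pack_oem_ver(uint16_t gen_snap, uint16_t release)
{
	return ((uint32_t)gen_snap << SNAP_SHIFT) | release;
}

int main(void)
{
	uint32_t v = pack_oem_ver(0x0102, 0x0304);

	printf("oem_ver  = 0x%08x\n", v);		/* 0x01020304 */
	printf("gen_snap = 0x%04x\n", v >> SNAP_SHIFT);
	printf("release  = 0x%04x\n", v & 0xffff);
	return 0;
}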
10780 * i40e_reset - wait for a core reset to finish; reset the PF if no CoreR is seen
10785 struct i40e_hw *hw = &pf->hw; in i40e_reset()
10790 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); in i40e_reset()
10791 set_bit(__I40E_RESET_FAILED, pf->state); in i40e_reset()
10792 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_reset()
10794 pf->pfr_count++; in i40e_reset()
10800 * i40e_rebuild - rebuild using a saved config
10802 * @reinit: if the Main VSI needs to be re-initialized.
10809 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_rebuild()
10810 struct i40e_hw *hw = &pf->hw; in i40e_rebuild()
10815 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && in i40e_rebuild()
10817 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); in i40e_rebuild()
10819 if (test_bit(__I40E_DOWN, pf->state) && in i40e_rebuild()
10820 !test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_rebuild()
10822 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); in i40e_rebuild()
10825 ret = i40e_init_adminq(&pf->hw); in i40e_rebuild()
10827 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", in i40e_rebuild()
10828 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10829 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10832 i40e_get_oem_version(&pf->hw); in i40e_rebuild()
10834 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { in i40e_rebuild()
10839 /* re-verify the eeprom if we just had an EMP reset */ in i40e_rebuild()
10840 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) in i40e_rebuild()
10847 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_rebuild()
10865 free_irq(pf->pdev->irq, pf); in i40e_rebuild()
10885 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, in i40e_rebuild()
10886 hw->func_caps.num_rx_qp, 0, 0); in i40e_rebuild()
10888 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10893 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10898 /* Enable FW to write a default DCB config on link-up in i40e_rebuild()
10905 if (I40E_IS_X710TL_DEVICE(hw->device_id) && in i40e_rebuild()
10906 (hw->phy.link_info.link_speed & in i40e_rebuild()
10909 dev_warn(&pf->pdev->dev, in i40e_rebuild()
10910 "DCB is not supported for X710-T*L 2.5/5G speeds\n"); in i40e_rebuild()
10911 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_rebuild()
10916 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", in i40e_rebuild()
10918 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_rebuild()
10934 ret = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_rebuild()
10939 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", in i40e_rebuild()
10940 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10941 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10950 if (vsi->uplink_seid != pf->mac_seid) { in i40e_rebuild()
10951 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); in i40e_rebuild()
10954 if (!pf->veb[v]) in i40e_rebuild()
10957 if (pf->veb[v]->uplink_seid == pf->mac_seid || in i40e_rebuild()
10958 pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10959 ret = i40e_reconstitute_veb(pf->veb[v]); in i40e_rebuild()
10970 if (pf->veb[v]->uplink_seid == pf->mac_seid) { in i40e_rebuild()
10971 dev_info(&pf->pdev->dev, in i40e_rebuild()
10974 vsi->uplink_seid = pf->mac_seid; in i40e_rebuild()
10976 } else if (pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10977 dev_info(&pf->pdev->dev, in i40e_rebuild()
10985 if (vsi->uplink_seid == pf->mac_seid) { in i40e_rebuild()
10986 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); in i40e_rebuild()
10990 dev_info(&pf->pdev->dev, in i40e_rebuild()
10996 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_rebuild()
10998 vsi->mqprio_qopt.max_rate[0]); in i40e_rebuild()
11001 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_rebuild()
11007 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild()
11008 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_rebuild()
11011 vsi->seid); in i40e_rebuild()
11014 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); in i40e_rebuild()
11039 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_rebuild()
11041 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_rebuild()
11043 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", in i40e_rebuild()
11044 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
11045 i40e_aq_str(&pf->hw, in i40e_rebuild()
11046 pf->hw.aq.asq_last_status)); in i40e_rebuild()
11049 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_rebuild()
11058 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_rebuild()
11059 pf->main_vsi_seid); in i40e_rebuild()
11069 ret = i40e_set_promiscuous(pf, pf->cur_promisc); in i40e_rebuild()
11071 dev_warn(&pf->pdev->dev, in i40e_rebuild()
11073 pf->cur_promisc ? "on" : "off", in i40e_rebuild()
11074 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
11075 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
11089 clear_bit(__I40E_RESET_FAILED, pf->state); in i40e_rebuild()
11091 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_rebuild()
11092 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); in i40e_rebuild()
11096 * i40e_reset_and_rebuild - reset and rebuild using a saved config
11098 * @reinit: if the Main VSI needs to be re-initialized.
11107 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_reset_and_rebuild()
11109 /* Now we wait for GRST to settle out. in i40e_reset_and_rebuild()
11119 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
11141 struct i40e_hw *hw = &pf->hw; in i40e_handle_mdd_event()
11147 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) in i40e_handle_mdd_event()
11160 I40E_GL_MDET_TX_QUEUE_SHIFT) - in i40e_handle_mdd_event()
11161 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
11163 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x … in i40e_handle_mdd_event()
11175 I40E_GL_MDET_RX_QUEUE_SHIFT) - in i40e_handle_mdd_event()
11176 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
11178 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02… in i40e_handle_mdd_event()
11188 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); in i40e_handle_mdd_event()
11193 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n"); in i40e_handle_mdd_event()
11198 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { in i40e_handle_mdd_event()
11199 vf = &(pf->vf[i]); in i40e_handle_mdd_event()
11203 vf->num_mdd_events++; in i40e_handle_mdd_event()
11204 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
11206 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
11207 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
11208 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); in i40e_handle_mdd_event()
11214 vf->num_mdd_events++; in i40e_handle_mdd_event()
11215 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
11217 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
11218 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
11219 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); in i40e_handle_mdd_event()
11223 /* re-enable mdd interrupt cause */ in i40e_handle_mdd_event()
11224 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_handle_mdd_event()
11232 * i40e_service_task - Run the driver's async subtasks
11243 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_service_task()
11244 test_bit(__I40E_SUSPENDED, pf->state)) in i40e_service_task()
11247 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) in i40e_service_task()
11250 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_service_task()
11251 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); in i40e_service_task()
11258 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { in i40e_service_task()
11260 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], in i40e_service_task()
11265 pf->state)) in i40e_service_task()
11267 pf->vsi[pf->lan_vsi]); in i40e_service_task()
11278 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_service_task()
11282 * rather than wait for the timer to tick again. in i40e_service_task()
11284 if (time_after(jiffies, (start_time + pf->service_timer_period)) || in i40e_service_task()
11285 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || in i40e_service_task()
11286 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || in i40e_service_task()
11287 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) in i40e_service_task()
11292 * i40e_service_timer - timer callback
11299 mod_timer(&pf->service_timer, in i40e_service_timer()
11300 round_jiffies(jiffies + pf->service_timer_period)); in i40e_service_timer()
11305 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
11310 struct i40e_pf *pf = vsi->back; in i40e_set_num_rings_in_vsi()
11312 switch (vsi->type) { in i40e_set_num_rings_in_vsi()
11314 vsi->alloc_queue_pairs = pf->num_lan_qps; in i40e_set_num_rings_in_vsi()
11315 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11316 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11318 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11319 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11321 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_set_num_rings_in_vsi()
11322 vsi->num_q_vectors = pf->num_lan_msix; in i40e_set_num_rings_in_vsi()
11324 vsi->num_q_vectors = 1; in i40e_set_num_rings_in_vsi()
11329 vsi->alloc_queue_pairs = 1; in i40e_set_num_rings_in_vsi()
11330 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
11332 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
11334 vsi->num_q_vectors = pf->num_fdsb_msix; in i40e_set_num_rings_in_vsi()
11338 vsi->alloc_queue_pairs = pf->num_vmdq_qps; in i40e_set_num_rings_in_vsi()
11339 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11340 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11342 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11343 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11345 vsi->num_q_vectors = pf->num_vmdq_msix; in i40e_set_num_rings_in_vsi()
11349 vsi->alloc_queue_pairs = pf->num_vf_qps; in i40e_set_num_rings_in_vsi()
11350 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11351 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11353 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11354 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11360 return -ENODATA; in i40e_set_num_rings_in_vsi()
11364 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
11365 vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
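
The descriptor counts above are rounded with ALIGN(), which rounds up to a power-of-two multiple using the usual mask trick. A standalone sketch:

/* Illustrative sketch: round a ring size up to a power-of-two
 * multiple, as the kernel's ALIGN() macro does.
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* e.g. align a requested ring size to a 32-descriptor multiple */
	printf("%d\n", ALIGN(500, 32));	/* 512 */
	printf("%d\n", ALIGN(512, 32));	/* 512 (already aligned) */
	return 0;
}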
11372 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11386 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * in i40e_vsi_alloc_arrays()
11388 vsi->tx_rings = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11389 if (!vsi->tx_rings) in i40e_vsi_alloc_arrays()
11390 return -ENOMEM; in i40e_vsi_alloc_arrays()
11391 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11393 vsi->xdp_rings = next_rings; in i40e_vsi_alloc_arrays()
11394 next_rings += vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11396 vsi->rx_rings = next_rings; in i40e_vsi_alloc_arrays()
11400 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; in i40e_vsi_alloc_arrays()
11401 vsi->q_vectors = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11402 if (!vsi->q_vectors) { in i40e_vsi_alloc_arrays()
11403 ret = -ENOMEM; in i40e_vsi_alloc_arrays()
11410 kfree(vsi->tx_rings); in i40e_vsi_alloc_arrays()
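
i40e_vsi_alloc_arrays() above makes one zeroed allocation and carves the Tx, XDP, and Rx ring-pointer arrays out of it back to back, so a single alloc/free pair manages all three. A standalone sketch of that carving:

/* Illustrative sketch: one allocation holds several equally sized
 * pointer arrays laid out consecutively.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring;	/* opaque here */

int main(void)
{
	int nqueues = 4, want_xdp = 1;
	int narrays = want_xdp ? 3 : 2;
	struct ring **tx, **xdp = NULL, **rx, **next;

	tx = calloc((size_t)nqueues * narrays, sizeof(*tx));
	if (!tx)
		return 1;
	next = tx + nqueues;
	if (want_xdp) {
		xdp = next;
		next += nqueues;
	}
	rx = next;

	printf("tx=%p xdp=%p rx=%p\n", (void *)tx, (void *)xdp, (void *)rx);
	free(tx);	/* one free releases all three arrays */
	return 0;
}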
11415 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11424 int ret = -ENODEV; in i40e_vsi_mem_alloc()
11430 mutex_lock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
11438 i = pf->next_vsi; in i40e_vsi_mem_alloc()
11439 while (i < pf->num_alloc_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11441 if (i >= pf->num_alloc_vsi) { in i40e_vsi_mem_alloc()
11443 while (i < pf->next_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11447 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { in i40e_vsi_mem_alloc()
11450 ret = -ENODEV; in i40e_vsi_mem_alloc()
11453 pf->next_vsi = ++i; in i40e_vsi_mem_alloc()
11457 ret = -ENOMEM; in i40e_vsi_mem_alloc()
11460 vsi->type = type; in i40e_vsi_mem_alloc()
11461 vsi->back = pf; in i40e_vsi_mem_alloc()
11462 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_vsi_mem_alloc()
11463 vsi->flags = 0; in i40e_vsi_mem_alloc()
11464 vsi->idx = vsi_idx; in i40e_vsi_mem_alloc()
11465 vsi->int_rate_limit = 0; in i40e_vsi_mem_alloc()
11466 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? in i40e_vsi_mem_alloc()
11467 pf->rss_table_size : 64; in i40e_vsi_mem_alloc()
11468 vsi->netdev_registered = false; in i40e_vsi_mem_alloc()
11469 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; in i40e_vsi_mem_alloc()
11470 hash_init(vsi->mac_filter_hash); in i40e_vsi_mem_alloc()
11471 vsi->irqs_ready = false; in i40e_vsi_mem_alloc()
11474 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); in i40e_vsi_mem_alloc()
11475 if (!vsi->af_xdp_zc_qps) in i40e_vsi_mem_alloc()
11491 spin_lock_init(&vsi->mac_filter_hash_lock); in i40e_vsi_mem_alloc()
11492 pf->vsi[vsi_idx] = vsi; in i40e_vsi_mem_alloc()
11497 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_mem_alloc()
11498 pf->next_vsi = i - 1; in i40e_vsi_mem_alloc()
11501 mutex_unlock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
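
The slot search in i40e_vsi_mem_alloc() above starts from a rotating next_vsi hint, scans to the end of the table, then wraps to the front, so allocation does not always probe from index 0. A standalone sketch of that wrap-around search:

/* Illustrative sketch: find a free slot starting from a rotating
 * hint, wrapping once before declaring the table full.
 */
#include <stdio.h>

#define NSLOTS 8

static void *slots[NSLOTS];
static int next_hint;

static int find_free_slot(void)
{
	int i = next_hint;

	while (i < NSLOTS && slots[i])
		i++;
	if (i >= NSLOTS) {	/* wrap and search the front part */
		i = 0;
		while (i < next_hint && slots[i])
			i++;
		if (i == next_hint)
			return -1;	/* table full */
	}
	next_hint = i + 1;
	return i;
}

int main(void)
{
	slots[0] = slots[1] = (void *)1;	/* slots 0 and 1 taken */
	next_hint = 1;
	printf("got slot %d\n", find_free_slot());	/* prints 2 */
	return 0;
}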
11506 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11517 kfree(vsi->q_vectors); in i40e_vsi_free_arrays()
11518 vsi->q_vectors = NULL; in i40e_vsi_free_arrays()
11520 kfree(vsi->tx_rings); in i40e_vsi_free_arrays()
11521 vsi->tx_rings = NULL; in i40e_vsi_free_arrays()
11522 vsi->rx_rings = NULL; in i40e_vsi_free_arrays()
11523 vsi->xdp_rings = NULL; in i40e_vsi_free_arrays()
11527 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
11536 kfree(vsi->rss_hkey_user); in i40e_clear_rss_config_user()
11537 vsi->rss_hkey_user = NULL; in i40e_clear_rss_config_user()
11539 kfree(vsi->rss_lut_user); in i40e_clear_rss_config_user()
11540 vsi->rss_lut_user = NULL; in i40e_clear_rss_config_user()
11544 * i40e_vsi_clear - Deallocate the VSI provided
11545 * @vsi: the VSI being un-configured
11554 if (!vsi->back) in i40e_vsi_clear()
11556 pf = vsi->back; in i40e_vsi_clear()
11558 mutex_lock(&pf->switch_mutex); in i40e_vsi_clear()
11559 if (!pf->vsi[vsi->idx]) { in i40e_vsi_clear()
11560 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", in i40e_vsi_clear()
11561 vsi->idx, vsi->idx, vsi->type); in i40e_vsi_clear()
11565 if (pf->vsi[vsi->idx] != vsi) { in i40e_vsi_clear()
11566 dev_err(&pf->pdev->dev, in i40e_vsi_clear()
11567 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", in i40e_vsi_clear()
11568 pf->vsi[vsi->idx]->idx, in i40e_vsi_clear()
11569 pf->vsi[vsi->idx]->type, in i40e_vsi_clear()
11570 vsi->idx, vsi->type); in i40e_vsi_clear()
11575 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_clear()
11576 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); in i40e_vsi_clear()
11578 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_clear()
11582 pf->vsi[vsi->idx] = NULL; in i40e_vsi_clear()
11583 if (vsi->idx < pf->next_vsi) in i40e_vsi_clear()
11584 pf->next_vsi = vsi->idx; in i40e_vsi_clear()
11587 mutex_unlock(&pf->switch_mutex); in i40e_vsi_clear()
11595 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11602 if (vsi->tx_rings && vsi->tx_rings[0]) { in i40e_vsi_clear_rings()
11603 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_vsi_clear_rings()
11604 kfree_rcu(vsi->tx_rings[i], rcu); in i40e_vsi_clear_rings()
11605 WRITE_ONCE(vsi->tx_rings[i], NULL); in i40e_vsi_clear_rings()
11606 WRITE_ONCE(vsi->rx_rings[i], NULL); in i40e_vsi_clear_rings()
11607 if (vsi->xdp_rings) in i40e_vsi_clear_rings()
11608 WRITE_ONCE(vsi->xdp_rings[i], NULL); in i40e_vsi_clear_rings()
11614 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11620 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings()
11624 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_alloc_rings()
11630 ring->queue_index = i; in i40e_alloc_rings()
11631 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11632 ring->ring_active = false; in i40e_alloc_rings()
11633 ring->vsi = vsi; in i40e_alloc_rings()
11634 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11635 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11636 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11637 ring->size = 0; in i40e_alloc_rings()
11638 ring->dcb_tc = 0; in i40e_alloc_rings()
11639 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) in i40e_alloc_rings()
11640 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; in i40e_alloc_rings()
11641 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11642 WRITE_ONCE(vsi->tx_rings[i], ring++); in i40e_alloc_rings()
11647 ring->queue_index = vsi->alloc_queue_pairs + i; in i40e_alloc_rings()
11648 ring->reg_idx = vsi->base_queue + ring->queue_index; in i40e_alloc_rings()
11649 ring->ring_active = false; in i40e_alloc_rings()
11650 ring->vsi = vsi; in i40e_alloc_rings()
11651 ring->netdev = NULL; in i40e_alloc_rings()
11652 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11653 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11654 ring->size = 0; in i40e_alloc_rings()
11655 ring->dcb_tc = 0; in i40e_alloc_rings()
11656 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) in i40e_alloc_rings()
11657 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; in i40e_alloc_rings()
11659 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11660 WRITE_ONCE(vsi->xdp_rings[i], ring++); in i40e_alloc_rings()
11663 ring->queue_index = i; in i40e_alloc_rings()
11664 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11665 ring->ring_active = false; in i40e_alloc_rings()
11666 ring->vsi = vsi; in i40e_alloc_rings()
11667 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11668 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11669 ring->count = vsi->num_rx_desc; in i40e_alloc_rings()
11670 ring->size = 0; in i40e_alloc_rings()
11671 ring->dcb_tc = 0; in i40e_alloc_rings()
11672 ring->itr_setting = pf->rx_itr_default; in i40e_alloc_rings()
11673 WRITE_ONCE(vsi->rx_rings[i], ring); in i40e_alloc_rings()
11680 return -ENOMEM; in i40e_alloc_rings()
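/* Illustrative sketch (not from i40e_main.c): each iteration above carves
 * the Tx ring, the optional XDP Tx ring, and the Rx ring for one queue
 * pair out of a single allocation and walks it with ring++. That layout
 * is why i40e_vsi_clear_rings() only needs kfree_rcu() on tx_rings[i]
 * while merely NULLing the Rx/XDP pointers. Standalone userspace model,
 * with a hypothetical struct ring standing in for struct i40e_ring:
 */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct ring { int queue_index; bool is_xdp; };

static int alloc_pair_rings(struct ring **tx, struct ring **xdp,
			    struct ring **rx, bool have_xdp, int qidx)
{
	struct ring *ring = calloc(have_xdp ? 3 : 2, sizeof(*ring));

	if (!ring)
		return -ENOMEM;
	ring->queue_index = qidx;
	*tx = ring++;			/* Tx first                 */
	if (have_xdp) {
		ring->is_xdp = true;
		*xdp = ring++;		/* then the XDP Tx ring     */
	}
	ring->queue_index = qidx;
	*rx = ring;			/* Rx last; free() via *tx  */
	return 0;
}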
11684 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11686 * @vectors: the number of MSI-X vectors to request
11692 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, in i40e_reserve_msix_vectors()
11695 dev_info(&pf->pdev->dev, in i40e_reserve_msix_vectors()
11696 "MSI-X vector reservation failed: %d\n", vectors); in i40e_reserve_msix_vectors()
11704  * i40e_init_msix - Setup the MSI-X capability in i40e_init_msix()
11713 struct i40e_hw *hw = &pf->hw; in i40e_init_msix()
11720 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_init_msix()
11721 return -ENODEV; in i40e_init_msix()
11724 * - Add 1 for "other" cause for Admin Queue events, etc. in i40e_init_msix()
11725 * - The number of LAN queue pairs in i40e_init_msix()
11726 * - Queues being used for RSS. in i40e_init_msix()
11730 * - assumes symmetric Tx/Rx pairing in i40e_init_msix()
11731 * - The number of VMDq pairs in i40e_init_msix()
11732 * - The CPU count within the NUMA node if iWARP is enabled in i40e_init_msix()
11738 vectors_left = hw->func_caps.num_msix_vectors; in i40e_init_msix()
11744 vectors_left--; in i40e_init_msix()
11755 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); in i40e_init_msix()
11756 vectors_left -= pf->num_lan_msix; in i40e_init_msix()
11759 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
11761 pf->num_fdsb_msix = 1; in i40e_init_msix()
11763 vectors_left--; in i40e_init_msix()
11765 pf->num_fdsb_msix = 0; in i40e_init_msix()
11770 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11771 iwarp_requested = pf->num_iwarp_msix; in i40e_init_msix()
11774 pf->num_iwarp_msix = 0; in i40e_init_msix()
11775 else if (vectors_left < pf->num_iwarp_msix) in i40e_init_msix()
11776 pf->num_iwarp_msix = 1; in i40e_init_msix()
11777 v_budget += pf->num_iwarp_msix; in i40e_init_msix()
11778 vectors_left -= pf->num_iwarp_msix; in i40e_init_msix()
11782 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { in i40e_init_msix()
11784 pf->num_vmdq_msix = 0; in i40e_init_msix()
11785 pf->num_vmdq_qps = 0; in i40e_init_msix()
11788 pf->num_vmdq_vsis * pf->num_vmdq_qps; in i40e_init_msix()
11799 pf->num_vmdq_qps = 1; in i40e_init_msix()
11800 vmdq_vecs_wanted = pf->num_vmdq_vsis; in i40e_init_msix()
11805 pf->num_vmdq_msix = pf->num_vmdq_qps; in i40e_init_msix()
11808 vectors_left -= vmdq_vecs; in i40e_init_msix()
11821 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); in i40e_init_msix()
11822 pf->num_lan_msix += extra_vectors; in i40e_init_msix()
11823 vectors_left -= extra_vectors; in i40e_init_msix()
11826 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n"); in i40e_init_msix()
11828 v_budget += pf->num_lan_msix; in i40e_init_msix()
11829 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), in i40e_init_msix()
11831 if (!pf->msix_entries) in i40e_init_msix()
11832 return -ENOMEM; in i40e_init_msix()
11835 pf->msix_entries[i].entry = i; in i40e_init_msix()
11839 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; in i40e_init_msix()
11840 kfree(pf->msix_entries); in i40e_init_msix()
11841 pf->msix_entries = NULL; in i40e_init_msix()
11842 pci_disable_msix(pf->pdev); in i40e_init_msix()
11843 return -ENODEV; in i40e_init_msix()
11847 pf->num_vmdq_vsis = 0; in i40e_init_msix()
11848 pf->num_vmdq_qps = 0; in i40e_init_msix()
11849 pf->num_lan_qps = 1; in i40e_init_msix()
11850 pf->num_lan_msix = 1; in i40e_init_msix()
11860 dev_info(&pf->pdev->dev, in i40e_init_msix()
11861 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", in i40e_init_msix()
11864 vec = v_actual - 1; in i40e_init_msix()
11867 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ in i40e_init_msix()
11868 pf->num_vmdq_vsis = 1; in i40e_init_msix()
11869 pf->num_vmdq_qps = 1; in i40e_init_msix()
11874 pf->num_lan_msix = 1; in i40e_init_msix()
11877 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11878 pf->num_lan_msix = 1; in i40e_init_msix()
11879 pf->num_iwarp_msix = 1; in i40e_init_msix()
11881 pf->num_lan_msix = 2; in i40e_init_msix()
11885 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11886 pf->num_iwarp_msix = min_t(int, (vec / 3), in i40e_init_msix()
11888 pf->num_vmdq_vsis = min_t(int, (vec / 3), in i40e_init_msix()
11891 pf->num_vmdq_vsis = min_t(int, (vec / 2), in i40e_init_msix()
11894 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
11895 pf->num_fdsb_msix = 1; in i40e_init_msix()
11896 vec--; in i40e_init_msix()
11898 pf->num_lan_msix = min_t(int, in i40e_init_msix()
11899 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), in i40e_init_msix()
11900 pf->num_lan_msix); in i40e_init_msix()
11901 pf->num_lan_qps = pf->num_lan_msix; in i40e_init_msix()
11906 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_init_msix()
11907 (pf->num_fdsb_msix == 0)) { in i40e_init_msix()
11908 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11909 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_init_msix()
11910 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_msix()
11912 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_init_msix()
11913 (pf->num_vmdq_msix == 0)) { in i40e_init_msix()
11914 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11915 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; in i40e_init_msix()
11918 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_init_msix()
11919 (pf->num_iwarp_msix == 0)) { in i40e_init_msix()
11920 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11921 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_init_msix()
11923 i40e_debug(&pf->hw, I40E_DEBUG_INIT, in i40e_init_msix()
11924 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", in i40e_init_msix()
11925 pf->num_lan_msix, in i40e_init_msix()
11926 pf->num_vmdq_msix * pf->num_vmdq_vsis, in i40e_init_msix()
11927 pf->num_fdsb_msix, in i40e_init_msix()
11928 pf->num_iwarp_msix); in i40e_init_msix()
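/* Self-contained model of the budget arithmetic above (simplified: the
 * real code also sizes the VMDq and iWARP pools and re-shrinks all of
 * them when the PCI core grants fewer vectors than the budget):
 */
static int msix_budget_sketch(int hw_vectors, int cpus, int want_fdsb,
			      int *lan, int *fdsb)
{
	int left = hw_vectors - 1;	/* reserve the "other"/misc vector  */

	*lan = cpus < left / 2 ? cpus : left / 2; /* keep room for others   */
	left -= *lan;
	*fdsb = (want_fdsb && left > 0) ? 1 : 0;  /* sideband flow director */
	left -= *fdsb;
	return 1 + *lan + *fdsb;	/* v_budget handed to the PCI core  */
}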
11934 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11938 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11947 return -ENOMEM; in i40e_vsi_alloc_q_vector()
11949 q_vector->vsi = vsi; in i40e_vsi_alloc_q_vector()
11950 q_vector->v_idx = v_idx; in i40e_vsi_alloc_q_vector()
11951 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); in i40e_vsi_alloc_q_vector()
11953 if (vsi->netdev) in i40e_vsi_alloc_q_vector()
11954 netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll); in i40e_vsi_alloc_q_vector()
11957 vsi->q_vectors[v_idx] = q_vector; in i40e_vsi_alloc_q_vector()
11963 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11967 * return -ENOMEM.
11971 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors()
11975 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_alloc_q_vectors()
11976 num_q_vectors = vsi->num_q_vectors; in i40e_vsi_alloc_q_vectors()
11977 else if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_alloc_q_vectors()
11980 return -EINVAL; in i40e_vsi_alloc_q_vectors()
11991 while (v_idx--) in i40e_vsi_alloc_q_vectors()
11998 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
12006 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_init_interrupt_scheme()
12009 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | in i40e_init_interrupt_scheme()
12018 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_interrupt_scheme()
12025 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_init_interrupt_scheme()
12026 (pf->flags & I40E_FLAG_MSI_ENABLED)) { in i40e_init_interrupt_scheme()
12027 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); in i40e_init_interrupt_scheme()
12028 vectors = pci_enable_msi(pf->pdev); in i40e_init_interrupt_scheme()
12030 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", in i40e_init_interrupt_scheme()
12032 pf->flags &= ~I40E_FLAG_MSI_ENABLED; in i40e_init_interrupt_scheme()
12037 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) in i40e_init_interrupt_scheme()
12038 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); in i40e_init_interrupt_scheme()
12042 pf->irq_pile = kzalloc(size, GFP_KERNEL); in i40e_init_interrupt_scheme()
12043 if (!pf->irq_pile) in i40e_init_interrupt_scheme()
12044 return -ENOMEM; in i40e_init_interrupt_scheme()
12046 pf->irq_pile->num_entries = vectors; in i40e_init_interrupt_scheme()
12049 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); in i40e_init_interrupt_scheme()
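/* The scheme selection above is a strict fallback ladder; a hedged
 * standalone sketch of the policy (flag bookkeeping and irq_pile setup
 * omitted):
 */
enum irq_scheme { IRQ_SCHEME_LEGACY, IRQ_SCHEME_MSI, IRQ_SCHEME_MSIX };

static enum irq_scheme pick_irq_scheme(int msix_err, int msi_err)
{
	if (!msix_err)
		return IRQ_SCHEME_MSIX;	/* per-queue vectors + one misc */
	if (!msi_err)
		return IRQ_SCHEME_MSI;	/* one vector, shared handler   */
	return IRQ_SCHEME_LEGACY;	/* shared INTx line             */
}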
12055 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
12059 * device. This should be called during resume to re-allocate the q_vectors
12066 /* We cleared the MSI and MSI-X flags when disabling the old interrupt in i40e_restore_interrupt_scheme()
12067 	 * scheme. We need to re-enable them here in order to attempt to in i40e_restore_interrupt_scheme()
12068 * re-acquire the MSI or MSI-X vectors in i40e_restore_interrupt_scheme()
12070 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_restore_interrupt_scheme()
12076 /* Now that we've re-acquired IRQs, we need to remap the vectors and in i40e_restore_interrupt_scheme()
12079 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_restore_interrupt_scheme()
12080 if (pf->vsi[i]) { in i40e_restore_interrupt_scheme()
12081 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12084 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12092 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_restore_interrupt_scheme()
12098 while (i--) { in i40e_restore_interrupt_scheme()
12099 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
12100 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
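/* The err_unwind path above (while (i--) ...) is the usual reverse-order
 * cleanup for a partially completed loop; the generic shape, for
 * reference:
 */
static int setup_all_sketch(int n, int (*setup)(int), void (*undo)(int))
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = setup(i);
		if (err)
			break;
	}
	if (err)
		while (i--)
			undo(i);  /* undo only what succeeded, in reverse */
	return err;
}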
12107 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
12112 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
12120 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_setup_misc_vector_for_recovery_mode()
12124 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
12125 "MSI-X misc vector request failed, error %d\n", in i40e_setup_misc_vector_for_recovery_mode()
12130 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; in i40e_setup_misc_vector_for_recovery_mode()
12132 err = request_irq(pf->pdev->irq, i40e_intr, flags, in i40e_setup_misc_vector_for_recovery_mode()
12133 pf->int_name, pf); in i40e_setup_misc_vector_for_recovery_mode()
12136 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
12149  * i40e_setup_misc_vector - Setup the misc vector to handle non-queue events in i40e_setup_misc_vector()
12153 * non-queue interrupts, e.g. AdminQ and errors. This is not used
12158 struct i40e_hw *hw = &pf->hw; in i40e_setup_misc_vector()
12162 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { in i40e_setup_misc_vector()
12163 err = request_irq(pf->msix_entries[0].vector, in i40e_setup_misc_vector()
12164 i40e_intr, 0, pf->int_name, pf); in i40e_setup_misc_vector()
12166 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_setup_misc_vector()
12167 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector()
12169 pf->int_name, err); in i40e_setup_misc_vector()
12170 return -EFAULT; in i40e_setup_misc_vector()
12188 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
12199 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq()
12200 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_aq()
12204 ret = i40e_aq_get_rss_key(hw, vsi->id, in i40e_get_rss_aq()
12207 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12209 i40e_stat_str(&pf->hw, ret), in i40e_get_rss_aq()
12210 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12211 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12217 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_get_rss_aq()
12219 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_get_rss_aq()
12221 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12223 i40e_stat_str(&pf->hw, ret), in i40e_get_rss_aq()
12224 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12225 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12234 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12245 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg()
12246 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_reg()
12247 u16 vf_id = vsi->vf_id; in i40e_config_rss_reg()
12254 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12257 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12261 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); in i40e_config_rss_reg()
12268 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12270 return -EINVAL; in i40e_config_rss_reg()
12273 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12275 return -EINVAL; in i40e_config_rss_reg()
12279 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_config_rss_reg()
12288 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12299 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg()
12300 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_reg()
12313 return -EINVAL; in i40e_get_rss_reg()
12322 * i40e_config_rss - Configure RSS keys and lut
12332 struct i40e_pf *pf = vsi->back; in i40e_config_rss()
12334 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_config_rss()
12341 * i40e_get_rss - Get RSS keys and lut
12351 struct i40e_pf *pf = vsi->back; in i40e_get_rss()
12353 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_get_rss()
12360 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
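/* The (elided) body amounts to a round-robin spread of the enabled queues
 * across the lookup table; a minimal sketch under that assumption:
 */
static void fill_rss_lut_sketch(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;	/* queue serving hash bucket i */
}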
12376 * i40e_pf_config_rss - Prepare for RSS if used
12381 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_pf_config_rss()
12384 struct i40e_hw *hw = &pf->hw; in i40e_pf_config_rss()
12399 reg_val = (pf->rss_table_size == 512) ? in i40e_pf_config_rss()
12405 if (!vsi->rss_size) { in i40e_pf_config_rss()
12408 * could end up with zero TCs. Check for that to avoid in i40e_pf_config_rss()
12409 * divide-by-zero. It probably won't pass traffic, but it also in i40e_pf_config_rss()
12412 qcount = vsi->num_queue_pairs / in i40e_pf_config_rss()
12413 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); in i40e_pf_config_rss()
12414 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
12416 if (!vsi->rss_size) in i40e_pf_config_rss()
12417 return -EINVAL; in i40e_pf_config_rss()
12419 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_pf_config_rss()
12421 return -ENOMEM; in i40e_pf_config_rss()
12424 if (vsi->rss_lut_user) in i40e_pf_config_rss()
12425 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_pf_config_rss()
12427 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
12432 if (vsi->rss_hkey_user) in i40e_pf_config_rss()
12433 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_pf_config_rss()
12436 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_pf_config_rss()
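/* Worked example of the divide-by-zero guard above: with 8 queue pairs
 * and numtc == 0 (treated as one TC), qcount = 8 and rss_size =
 * min(alloc_rss_size, 8); with numtc == 4, qcount = 2 per TC.
 */
static u16 rss_size_sketch(u16 num_queue_pairs, u8 numtc, u16 alloc_rss)
{
	u16 qcount = num_queue_pairs / (numtc ? numtc : 1);

	return alloc_rss < qcount ? alloc_rss : qcount;
}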
12443 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12453 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_reconfig_rss_queues()
12456 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_reconfig_rss_queues()
12460 new_rss_size = min_t(int, queue_count, pf->rss_size_max); in i40e_reconfig_rss_queues()
12462 if (queue_count != vsi->num_queue_pairs) { in i40e_reconfig_rss_queues()
12465 vsi->req_queue_pairs = queue_count; in i40e_reconfig_rss_queues()
12467 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_reconfig_rss_queues()
12468 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12470 pf->alloc_rss_size = new_rss_size; in i40e_reconfig_rss_queues()
12477 if (queue_count < vsi->rss_size) { in i40e_reconfig_rss_queues()
12479 dev_dbg(&pf->pdev->dev, in i40e_reconfig_rss_queues()
12483 /* Reset vsi->rss_size, as number of enabled queues changed */ in i40e_reconfig_rss_queues()
12484 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; in i40e_reconfig_rss_queues()
12485 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
12489 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", in i40e_reconfig_rss_queues()
12490 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
12491 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12495 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12504 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, in i40e_get_partition_bw_setting()
12509 pf->min_bw = min_bw; in i40e_get_partition_bw_setting()
12511 pf->max_bw = max_bw; in i40e_get_partition_bw_setting()
12518 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12529 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); in i40e_set_partition_bw_setting()
12530 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12531 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12534 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); in i40e_set_partition_bw_setting()
12540 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12550 if (pf->hw.partition_id != 1) { in i40e_commit_partition_bw_setting()
12551 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12553 pf->hw.partition_id); in i40e_commit_partition_bw_setting()
12559 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); in i40e_commit_partition_bw_setting()
12560 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12562 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12564 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12565 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12569 /* Read word 0x10 of NVM - SW compatibility word 1 */ in i40e_commit_partition_bw_setting()
12570 ret = i40e_aq_read_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12577 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12578 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12580 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", in i40e_commit_partition_bw_setting()
12581 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12582 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12586 /* Wait a bit for NVM release to complete */ in i40e_commit_partition_bw_setting()
12590 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); in i40e_commit_partition_bw_setting()
12591 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12593 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12595 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12596 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12601 * the NVM - thus storing the bandwidth values permanently. in i40e_commit_partition_bw_setting()
12603 ret = i40e_aq_update_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12610 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12611 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12613 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12615 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12616 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
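/* Outline of the commit flow above, with hypothetical stand-ins for the
 * i40e_acquire_nvm()/i40e_aq_*_nvm() helpers (the real code also checks
 * aq.asq_last_status at every step): the NVM semaphore is taken in READ
 * mode for the read, released, allowed to settle, then re-taken in WRITE
 * mode so the update can trigger the checksum and flash commit.
 */
int nvm_acquire_read(void);			/* hypothetical helpers */
int nvm_acquire_write(void);
int nvm_read_word(u16 offset, u16 *word);
int nvm_update_word(u16 offset, u16 word);
void nvm_release(void);

static int commit_bw_sketch(void)
{
	u16 word;
	int ret;

	ret = nvm_acquire_read();
	if (ret)
		return ret;
	ret = nvm_read_word(0x10, &word);	/* SW compatibility word 1 */
	nvm_release();
	if (ret)
		return ret;
	msleep(50);			/* arbitrary settle delay for sketch */
	ret = nvm_acquire_write();
	if (ret)
		return ret;
	ret = nvm_update_word(0x10, word);	/* last command commits */
	nvm_release();
	return ret;
}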
12623 * i40e_is_total_port_shutdown_enabled - read NVM and return value
12642 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12647 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12654 read_status = i40e_read_nvm_module_data(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12662 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); in i40e_is_total_port_shutdown_enabled()
12668 dev_warn(&pf->pdev->dev, in i40e_is_total_port_shutdown_enabled()
12669 "total-port-shutdown feature is off due to read nvm error: %s\n", in i40e_is_total_port_shutdown_enabled()
12670 i40e_stat_str(&pf->hw, read_status)); in i40e_is_total_port_shutdown_enabled()
12675 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12689 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | in i40e_sw_init()
12694 pf->rx_itr_default = I40E_ITR_RX_DEF; in i40e_sw_init()
12695 pf->tx_itr_default = I40E_ITR_TX_DEF; in i40e_sw_init()
12700 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); in i40e_sw_init()
12701 pf->alloc_rss_size = 1; in i40e_sw_init()
12702 pf->rss_table_size = pf->hw.func_caps.rss_table_size; in i40e_sw_init()
12703 pf->rss_size_max = min_t(int, pf->rss_size_max, in i40e_sw_init()
12704 pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12706 /* find the next higher power-of-2 of num cpus */ in i40e_sw_init()
12708 pf->rss_size_max = min_t(int, pf->rss_size_max, pow); in i40e_sw_init()
12710 if (pf->hw.func_caps.rss) { in i40e_sw_init()
12711 pf->flags |= I40E_FLAG_RSS_ENABLED; in i40e_sw_init()
12712 pf->alloc_rss_size = min_t(int, pf->rss_size_max, in i40e_sw_init()
12717 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { in i40e_sw_init()
12718 pf->flags |= I40E_FLAG_MFP_ENABLED; in i40e_sw_init()
12719 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); in i40e_sw_init()
12721 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12724 dev_info(&pf->pdev->dev, in i40e_sw_init()
12726 pf->min_bw, pf->max_bw); in i40e_sw_init()
12733 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || in i40e_sw_init()
12734 (pf->hw.func_caps.fd_filters_best_effort > 0)) { in i40e_sw_init()
12735 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; in i40e_sw_init()
12736 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; in i40e_sw_init()
12737 if (pf->flags & I40E_FLAG_MFP_ENABLED && in i40e_sw_init()
12738 pf->hw.num_partitions > 1) in i40e_sw_init()
12739 dev_info(&pf->pdev->dev, in i40e_sw_init()
12742 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_sw_init()
12743 pf->fdir_pf_filter_count = in i40e_sw_init()
12744 pf->hw.func_caps.fd_filters_guaranteed; in i40e_sw_init()
12745 pf->hw.fdir_shared_filter_count = in i40e_sw_init()
12746 pf->hw.func_caps.fd_filters_best_effort; in i40e_sw_init()
12749 if (pf->hw.mac.type == I40E_MAC_X722) { in i40e_sw_init()
12750 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | in i40e_sw_init()
12763 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != in i40e_sw_init()
12765 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12767 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; in i40e_sw_init()
12769 } else if ((pf->hw.aq.api_maj_ver > 1) || in i40e_sw_init()
12770 ((pf->hw.aq.api_maj_ver == 1) && in i40e_sw_init()
12771 (pf->hw.aq.api_min_ver > 4))) { in i40e_sw_init()
12773 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; in i40e_sw_init()
12777 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) in i40e_sw_init()
12778 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; in i40e_sw_init()
12780 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12781 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || in i40e_sw_init()
12782 (pf->hw.aq.fw_maj_ver < 4))) { in i40e_sw_init()
12783 pf->hw_features |= I40E_HW_RESTART_AUTONEG; in i40e_sw_init()
12785 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; in i40e_sw_init()
12789 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12790 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || in i40e_sw_init()
12791 (pf->hw.aq.fw_maj_ver < 4))) in i40e_sw_init()
12792 pf->hw_features |= I40E_HW_STOP_FW_LLDP; in i40e_sw_init()
12795 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12796 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || in i40e_sw_init()
12797 (pf->hw.aq.fw_maj_ver >= 5))) in i40e_sw_init()
12798 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; in i40e_sw_init()
12801 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12802 pf->hw.aq.fw_maj_ver >= 6) in i40e_sw_init()
12803 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; in i40e_sw_init()
12805 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { in i40e_sw_init()
12806 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; in i40e_sw_init()
12807 pf->flags |= I40E_FLAG_VMDQ_ENABLED; in i40e_sw_init()
12808 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); in i40e_sw_init()
12811 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { in i40e_sw_init()
12812 pf->flags |= I40E_FLAG_IWARP_ENABLED; in i40e_sw_init()
12814 pf->num_iwarp_msix = (int)num_online_cpus() + 1; in i40e_sw_init()
12821 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12822 pf->hw.func_caps.npar_enable && in i40e_sw_init()
12823 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) in i40e_sw_init()
12824 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; in i40e_sw_init()
12827 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { in i40e_sw_init()
12828 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; in i40e_sw_init()
12829 pf->flags |= I40E_FLAG_SRIOV_ENABLED; in i40e_sw_init()
12830 pf->num_req_vfs = min_t(int, in i40e_sw_init()
12831 pf->hw.func_caps.num_vfs, in i40e_sw_init()
12835 pf->eeprom_version = 0xDEAD; in i40e_sw_init()
12836 pf->lan_veb = I40E_NO_VEB; in i40e_sw_init()
12837 pf->lan_vsi = I40E_NO_VSI; in i40e_sw_init()
12840 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; in i40e_sw_init()
12844 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12845 pf->qp_pile = kzalloc(size, GFP_KERNEL); in i40e_sw_init()
12846 if (!pf->qp_pile) { in i40e_sw_init()
12847 err = -ENOMEM; in i40e_sw_init()
12850 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; in i40e_sw_init()
12852 pf->tx_timeout_recovery_level = 1; in i40e_sw_init()
12854 if (pf->hw.mac.type != I40E_MAC_X722 && in i40e_sw_init()
12859 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | in i40e_sw_init()
12861 dev_info(&pf->pdev->dev, in i40e_sw_init()
12862 "total-port-shutdown was enabled, link-down-on-close is forced on\n"); in i40e_sw_init()
12864 mutex_init(&pf->switch_mutex); in i40e_sw_init()
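/* Worked example for the RSS sizing above: with 6 online CPUs the "next
 * higher power-of-2" is 8 (presumably roundup_pow_of_two() in the elided
 * line), so rss_size_max becomes min(previous cap, num_tx_qp, 8).
 * Standalone model of the rounding:
 */
static unsigned int roundup_pow2_sketch(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;	/* 6 -> 8, 8 -> 8, 1 -> 1 */
	return p;
}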
12871 * i40e_set_ntuple - set the ntuple feature flag and take action
12881 /* Check if Flow Director n-tuple support was enabled or disabled. If in i40e_set_ntuple()
12886 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_set_ntuple()
12888 /* enable FD_SB only if there is MSI-X vector and no cloud in i40e_set_ntuple()
12891 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { in i40e_set_ntuple()
12892 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12893 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12897 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_set_ntuple()
12901 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12902 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); in i40e_set_ntuple()
12903 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12906 pf->fd_add_err = 0; in i40e_set_ntuple()
12907 pf->fd_atr_cnt = 0; in i40e_set_ntuple()
12908 /* if ATR was auto disabled it can be re-enabled. */ in i40e_set_ntuple()
12909 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_set_ntuple()
12910 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_set_ntuple()
12911 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_set_ntuple()
12912 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); in i40e_set_ntuple()
12918 * i40e_clear_rss_lut - clear the rx hash lookup table
12923 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut()
12924 struct i40e_hw *hw = &pf->hw; in i40e_clear_rss_lut()
12925 u16 vf_id = vsi->vf_id; in i40e_clear_rss_lut()
12928 if (vsi->type == I40E_VSI_MAIN) { in i40e_clear_rss_lut()
12931 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_clear_rss_lut()
12935 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_clear_rss_lut()
12940 * i40e_set_features - set the netdev feature flags
12949 struct i40e_vsi *vsi = np->vsi; in i40e_set_features()
12950 struct i40e_pf *pf = vsi->back; in i40e_set_features()
12953 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) in i40e_set_features()
12956 netdev->features & NETIF_F_RXHASH) in i40e_set_features()
12965 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) { in i40e_set_features()
12966 dev_err(&pf->pdev->dev, in i40e_set_features()
12968 return -EINVAL; in i40e_set_features()
12971 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) in i40e_set_features()
12987 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_set_port()
12991 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN : in i40e_udp_tunnel_set_port()
12994 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index, in i40e_udp_tunnel_set_port()
12999 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_udp_tunnel_set_port()
13000 return -EIO; in i40e_udp_tunnel_set_port()
13012 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_unset_port()
13015 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL); in i40e_udp_tunnel_unset_port()
13019 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_udp_tunnel_unset_port()
13020 return -EIO; in i40e_udp_tunnel_unset_port()
13030 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id()
13031 struct i40e_hw *hw = &pf->hw; in i40e_get_phys_port_id()
13033 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) in i40e_get_phys_port_id()
13034 return -EOPNOTSUPP; in i40e_get_phys_port_id()
13036 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); in i40e_get_phys_port_id()
13037 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); in i40e_get_phys_port_id()
13043 * i40e_ndo_fdb_add - add an entry to the hardware database
13059 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add()
13062 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) in i40e_ndo_fdb_add()
13063 return -EOPNOTSUPP; in i40e_ndo_fdb_add()
13066 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); in i40e_ndo_fdb_add()
13067 return -EINVAL; in i40e_ndo_fdb_add()
13073 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { in i40e_ndo_fdb_add()
13075 return -EINVAL; in i40e_ndo_fdb_add()
13083 err = -EINVAL; in i40e_ndo_fdb_add()
13086 if (err == -EEXIST && !(flags & NLM_F_EXCL)) in i40e_ndo_fdb_add()
13093 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
13114 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_setlink()
13115 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink()
13121 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_setlink()
13122 return -EOPNOTSUPP; in i40e_ndo_bridge_setlink()
13126 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_setlink()
13127 veb = pf->veb[i]; in i40e_ndo_bridge_setlink()
13141 return -EINVAL; in i40e_ndo_bridge_setlink()
13145 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
13146 vsi->tc_config.enabled_tc); in i40e_ndo_bridge_setlink()
13148 veb->bridge_mode = mode; in i40e_ndo_bridge_setlink()
13152 return -ENOENT; in i40e_ndo_bridge_setlink()
13155 } else if (mode != veb->bridge_mode) { in i40e_ndo_bridge_setlink()
13157 veb->bridge_mode = mode; in i40e_ndo_bridge_setlink()
13160 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
13162 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
13172 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
13189 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_getlink()
13190 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink()
13195 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_getlink()
13196 return -EOPNOTSUPP; in i40e_ndo_bridge_getlink()
13200 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_getlink()
13201 veb = pf->veb[i]; in i40e_ndo_bridge_getlink()
13207 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, in i40e_ndo_bridge_getlink()
13212 * i40e_features_check - Validate encapsulated packet conforms to limits
13227 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_features_check()
13233 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) in i40e_features_check()
13237 len = skb_network_header(skb) - skb->data; in i40e_features_check()
13242 len = skb_transport_header(skb) - skb_network_header(skb); in i40e_features_check()
13246 if (skb->encapsulation) { in i40e_features_check()
13248 len = skb_inner_network_header(skb) - skb_transport_header(skb); in i40e_features_check()
13253 len = skb_inner_transport_header(skb) - in i40e_features_check()
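/* Hedged model of the checks above: each computed header length is
 * validated against a hardware descriptor field limit (per the elided
 * lines, on the order of 63 2-byte words for the MAC header and 127
 * 4-byte dwords for IP headers); a violation clears the checksum/GSO
 * feature bits so the stack falls back to software for this skb. The
 * masks below are illustrative of that word/dword granularity.
 */
static bool hdr_lens_ok_sketch(int mac_len, int ip_len)
{
	return !(mac_len & ~(63 * 2)) && !(ip_len & ~(127 * 4));
}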
13270 * i40e_xdp_setup - add/remove an XDP program
13278 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; in i40e_xdp_setup()
13279 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup()
13285 if (frame_size > vsi->rx_buf_len) { in i40e_xdp_setup()
13287 return -EINVAL; in i40e_xdp_setup()
13290 /* When turning XDP on->off/off->on we reset and rebuild the rings. */ in i40e_xdp_setup()
13297 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_xdp_setup()
13298 return -EINVAL; in i40e_xdp_setup()
13300 old_prog = xchg(&vsi->xdp_prog, prog); in i40e_xdp_setup()
13304 /* Wait until ndo_xsk_wakeup completes. */ in i40e_xdp_setup()
13311 return -ENOMEM; in i40e_xdp_setup()
13314 return -ENOMEM; in i40e_xdp_setup()
13317 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13318 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in i40e_xdp_setup()
13327 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13328 if (vsi->xdp_rings[i]->xsk_pool) in i40e_xdp_setup()
13329 (void)i40e_xsk_wakeup(vsi->netdev, i, in i40e_xdp_setup()
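/* Sketch of the attach path above: the program pointer is swapped
 * atomically, every Rx ring is pointed at the new program, and queues
 * backed by an AF_XDP pool are kicked so their wakeup path observes it.
 * Kernel-style sketch with simplified parameters (the real code also
 * quiesces traffic around a ring rebuild when the on/off state flips):
 */
static void swap_xdp_prog_sketch(struct bpf_prog **slot,
				 struct bpf_prog *prog,
				 struct bpf_prog **ring_slot, int nrings)
{
	struct bpf_prog *old = xchg(slot, prog);
	int i;

	for (i = 0; i < nrings; i++)
		WRITE_ONCE(ring_slot[i], prog);
	if (old)
		bpf_prog_put(old);	/* drop the reference we displaced */
}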
13336 * i40e_enter_busy_conf - Enters busy config state
13343 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf()
13346 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_enter_busy_conf()
13347 timeout--; in i40e_enter_busy_conf()
13349 return -EBUSY; in i40e_enter_busy_conf()
13357 * i40e_exit_busy_conf - Exits busy config state
13362 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf()
13364 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_exit_busy_conf()
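/* The enter/exit pair above is a bit-lock with bounded spinning; a
 * standalone model (the driver sleeps roughly a millisecond per retry in
 * the elided usleep_range() call):
 */
static int enter_busy_sketch(unsigned long *state, int bit, int tries)
{
	while (test_and_set_bit(bit, state)) {
		if (--tries == 0)
			return -EBUSY;
		usleep_range(1000, 2000);	/* back off, then retry */
	}
	return 0;	/* pairs with clear_bit() in the exit helper */
}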
13368 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13374 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, in i40e_queue_pair_reset_stats()
13375 sizeof(vsi->rx_rings[queue_pair]->rx_stats)); in i40e_queue_pair_reset_stats()
13376 memset(&vsi->tx_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13377 sizeof(vsi->tx_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13379 memset(&vsi->xdp_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13380 sizeof(vsi->xdp_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13385 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13391 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13393 /* Make sure that in-progress ndo_xdp_xmit calls are in i40e_queue_pair_clean_rings()
13397 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13399 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13403 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13411 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_toggle_napi()
13412 struct i40e_q_vector *q_vector = rxr->q_vector; in i40e_queue_pair_toggle_napi()
13414 if (!vsi->netdev) in i40e_queue_pair_toggle_napi()
13418 if (q_vector->rx.ring || q_vector->tx.ring) { in i40e_queue_pair_toggle_napi()
13420 napi_enable(&q_vector->napi); in i40e_queue_pair_toggle_napi()
13422 napi_disable(&q_vector->napi); in i40e_queue_pair_toggle_napi()
13427 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13437 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings()
13440 pf_q = vsi->base_queue + queue_pair; in i40e_queue_pair_toggle_rings()
13441 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
13444 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13446 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13453 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13455 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13468 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
13469 pf_q + vsi->alloc_queue_pairs, in i40e_queue_pair_toggle_rings()
13472 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13474 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13481 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13487 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_enable_irq()
13488 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq()
13489 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_enable_irq()
13492 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_queue_pair_enable_irq()
13493 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); in i40e_queue_pair_enable_irq()
13501 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13507 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_disable_irq()
13508 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq()
13509 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_disable_irq()
13517 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_queue_pair_disable_irq()
13518 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; in i40e_queue_pair_disable_irq()
13520 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); in i40e_queue_pair_disable_irq()
13522 synchronize_irq(pf->msix_entries[intpf].vector); in i40e_queue_pair_disable_irq()
13524 /* Legacy and MSI mode - this stops all interrupt handling */ in i40e_queue_pair_disable_irq()
13528 synchronize_irq(pf->pdev->irq); in i40e_queue_pair_disable_irq()
13533 * i40e_queue_pair_disable - Disables a queue pair
13549 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_disable()
13558 * i40e_queue_pair_enable - Enables a queue pair
13568 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_enable()
13573 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_enable()
13578 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_enable()
13592 * i40e_xdp - implements ndo_bpf for i40e
13600 struct i40e_vsi *vsi = np->vsi; in i40e_xdp()
13602 if (vsi->type != I40E_VSI_MAIN) in i40e_xdp()
13603 return -EINVAL; in i40e_xdp()
13605 switch (xdp->command) { in i40e_xdp()
13607 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack); in i40e_xdp()
13609 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool, in i40e_xdp()
13610 xdp->xsk.queue_id); in i40e_xdp()
13612 return -EINVAL; in i40e_xdp()
13656 * i40e_config_netdev - Setup the netdev flags
13663 struct i40e_pf *pf = vsi->back; in i40e_config_netdev()
13664 struct i40e_hw *hw = &pf->hw; in i40e_config_netdev()
13674 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); in i40e_config_netdev()
13676 return -ENOMEM; in i40e_config_netdev()
13678 vsi->netdev = netdev; in i40e_config_netdev()
13680 np->vsi = vsi; in i40e_config_netdev()
13702 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) in i40e_config_netdev()
13703 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in i40e_config_netdev()
13705 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; in i40e_config_netdev()
13707 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; in i40e_config_netdev()
13709 netdev->hw_enc_features |= hw_enc_features; in i40e_config_netdev()
13712 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; in i40e_config_netdev()
13721 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES; in i40e_config_netdev()
13722 netdev->features |= NETIF_F_GSO_PARTIAL | in i40e_config_netdev()
13725 netdev->mpls_features |= NETIF_F_SG; in i40e_config_netdev()
13726 netdev->mpls_features |= NETIF_F_HW_CSUM; in i40e_config_netdev()
13727 netdev->mpls_features |= NETIF_F_TSO; in i40e_config_netdev()
13728 netdev->mpls_features |= NETIF_F_TSO6; in i40e_config_netdev()
13729 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES; in i40e_config_netdev()
13732 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; in i40e_config_netdev()
13738 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_config_netdev()
13741 netdev->hw_features |= hw_features; in i40e_config_netdev()
13743 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; in i40e_config_netdev()
13744 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; in i40e_config_netdev()
13746 netdev->features &= ~NETIF_F_HW_TC; in i40e_config_netdev()
13748 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_netdev()
13749 SET_NETDEV_DEV(netdev, &pf->pdev->dev); in i40e_config_netdev()
13750 ether_addr_copy(mac_addr, hw->mac.perm_addr); in i40e_config_netdev()
13752 * some older NVM configurations load a default MAC-VLAN in i40e_config_netdev()
13762 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13764 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13769 * original name by IFNAMSIZ - 4 in i40e_config_netdev()
13771 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", in i40e_config_netdev()
13772 IFNAMSIZ - 4, in i40e_config_netdev()
13773 pf->vsi[pf->lan_vsi]->netdev->name); in i40e_config_netdev()
13776 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13778 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13795 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13797 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13800 ether_addr_copy(netdev->perm_addr, mac_addr); in i40e_config_netdev()
13802 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ in i40e_config_netdev()
13803 netdev->neigh_priv_len = sizeof(u32) * 4; in i40e_config_netdev()
13805 netdev->priv_flags |= IFF_UNICAST_FLT; in i40e_config_netdev()
13806 netdev->priv_flags |= IFF_SUPP_NOFCS; in i40e_config_netdev()
13808 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); in i40e_config_netdev()
13810 netdev->netdev_ops = &i40e_netdev_ops; in i40e_config_netdev()
13811 netdev->watchdog_timeo = 5 * HZ; in i40e_config_netdev()
13814 /* MTU range: 68 - 9706 */ in i40e_config_netdev()
13815 netdev->min_mtu = ETH_MIN_MTU; in i40e_config_netdev()
13816 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; in i40e_config_netdev()
13822 * i40e_vsi_delete - Delete a VSI from the switch
13830 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) in i40e_vsi_delete()
13833 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); in i40e_vsi_delete()
13837 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13845 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb()
13848 if (vsi->veb_idx >= I40E_MAX_VEB) in i40e_is_vsi_uplink_mode_veb()
13851 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
13853 dev_info(&pf->pdev->dev, in i40e_is_vsi_uplink_mode_veb()
13855 return -ENOENT; in i40e_is_vsi_uplink_mode_veb()
13859 if (veb->bridge_mode & BRIDGE_MODE_VEPA) { in i40e_is_vsi_uplink_mode_veb()
13871 * i40e_add_vsi - Add a VSI to the switch
13879 int ret = -ENODEV; in i40e_add_vsi()
13880 struct i40e_pf *pf = vsi->back; in i40e_add_vsi()
13881 struct i40e_hw *hw = &pf->hw; in i40e_add_vsi()
13891 switch (vsi->type) { in i40e_add_vsi()
13898 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13899 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13901 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_add_vsi()
13904 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13906 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13907 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13908 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13909 return -ENOENT; in i40e_add_vsi()
13911 vsi->info = ctxt.info; in i40e_add_vsi()
13912 vsi->info.valid_sections = 0; in i40e_add_vsi()
13914 vsi->seid = ctxt.seid; in i40e_add_vsi()
13915 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
13920 * negative logic - if it's set, we need to fiddle with in i40e_add_vsi()
13923 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { in i40e_add_vsi()
13925 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13926 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13934 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13936 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13937 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13938 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13939 ret = -ENOENT; in i40e_add_vsi()
13945 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && in i40e_add_vsi()
13946 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ in i40e_add_vsi()
13948 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13949 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13954 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13956 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13957 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13958 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13959 ret = -ENOENT; in i40e_add_vsi()
13964 vsi->info.valid_sections = 0; in i40e_add_vsi()
13967 * reconfigure it to enable all TCs that are in i40e_add_vsi()
13977 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13978 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", in i40e_add_vsi()
13980 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13981 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13982 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13988 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
13990 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
13993 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && in i40e_add_vsi()
14004 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
14006 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14025 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
14026 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; in i40e_add_vsi()
14027 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14041 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_add_vsi()
14051 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
14067 return -ENODEV; in i40e_add_vsi()
14070 if (vsi->type != I40E_VSI_MAIN) { in i40e_add_vsi()
14073 dev_info(&vsi->back->pdev->dev, in i40e_add_vsi()
14075 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
14076 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14077 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14078 ret = -ENOENT; in i40e_add_vsi()
14081 vsi->info = ctxt.info; in i40e_add_vsi()
14082 vsi->info.valid_sections = 0; in i40e_add_vsi()
14083 vsi->seid = ctxt.seid; in i40e_add_vsi()
14084 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
14087 vsi->active_filters = 0; in i40e_add_vsi()
14088 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_add_vsi()
14089 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14091 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vsi()
14092 f->state = I40E_FILTER_NEW; in i40e_add_vsi()
14095 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14098 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_vsi()
14099 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_add_vsi()
14105 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14107 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
14108 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14118 * i40e_vsi_release - Delete a VSI and free its resources
14132 pf = vsi->back; in i40e_vsi_release()
14134 /* release of a VEB-owner or last VSI is not allowed */ in i40e_vsi_release()
14135 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_vsi_release()
14136 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", in i40e_vsi_release()
14137 vsi->seid, vsi->uplink_seid); in i40e_vsi_release()
14138 return -ENODEV; in i40e_vsi_release()
14140 if (vsi == pf->vsi[pf->lan_vsi] && in i40e_vsi_release()
14141 !test_bit(__I40E_DOWN, pf->state)) { in i40e_vsi_release()
14142 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); in i40e_vsi_release()
14143 return -ENODEV; in i40e_vsi_release()
14145 set_bit(__I40E_VSI_RELEASING, vsi->state); in i40e_vsi_release()
14146 uplink_seid = vsi->uplink_seid; in i40e_vsi_release()
14147 if (vsi->type != I40E_VSI_SRIOV) { in i40e_vsi_release()
14148 if (vsi->netdev_registered) { in i40e_vsi_release()
14149 vsi->netdev_registered = false; in i40e_vsi_release()
14150 if (vsi->netdev) { in i40e_vsi_release()
14152 unregister_netdev(vsi->netdev); in i40e_vsi_release()
14160 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14163 if (vsi->netdev) { in i40e_vsi_release()
14164 __dev_uc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14165 __dev_mc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14169 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) in i40e_vsi_release()
14172 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14178 if (vsi->netdev) { in i40e_vsi_release()
14179 free_netdev(vsi->netdev); in i40e_vsi_release()
14180 vsi->netdev = NULL; in i40e_vsi_release()
14190 * the orphan VEBs yet. We'll wait for an explicit remove request in i40e_vsi_release()
14193 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_release()
14194 if (pf->vsi[i] && in i40e_vsi_release()
14195 pf->vsi[i]->uplink_seid == uplink_seid && in i40e_vsi_release()
14196 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
14201 if (!pf->veb[i]) in i40e_vsi_release()
14203 if (pf->veb[i]->uplink_seid == uplink_seid) in i40e_vsi_release()
14205 if (pf->veb[i]->seid == uplink_seid) in i40e_vsi_release()
14206 veb = pf->veb[i]; in i40e_vsi_release()
14208 if (n == 0 && veb && veb->uplink_seid != 0) in i40e_vsi_release()
14215 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
14226 int ret = -ENOENT; in i40e_vsi_setup_vectors()
14227 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors()
14229 if (vsi->q_vectors[0]) { in i40e_vsi_setup_vectors()
14230 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", in i40e_vsi_setup_vectors()
14231 vsi->seid); in i40e_vsi_setup_vectors()
14232 return -EEXIST; in i40e_vsi_setup_vectors()
14235 if (vsi->base_vector) { in i40e_vsi_setup_vectors()
14236 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", in i40e_vsi_setup_vectors()
14237 vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14238 return -EEXIST; in i40e_vsi_setup_vectors()
14243 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14245 vsi->num_q_vectors, vsi->seid, ret); in i40e_vsi_setup_vectors()
14246 vsi->num_q_vectors = 0; in i40e_vsi_setup_vectors()
14253 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_vsi_setup_vectors()
14255 if (vsi->num_q_vectors) in i40e_vsi_setup_vectors()
14256 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
14257 vsi->num_q_vectors, vsi->idx); in i40e_vsi_setup_vectors()
14258 if (vsi->base_vector < 0) { in i40e_vsi_setup_vectors()
14259 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14261 vsi->num_q_vectors, vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14263 ret = -ENOENT; in i40e_vsi_setup_vectors()
14272 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
14275 * This re-allocates a vsi's queue resources.
14290 pf = vsi->back; in i40e_vsi_reinit_setup()
14292 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
14301 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_reinit_setup()
14304 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
14306 dev_info(&pf->pdev->dev, in i40e_vsi_reinit_setup()
14308 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_reinit_setup()
14311 vsi->base_queue = ret; in i40e_vsi_reinit_setup()
14316 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_vsi_reinit_setup()
14317 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
14318 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_vsi_reinit_setup()
14319 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_vsi_reinit_setup()
14320 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_reinit_setup()
14321 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
14334 if (vsi->netdev_registered) { in i40e_vsi_reinit_setup()
14335 vsi->netdev_registered = false; in i40e_vsi_reinit_setup()
14336 unregister_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14337 free_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14338 vsi->netdev = NULL; in i40e_vsi_reinit_setup()
14340 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
14347 * i40e_vsi_setup - Set up a VSI by a given type
14369 * - the PF's port seid in i40e_vsi_setup()
14372 * - seid of an existing VEB in i40e_vsi_setup()
14373 * - seid of a VSI that owns an existing VEB in i40e_vsi_setup()
14374 * - seid of a VSI that doesn't own a VEB in i40e_vsi_setup()
14376 * - seid of the PF VSI, which is what creates the first VEB in i40e_vsi_setup()
14382 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { in i40e_vsi_setup()
14383 veb = pf->veb[i]; in i40e_vsi_setup()
14388 if (!veb && uplink_seid != pf->mac_seid) { in i40e_vsi_setup()
14390 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_setup()
14391 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { in i40e_vsi_setup()
14392 vsi = pf->vsi[i]; in i40e_vsi_setup()
14397 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", in i40e_vsi_setup()
14402 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
14403 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14404 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14405 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_vsi_setup()
14406 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14407 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14409 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { in i40e_vsi_setup()
14410 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup()
14418 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_vsi_setup()
14419 veb->bridge_mode = BRIDGE_MODE_VEPA; in i40e_vsi_setup()
14420 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_vsi_setup()
14425 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_vsi_setup()
14426 veb = pf->veb[i]; in i40e_vsi_setup()
14429 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); in i40e_vsi_setup()
14433 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_vsi_setup()
14434 uplink_seid = veb->seid; in i40e_vsi_setup()
14441 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
14444 vsi->type = type; in i40e_vsi_setup()
14445 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); in i40e_vsi_setup()
14448 pf->lan_vsi = v_idx; in i40e_vsi_setup()
14450 vsi->vf_id = param1; in i40e_vsi_setup()
14452 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_setup()
14455 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
14457 dev_info(&pf->pdev->dev, in i40e_vsi_setup()
14459 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_setup()
14462 vsi->base_queue = ret; in i40e_vsi_setup()
14465 vsi->uplink_seid = uplink_seid; in i40e_vsi_setup()
14470 switch (vsi->type) { in i40e_vsi_setup()
14480 ret = register_netdev(vsi->netdev); in i40e_vsi_setup()
14483 vsi->netdev_registered = true; in i40e_vsi_setup()
14484 netif_carrier_off(vsi->netdev); in i40e_vsi_setup()
14510 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && in i40e_vsi_setup()
14511 (vsi->type == I40E_VSI_VMDQ2)) { in i40e_vsi_setup()
14519 if (vsi->netdev_registered) { in i40e_vsi_setup()
14520 vsi->netdev_registered = false; in i40e_vsi_setup()
14521 unregister_netdev(vsi->netdev); in i40e_vsi_setup()
14522 free_netdev(vsi->netdev); in i40e_vsi_setup()
14523 vsi->netdev = NULL; in i40e_vsi_setup()
14526 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
14534 * i40e_veb_get_bw_info - Query VEB BW information
14543 struct i40e_pf *pf = veb->pf; in i40e_veb_get_bw_info()
14544 struct i40e_hw *hw = &pf->hw; in i40e_veb_get_bw_info()
14549 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, in i40e_veb_get_bw_info()
14552 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14554 i40e_stat_str(&pf->hw, ret), in i40e_veb_get_bw_info()
14555 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14559 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, in i40e_veb_get_bw_info()
14562 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14564 i40e_stat_str(&pf->hw, ret), in i40e_veb_get_bw_info()
14565 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14569 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); in i40e_veb_get_bw_info()
14570 veb->bw_max_quanta = ets_data.tc_bw_max; in i40e_veb_get_bw_info()
14571 veb->is_abs_credits = bw_data.absolute_credits_enable; in i40e_veb_get_bw_info()
14572 veb->enabled_tc = ets_data.tc_valid_bits; in i40e_veb_get_bw_info()
14576 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; in i40e_veb_get_bw_info()
14577 veb->bw_tc_limit_credits[i] = in i40e_veb_get_bw_info()
14579 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); in i40e_veb_get_bw_info()
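An aside on the decode above: each traffic class owns a 4-bit slot in tc_bw_max, and only the low 3 bits of a slot are meaningful, hence the shift by i*4 and the mask with 0x7. A minimal userspace sketch of the same shift-and-mask unpacking; all names here are invented for the demo, not driver API:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_TC 8	/* i40e supports up to 8 traffic classes */

int main(void)
{
	/* each TC owns a 4-bit slot; only the low 3 bits are used */
	uint32_t tc_bw_max = 0x76543210;
	uint8_t quanta[DEMO_MAX_TC];

	for (int i = 0; i < DEMO_MAX_TC; i++) {
		quanta[i] = (tc_bw_max >> (i * 4)) & 0x7;
		printf("TC%d max quanta: %u\n", i, quanta[i]);
	}
	return 0;
}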
14587 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14595 int ret = -ENOENT; in i40e_veb_mem_alloc()
14600 mutex_lock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14609 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) in i40e_veb_mem_alloc()
14612 ret = -ENOMEM; in i40e_veb_mem_alloc()
14618 ret = -ENOMEM; in i40e_veb_mem_alloc()
14621 veb->pf = pf; in i40e_veb_mem_alloc()
14622 veb->idx = i; in i40e_veb_mem_alloc()
14623 veb->enabled_tc = 1; in i40e_veb_mem_alloc()
14625 pf->veb[i] = veb; in i40e_veb_mem_alloc()
14628 mutex_unlock(&pf->switch_mutex); in i40e_veb_mem_alloc()
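i40e_veb_mem_alloc shows a common kernel pattern: scan a fixed table for the first free slot while holding a mutex, then publish the new entry under that same lock so concurrent allocators cannot race. A hedged userspace sketch of the pattern, with a pthread mutex standing in for switch_mutex and all demo_* names invented:

#include <pthread.h>
#include <stdlib.h>

#define DEMO_MAX_VEB 16

struct demo_veb { int idx; };

static struct demo_veb *slots[DEMO_MAX_VEB];
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns the new slot index, or -1 if the table is full or OOM */
static int demo_veb_alloc(void)
{
	int i, ret = -1;

	pthread_mutex_lock(&slots_lock);
	for (i = 0; i < DEMO_MAX_VEB && slots[i]; i++)
		;
	if (i < DEMO_MAX_VEB) {
		struct demo_veb *veb = calloc(1, sizeof(*veb));

		if (veb) {
			veb->idx = i;
			slots[i] = veb;	/* publish under the lock */
			ret = i;
		}
	}
	pthread_mutex_unlock(&slots_lock);
	return ret;
}

int main(void)
{
	return demo_veb_alloc() < 0;
}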
14633 * i40e_switch_branch_release - Delete a branch of the switch tree
14641 struct i40e_pf *pf = branch->pf; in i40e_switch_branch_release()
14642 u16 branch_seid = branch->seid; in i40e_switch_branch_release()
14643 u16 veb_idx = branch->idx; in i40e_switch_branch_release()
14646 /* release any VEBs on this VEB - RECURSION */ in i40e_switch_branch_release()
14648 if (!pf->veb[i]) in i40e_switch_branch_release()
14650 if (pf->veb[i]->uplink_seid == branch->seid) in i40e_switch_branch_release()
14651 i40e_switch_branch_release(pf->veb[i]); in i40e_switch_branch_release()
14659 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_switch_branch_release()
14660 if (!pf->vsi[i]) in i40e_switch_branch_release()
14662 if (pf->vsi[i]->uplink_seid == branch_seid && in i40e_switch_branch_release()
14663 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
14664 i40e_vsi_release(pf->vsi[i]); in i40e_switch_branch_release()
14673 if (pf->veb[veb_idx]) in i40e_switch_branch_release()
14674 i40e_veb_release(pf->veb[veb_idx]); in i40e_switch_branch_release()
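The release above is depth-first: any child VEB whose uplink_seid matches this VEB is released first (the RECURSION comment), then the attached VSIs, then the VEB itself. A reduced sketch of that traversal over a flat table; the types and seid values are invented, and the real driver additionally checks the VEB-owner flag:

#include <stdio.h>

#define DEMO_MAX_VEB 8

struct demo_veb {
	int seid;
	int uplink_seid;	/* seid of the parent VEB, 0 for the root */
	int in_use;
};

static struct demo_veb vebs[DEMO_MAX_VEB] = {
	{ .seid = 10, .uplink_seid = 0,  .in_use = 1 },
	{ .seid = 11, .uplink_seid = 10, .in_use = 1 },
	{ .seid = 12, .uplink_seid = 11, .in_use = 1 },
};

static void demo_branch_release(struct demo_veb *branch)
{
	int i;

	/* release any child VEBs first - recursion, as in the driver */
	for (i = 0; i < DEMO_MAX_VEB; i++) {
		if (vebs[i].in_use && vebs[i].uplink_seid == branch->seid)
			demo_branch_release(&vebs[i]);
	}
	printf("releasing VEB seid %d\n", branch->seid);
	branch->in_use = 0;
}

int main(void)
{
	demo_branch_release(&vebs[0]);	/* prints 12, 11, 10 */
	return 0;
}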
14678 * i40e_veb_clear - remove veb struct
14686 if (veb->pf) { in i40e_veb_clear()
14687 struct i40e_pf *pf = veb->pf; in i40e_veb_clear()
14689 mutex_lock(&pf->switch_mutex); in i40e_veb_clear()
14690 if (pf->veb[veb->idx] == veb) in i40e_veb_clear()
14691 pf->veb[veb->idx] = NULL; in i40e_veb_clear()
14692 mutex_unlock(&pf->switch_mutex); in i40e_veb_clear()
14699 * i40e_veb_release - Delete a VEB and free its resources
14708 pf = veb->pf; in i40e_veb_release()
14711 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_veb_release()
14712 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { in i40e_veb_release()
14714 vsi = pf->vsi[i]; in i40e_veb_release()
14718 dev_info(&pf->pdev->dev, in i40e_veb_release()
14720 veb->seid, n); in i40e_veb_release()
14725 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; in i40e_veb_release()
14726 if (veb->uplink_seid) { in i40e_veb_release()
14727 vsi->uplink_seid = veb->uplink_seid; in i40e_veb_release()
14728 if (veb->uplink_seid == pf->mac_seid) in i40e_veb_release()
14729 vsi->veb_idx = I40E_NO_VEB; in i40e_veb_release()
14731 vsi->veb_idx = veb->veb_idx; in i40e_veb_release()
14734 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_veb_release()
14735 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; in i40e_veb_release()
14738 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_veb_release()
14743 * i40e_add_veb - create the VEB in the switch
14749 struct i40e_pf *pf = veb->pf; in i40e_add_veb()
14750 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); in i40e_add_veb()
14753 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, in i40e_add_veb()
14754 veb->enabled_tc, false, in i40e_add_veb()
14755 &veb->seid, enable_stats, NULL); in i40e_add_veb()
14759 dev_info(&pf->pdev->dev, in i40e_add_veb()
14761 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
14762 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14763 return -EPERM; in i40e_add_veb()
14767 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, in i40e_add_veb()
14768 &veb->stats_idx, NULL, NULL, NULL); in i40e_add_veb()
14770 dev_info(&pf->pdev->dev, in i40e_add_veb()
14772 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
14773 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14774 return -EPERM; in i40e_add_veb()
14778 dev_info(&pf->pdev->dev, in i40e_add_veb()
14780 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
14781 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14782 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_add_veb()
14783 return -ENOENT; in i40e_add_veb()
14786 vsi->uplink_seid = veb->seid; in i40e_add_veb()
14787 vsi->veb_idx = veb->idx; in i40e_add_veb()
14788 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_add_veb()
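Note the rollback shape in i40e_add_veb: once the element exists in the switch, any later failure deletes it again rather than leaving firmware state half-configured. A sketch of that create-then-undo transaction; the demo_fw_* helpers are invented stand-ins, not real admin-queue API:

#include <stdio.h>

static int demo_fw_add_elem(int *seid) { *seid = 42; return 0; }
static int demo_fw_query_elem(int seid) { (void)seid; return -1; } /* force a failure */
static void demo_fw_del_elem(int seid) { printf("rolled back seid %d\n", seid); }

static int demo_add_veb(void)
{
	int seid, err;

	err = demo_fw_add_elem(&seid);
	if (err)
		return err;		/* nothing created, nothing to undo */

	err = demo_fw_query_elem(seid);
	if (err) {
		/* undo the create so the switch isn't left half-configured */
		demo_fw_del_elem(seid);
		return err;
	}
	return 0;
}

int main(void)
{
	return demo_add_veb() ? 1 : 0;
}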
14794 * i40e_veb_setup - Set up a VEB
14799 * @enabled_tc: Enabled TC bit-map
14820 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14827 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) in i40e_veb_setup()
14828 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) in i40e_veb_setup()
14830 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { in i40e_veb_setup()
14831 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", in i40e_veb_setup()
14836 if (uplink_seid && uplink_seid != pf->mac_seid) { in i40e_veb_setup()
14838 if (pf->veb[veb_idx] && in i40e_veb_setup()
14839 pf->veb[veb_idx]->seid == uplink_seid) { in i40e_veb_setup()
14840 uplink_veb = pf->veb[veb_idx]; in i40e_veb_setup()
14845 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14855 veb = pf->veb[veb_idx]; in i40e_veb_setup()
14856 veb->flags = flags; in i40e_veb_setup()
14857 veb->uplink_seid = uplink_seid; in i40e_veb_setup()
14858 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); in i40e_veb_setup()
14859 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); in i40e_veb_setup()
14862 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); in i40e_veb_setup()
14865 if (vsi_idx == pf->lan_vsi) in i40e_veb_setup()
14866 pf->lan_veb = veb->idx; in i40e_veb_setup()
14877 * i40e_setup_pf_switch_element - set PF vars based on switch type
14889 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); in i40e_setup_pf_switch_element()
14890 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); in i40e_setup_pf_switch_element()
14891 u8 element_type = ele->element_type; in i40e_setup_pf_switch_element()
14892 u16 seid = le16_to_cpu(ele->seid); in i40e_setup_pf_switch_element()
14895 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14901 pf->mac_seid = seid; in i40e_setup_pf_switch_element()
14905 if (uplink_seid != pf->mac_seid) in i40e_setup_pf_switch_element()
14907 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14912 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { in i40e_setup_pf_switch_element()
14913 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14917 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14921 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14924 if (pf->lan_veb >= I40E_MAX_VEB) in i40e_setup_pf_switch_element()
14927 pf->veb[pf->lan_veb]->seid = seid; in i40e_setup_pf_switch_element()
14928 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; in i40e_setup_pf_switch_element()
14929 pf->veb[pf->lan_veb]->pf = pf; in i40e_setup_pf_switch_element()
14930 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; in i40e_setup_pf_switch_element()
14938 pf->mac_seid = uplink_seid; in i40e_setup_pf_switch_element()
14939 pf->pf_seid = downlink_seid; in i40e_setup_pf_switch_element()
14940 pf->main_vsi_seid = seid; in i40e_setup_pf_switch_element()
14942 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14944 pf->pf_seid, pf->main_vsi_seid); in i40e_setup_pf_switch_element()
14955 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", in i40e_setup_pf_switch_element()
14962 * i40e_fetch_switch_configuration - Get switch config from firmware
14979 return -ENOMEM; in i40e_fetch_switch_configuration()
14985 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, in i40e_fetch_switch_configuration()
14989 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
14991 i40e_stat_str(&pf->hw, ret), in i40e_fetch_switch_configuration()
14992 i40e_aq_str(&pf->hw, in i40e_fetch_switch_configuration()
14993 pf->hw.aq.asq_last_status)); in i40e_fetch_switch_configuration()
14995 return -ENOENT; in i40e_fetch_switch_configuration()
14998 num_reported = le16_to_cpu(sw_config->header.num_reported); in i40e_fetch_switch_configuration()
14999 num_total = le16_to_cpu(sw_config->header.num_total); in i40e_fetch_switch_configuration()
15002 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
15008 &sw_config->element[i]; in i40e_fetch_switch_configuration()
15020 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
15022 * @reinit: if the Main VSI needs to be re-initialized. in i40e_setup_pf_switch()
15035 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15037 i40e_stat_str(&pf->hw, ret), in i40e_setup_pf_switch()
15038 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15049 if ((pf->hw.pf_id == 0) && in i40e_setup_pf_switch()
15050 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { in i40e_setup_pf_switch()
15052 pf->last_sw_conf_flags = flags; in i40e_setup_pf_switch()
15055 if (pf->hw.pf_id == 0) { in i40e_setup_pf_switch()
15059 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, in i40e_setup_pf_switch()
15061 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_setup_pf_switch()
15062 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15064 i40e_stat_str(&pf->hw, ret), in i40e_setup_pf_switch()
15065 i40e_aq_str(&pf->hw, in i40e_setup_pf_switch()
15066 pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15069 pf->last_sw_conf_valid_flags = valid_flags; in i40e_setup_pf_switch()
15073 if (pf->lan_vsi == I40E_NO_VSI || reinit) { in i40e_setup_pf_switch()
15080 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_setup_pf_switch()
15081 uplink_seid = pf->veb[pf->lan_veb]->seid; in i40e_setup_pf_switch()
15083 uplink_seid = pf->mac_seid; in i40e_setup_pf_switch()
15084 if (pf->lan_vsi == I40E_NO_VSI) in i40e_setup_pf_switch()
15087 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15089 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); in i40e_setup_pf_switch()
15092 return -EAGAIN; in i40e_setup_pf_switch()
15096 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_setup_pf_switch()
15098 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
15099 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_setup_pf_switch()
15100 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_setup_pf_switch()
15102 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15109 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", in i40e_setup_pf_switch()
15117 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_setup_pf_switch()
15123 /* Initialize user-specific link properties */ in i40e_setup_pf_switch()
15124 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & in i40e_setup_pf_switch()
15133 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); in i40e_setup_pf_switch()
15142 * i40e_determine_queue_usage - Work out queue distribution
15150 pf->num_lan_qps = 0; in i40e_determine_queue_usage()
15156 queues_left = pf->hw.func_caps.num_tx_qp; in i40e_determine_queue_usage()
15159 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { in i40e_determine_queue_usage()
15162 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15165 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
15173 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
15174 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
15179 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15180 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15182 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
15188 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
15190 /* Not enough queues for all TCs */ in i40e_determine_queue_usage()
15191 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && in i40e_determine_queue_usage()
15193 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | in i40e_determine_queue_usage()
15195 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); in i40e_determine_queue_usage()
15199 q_max = max_t(int, pf->rss_size_max, num_online_cpus()); in i40e_determine_queue_usage()
15200 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); in i40e_determine_queue_usage()
15201 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); in i40e_determine_queue_usage()
15202 pf->num_lan_qps = q_max; in i40e_determine_queue_usage()
15204 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15207 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_determine_queue_usage()
15209 queues_left -= 1; /* save 1 queue for FD */ in i40e_determine_queue_usage()
15211 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_determine_queue_usage()
15212 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
15213 …dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n… in i40e_determine_queue_usage()
15217 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_determine_queue_usage()
15218 pf->num_vf_qps && pf->num_req_vfs && queues_left) { in i40e_determine_queue_usage()
15219 pf->num_req_vfs = min_t(int, pf->num_req_vfs, in i40e_determine_queue_usage()
15220 (queues_left / pf->num_vf_qps)); in i40e_determine_queue_usage()
15221 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); in i40e_determine_queue_usage()
15224 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_determine_queue_usage()
15225 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { in i40e_determine_queue_usage()
15226 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, in i40e_determine_queue_usage()
15227 (queues_left / pf->num_vmdq_qps)); in i40e_determine_queue_usage()
15228 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); in i40e_determine_queue_usage()
15231 pf->queues_left = queues_left; in i40e_determine_queue_usage()
15232 dev_dbg(&pf->pdev->dev, in i40e_determine_queue_usage()
15234 pf->hw.func_caps.num_tx_qp, in i40e_determine_queue_usage()
15235 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), in i40e_determine_queue_usage()
15236 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, in i40e_determine_queue_usage()
15237 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, in i40e_determine_queue_usage()
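The queue accounting above is straightforward integer budgeting: size the LAN VSI as the larger of rss_size_max and the CPU count, clamped by hardware queues and MSI-X vectors, then carve off one Flow Director queue and whole queue groups for VFs and VMDq. A worked sketch with made-up capability numbers; the kernel's min_t/max_t clamps are modeled with plain macros:

#include <stdio.h>

#define MAX_T(a, b) ((a) > (b) ? (a) : (b))
#define MIN_T(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* invented capabilities for the demo */
	int num_tx_qp = 128, num_msix = 64, rss_size_max = 64, ncpus = 16;
	int num_vf_qps = 4, num_req_vfs = 8;
	int num_vmdq_qps = 2, num_vmdq_vsis = 8;

	int queues_left = num_tx_qp;

	/* LAN VSI: want the larger of RSS max and CPU count, but never
	 * more than the hardware queues or MSI-X vectors available */
	int lan_qps = MAX_T(rss_size_max, ncpus);
	lan_qps = MIN_T(lan_qps, num_tx_qp);
	lan_qps = MIN_T(lan_qps, num_msix);
	queues_left -= lan_qps;

	queues_left -= 1;	/* one queue reserved for Flow Director */

	/* VFs and VMDq VSIs only get whole queue groups */
	num_req_vfs = MIN_T(num_req_vfs, queues_left / num_vf_qps);
	queues_left -= num_req_vfs * num_vf_qps;

	num_vmdq_vsis = MIN_T(num_vmdq_vsis, queues_left / num_vmdq_qps);
	queues_left -= num_vmdq_vsis * num_vmdq_qps;

	printf("lan=%d vfs=%d vmdq=%d left=%d\n",
	       lan_qps, num_req_vfs, num_vmdq_vsis, queues_left);
	return 0;
}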
15242 * i40e_setup_pf_filter_control - Setup PF static filter control
15254 struct i40e_filter_control_settings *settings = &pf->filter_settings; in i40e_setup_pf_filter_control()
15256 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; in i40e_setup_pf_filter_control()
15259 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) in i40e_setup_pf_filter_control()
15260 settings->enable_fdir = true; in i40e_setup_pf_filter_control()
15263 settings->enable_ethtype = true; in i40e_setup_pf_filter_control()
15264 settings->enable_macvlan = true; in i40e_setup_pf_filter_control()
15266 if (i40e_set_filter_control(&pf->hw, settings)) in i40e_setup_pf_filter_control()
15267 return -ENOENT; in i40e_setup_pf_filter_control()
15273 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
15276 struct i40e_hw *hw = &pf->hw; in i40e_print_features()
15284 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); in i40e_print_features()
15286 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); in i40e_print_features()
15289 pf->hw.func_caps.num_vsis, in i40e_print_features()
15290 pf->vsi[pf->lan_vsi]->num_queue_pairs); in i40e_print_features()
15291 if (pf->flags & I40E_FLAG_RSS_ENABLED) in i40e_print_features()
15293 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) in i40e_print_features()
15295 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_print_features()
15299 if (pf->flags & I40E_FLAG_DCB_CAPABLE) in i40e_print_features()
15303 if (pf->flags & I40E_FLAG_PTP) in i40e_print_features()
15305 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_print_features()
15310 dev_info(&pf->pdev->dev, "%s\n", buf); in i40e_print_features()
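i40e_print_features builds its banner with one bounded buffer: each append gets REMAIN(i) bytes and advances i by the return value. That chain is safe because the kernel's scnprintf returns the number of characters actually written, never the would-have-written count; a userspace sketch needs the small wrapper below to get the same guarantee from vsnprintf (all names invented):

#include <stdarg.h>
#include <stdio.h>

#define INFO_LEN 128
#define REMAIN(x) (INFO_LEN - (x))

/* userspace stand-in for the kernel's scnprintf(): never returns
 * more than the space that was actually available */
static int demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char buf[INFO_LEN];
	int i;

	i = demo_scnprintf(buf, INFO_LEN, "Features: PF-id[%d]", 0);
	i += demo_scnprintf(&buf[i], REMAIN(i), " RSS");
	i += demo_scnprintf(&buf[i], REMAIN(i), " FD_ATR");
	i += demo_scnprintf(&buf[i], REMAIN(i), " VEB");

	puts(buf);
	return 0;
}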
15316 * i40e_get_platform_mac_addr - get platform-specific MAC address
15327 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) in i40e_get_platform_mac_addr()
15328 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); in i40e_get_platform_mac_addr()
15332 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15355 * i40e_check_recovery_mode - check if we are running transition firmware
15365 u32 val = rd32(&pf->hw, I40E_GL_FWSTS); in i40e_check_recovery_mode()
15368 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); in i40e_check_recovery_mode()
15369 …dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for detai… in i40e_check_recovery_mode()
15370 set_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_check_recovery_mode()
15374 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_check_recovery_mode()
15375 …dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full … in i40e_check_recovery_mode()
15381 * i40e_pf_loop_reset - perform reset in a loop.
15391 * state is to issue a series of pf-resets and check a return value.
15403 /* wait max 10 seconds for PF reset to succeed */ in i40e_pf_loop_reset()
15406 struct i40e_hw *hw = &pf->hw; in i40e_pf_loop_reset()
15416 pf->pfr_count++; in i40e_pf_loop_reset()
15418 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); in i40e_pf_loop_reset()
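The reset loop bounds the wait: issue a PF reset, retry with a short back-off, and give up once the 10-second budget from the comment above is spent. A compact, self-contained sketch of that bounded-retry shape; demo_try_reset is invented and simply succeeds on the third attempt:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool demo_try_reset(void)
{
	static int attempts;
	return ++attempts >= 3;
}

int main(void)
{
	struct timespec backoff = { 0, 10 * 1000 * 1000 };	/* ~10 ms */
	time_t deadline = time(NULL) + 10;	/* wait at most ~10 s */
	bool ok = demo_try_reset();

	while (!ok && time(NULL) < deadline) {
		nanosleep(&backoff, NULL);	/* back off between attempts */
		ok = demo_try_reset();
	}
	printf("reset %s\n", ok ? "succeeded" : "gave up");
	return ok ? 0 : 1;
}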
15424 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15436 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & in i40e_check_fw_empr()
15443 * i40e_handle_resets - handle EMP resets and PF resets
15459 …dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several … in i40e_handle_resets()
15465 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15480 pci_save_state(pf->pdev); in i40e_init_recovery_mode()
15483 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_init_recovery_mode()
15484 pf->service_timer_period = HZ; in i40e_init_recovery_mode()
15486 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_init_recovery_mode()
15487 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_init_recovery_mode()
15498 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_init_recovery_mode()
15499 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_init_recovery_mode()
15501 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_init_recovery_mode()
15504 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
15506 if (!pf->vsi) { in i40e_init_recovery_mode()
15507 err = -ENOMEM; in i40e_init_recovery_mode()
15519 pf->lan_vsi = v_idx; in i40e_init_recovery_mode()
15520 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
15522 err = -EFAULT; in i40e_init_recovery_mode()
15525 vsi->alloc_queue_pairs = 1; in i40e_init_recovery_mode()
15529 err = register_netdev(vsi->netdev); in i40e_init_recovery_mode()
15532 vsi->netdev_registered = true; in i40e_init_recovery_mode()
15543 mod_timer(&pf->service_timer, in i40e_init_recovery_mode()
15544 round_jiffies(jiffies + pf->service_timer_period)); in i40e_init_recovery_mode()
15550 del_timer_sync(&pf->service_timer); in i40e_init_recovery_mode()
15552 iounmap(hw->hw_addr); in i40e_init_recovery_mode()
15553 pci_disable_pcie_error_reporting(pf->pdev); in i40e_init_recovery_mode()
15554 pci_release_mem_regions(pf->pdev); in i40e_init_recovery_mode()
15555 pci_disable_device(pf->pdev); in i40e_init_recovery_mode()
15562 * i40e_set_subsystem_device_id - set subsystem device id
15570 struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev; in i40e_set_subsystem_device_id()
15572 hw->subsystem_device_id = pdev->subsystem_device ? in i40e_set_subsystem_device_id()
15573 pdev->subsystem_device : in i40e_set_subsystem_device_id()
15578 * i40e_probe - Device initialization routine
15609 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in i40e_probe()
15611 dev_err(&pdev->dev, in i40e_probe()
15619 dev_info(&pdev->dev, in i40e_probe()
15634 err = -ENOMEM; in i40e_probe()
15637 pf->next_vsi = 0; in i40e_probe()
15638 pf->pdev = pdev; in i40e_probe()
15639 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
15641 hw = &pf->hw; in i40e_probe()
15642 hw->back = pf; in i40e_probe()
15644 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), in i40e_probe()
15651 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { in i40e_probe()
15652 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", in i40e_probe()
15653 pf->ioremap_len); in i40e_probe()
15654 err = -ENOMEM; in i40e_probe()
15657 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); in i40e_probe()
15658 if (!hw->hw_addr) { in i40e_probe()
15659 err = -EIO; in i40e_probe()
15660 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", in i40e_probe()
15662 pf->ioremap_len, err); in i40e_probe()
15665 hw->vendor_id = pdev->vendor; in i40e_probe()
15666 hw->device_id = pdev->device; in i40e_probe()
15667 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); in i40e_probe()
15668 hw->subsystem_vendor_id = pdev->subsystem_vendor; in i40e_probe()
15670 hw->bus.device = PCI_SLOT(pdev->devfn); in i40e_probe()
15671 hw->bus.func = PCI_FUNC(pdev->devfn); in i40e_probe()
15672 hw->bus.bus_id = pdev->bus->number; in i40e_probe()
15673 pf->instance = pfs_found; in i40e_probe()
15678 hw->switch_tag = 0xffff; in i40e_probe()
15679 hw->first_tag = ETH_P_8021AD; in i40e_probe()
15680 hw->second_tag = ETH_P_8021Q; in i40e_probe()
15682 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_probe()
15683 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_probe()
15684 INIT_LIST_HEAD(&pf->ddp_old_prof); in i40e_probe()
15689 mutex_init(&hw->aq.asq_mutex); in i40e_probe()
15690 mutex_init(&hw->aq.arq_mutex); in i40e_probe()
15692 pf->msg_enable = netif_msg_init(debug, in i40e_probe()
15696 if (debug < -1) in i40e_probe()
15697 pf->hw.debug_mask = debug; in i40e_probe()
15700 if (hw->revision_id == 0 && in i40e_probe()
15705 pf->corer_count++; in i40e_probe()
15715 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", in i40e_probe()
15727 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN; in i40e_probe()
15728 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN; in i40e_probe()
15730 hw->aq.num_arq_entries = I40E_AQ_LEN; in i40e_probe()
15731 hw->aq.num_asq_entries = I40E_AQ_LEN; in i40e_probe()
15733 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; in i40e_probe()
15734 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; in i40e_probe()
15735 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; in i40e_probe()
15737 snprintf(pf->int_name, sizeof(pf->int_name) - 1, in i40e_probe()
15738 "%s-%s:misc", in i40e_probe()
15739 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); in i40e_probe()
15743 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", in i40e_probe()
15749 pf->hw.fc.requested_mode = I40E_FC_NONE; in i40e_probe()
15754 dev_info(&pdev->dev, in i40e_probe()
15756 hw->aq.api_maj_ver, in i40e_probe()
15757 hw->aq.api_min_ver, in i40e_probe()
15761 dev_info(&pdev->dev, in i40e_probe()
15769 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", in i40e_probe()
15770 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, in i40e_probe()
15771 hw->aq.api_maj_ver, hw->aq.api_min_ver, in i40e_probe()
15772 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, in i40e_probe()
15773 hw->subsystem_vendor_id, hw->subsystem_device_id); in i40e_probe()
15775 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && in i40e_probe()
15776 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) in i40e_probe()
15777 dev_dbg(&pdev->dev, in i40e_probe()
15779 hw->aq.api_maj_ver, in i40e_probe()
15780 hw->aq.api_min_ver, in i40e_probe()
15783 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) in i40e_probe()
15784 dev_info(&pdev->dev, in i40e_probe()
15786 hw->aq.api_maj_ver, in i40e_probe()
15787 hw->aq.api_min_ver, in i40e_probe()
15794 if (hw->revision_id < 1) in i40e_probe()
15795 …dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be is… in i40e_probe()
15805 dev_info(&pdev->dev, "sw_init failed: %d\n", err); in i40e_probe()
15809 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_probe()
15812 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, in i40e_probe()
15813 hw->func_caps.num_rx_qp, 0, 0); in i40e_probe()
15815 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); in i40e_probe()
15821 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); in i40e_probe()
15822 err = -ENOENT; in i40e_probe()
15830 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { in i40e_probe()
15831 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); in i40e_probe()
15838 if (!is_valid_ether_addr(hw->mac.addr)) { in i40e_probe()
15839 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); in i40e_probe()
15840 err = -EIO; in i40e_probe()
15843 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); in i40e_probe()
15844 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); in i40e_probe()
15845 i40e_get_port_mac_addr(hw, hw->mac.port_addr); in i40e_probe()
15846 if (is_valid_ether_addr(hw->mac.port_addr)) in i40e_probe()
15847 pf->hw_features |= I40E_HW_PORT_ID_VALID; in i40e_probe()
15854 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); in i40e_probe()
15857 (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) : in i40e_probe()
15858 (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP); in i40e_probe()
15859 dev_info(&pdev->dev, in i40e_probe()
15860 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? in i40e_probe()
15864 /* Enable FW to write default DCB config on link-up */ in i40e_probe()
15869 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); in i40e_probe()
15870 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); in i40e_probe()
15876 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_probe()
15877 pf->service_timer_period = HZ; in i40e_probe()
15879 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_probe()
15880 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_probe()
15884 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) in i40e_probe()
15885 pf->wol_en = false; in i40e_probe()
15887 pf->wol_en = true; in i40e_probe()
15888 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_probe()
15897 * When MSI-X is enabled, a VSI may not use more TC queue in i40e_probe()
15898 * pairs than there are MSI-X vectors (pf->num_lan_msix). Thus in i40e_probe()
15899 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1. in i40e_probe()
15902 pf->num_lan_msix = 1; in i40e_probe()
15904 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; in i40e_probe()
15905 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; in i40e_probe()
15906 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in i40e_probe()
15907 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; in i40e_probe()
15908 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; in i40e_probe()
15909 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | in i40e_probe()
15917 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_probe()
15918 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_probe()
15920 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_probe()
15921 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in i40e_probe()
15922 dev_warn(&pf->pdev->dev, in i40e_probe()
15924 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in i40e_probe()
15925 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in i40e_probe()
15929 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
15931 if (!pf->vsi) { in i40e_probe()
15932 err = -ENOMEM; in i40e_probe()
15938 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
15939 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
15940 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
15942 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_probe()
15947 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); in i40e_probe()
15950 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); in i40e_probe()
15953 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_probe()
15954 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { in i40e_probe()
15955 i40e_vsi_open(pf->vsi[i]); in i40e_probe()
15963 err = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_probe()
15968 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", in i40e_probe()
15969 i40e_stat_str(&pf->hw, err), in i40e_probe()
15970 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
15983 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_probe()
15985 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_probe()
15987 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", in i40e_probe()
15988 i40e_stat_str(&pf->hw, err), in i40e_probe()
15989 i40e_aq_str(&pf->hw, in i40e_probe()
15990 pf->hw.aq.asq_last_status)); in i40e_probe()
15996 clear_bit(__I40E_DOWN, pf->state); in i40e_probe()
16003 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_probe()
16006 dev_info(&pdev->dev, in i40e_probe()
16016 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
16017 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
16018 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
16026 dev_info(&pdev->dev, in i40e_probe()
16030 dev_info(&pdev->dev, in i40e_probe()
16037 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
16038 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_probe()
16039 pf->num_iwarp_msix, in i40e_probe()
16041 if (pf->iwarp_base_vector < 0) { in i40e_probe()
16042 dev_info(&pdev->dev, in i40e_probe()
16044 pf->num_iwarp_msix, pf->iwarp_base_vector); in i40e_probe()
16045 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_probe()
16055 mod_timer(&pf->service_timer, in i40e_probe()
16056 round_jiffies(jiffies + pf->service_timer_period)); in i40e_probe()
16059 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
16062 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", in i40e_probe()
16072 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { in i40e_probe()
16079 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, in i40e_probe()
16084 switch (hw->bus.speed) { in i40e_probe()
16094 switch (hw->bus.width) { in i40e_probe()
16107 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", in i40e_probe()
16110 if (hw->bus.width < i40e_bus_width_pcie_x8 || in i40e_probe()
16111 hw->bus.speed < i40e_bus_speed_8000) { in i40e_probe()
16112 …dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for opti… in i40e_probe()
16113 …dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or high… in i40e_probe()
16120 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", in i40e_probe()
16121 i40e_stat_str(&pf->hw, err), in i40e_probe()
16122 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16123 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; in i40e_probe()
16126 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); in i40e_probe()
16131 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", in i40e_probe()
16132 i40e_stat_str(&pf->hw, err), in i40e_probe()
16133 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16137 val = (rd32(&pf->hw, I40E_PRTGL_SAH) & in i40e_probe()
16140 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", in i40e_probe()
16149 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_probe()
16150 pf->main_vsi_seid); in i40e_probe()
16152 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || in i40e_probe()
16153 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) in i40e_probe()
16154 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; in i40e_probe()
16155 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) in i40e_probe()
16156 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; in i40e_probe()
16164 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
16166 kfree(pf->vsi); in i40e_probe()
16169 del_timer_sync(&pf->service_timer); in i40e_probe()
16174 kfree(pf->qp_pile); in i40e_probe()
16178 iounmap(hw->hw_addr); in i40e_probe()
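All of the probe failure paths funnel into goto labels that unwind, in reverse order, exactly the steps that had already succeeded; this is the idiomatic kernel error-handling pattern. A reduced sketch with invented resources; on success the resources intentionally stay owned, as they would in a real probe:

#include <stdlib.h>

static int demo_probe(int fail_late)
{
	char *regs, *vsi_table;
	int err;

	regs = malloc(64);			/* step 1: map registers */
	if (!regs)
		return -1;

	vsi_table = malloc(64);			/* step 2: VSI table */
	if (!vsi_table) {
		err = -1;
		goto err_vsi;
	}

	if (fail_late) {			/* step 3: pretend it failed */
		err = -1;
		goto err_late;
	}
	return 0;	/* success: resources stay owned by the driver */

	/* unwind in reverse order of acquisition */
err_late:
	free(vsi_table);
err_vsi:
	free(regs);
	return err;
}

int main(void)
{
	return demo_probe(1) == -1 ? 0 : 1;	/* exercise the unwind */
}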
16191 * i40e_remove - Device removal routine
16196 * Hot-Plug event, or because the driver is going to be removed from
16202 struct i40e_hw *hw = &pf->hw; in i40e_remove()
16218 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_remove()
16220 set_bit(__I40E_IN_REMOVE, pf->state); in i40e_remove()
16222 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { in i40e_remove()
16223 set_bit(__I40E_VF_RESETS_DISABLED, pf->state); in i40e_remove()
16225 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; in i40e_remove()
16228 set_bit(__I40E_SUSPENDED, pf->state); in i40e_remove()
16229 set_bit(__I40E_DOWN, pf->state); in i40e_remove()
16230 if (pf->service_timer.function) in i40e_remove()
16231 del_timer_sync(&pf->service_timer); in i40e_remove()
16232 if (pf->service_task.func) in i40e_remove()
16233 cancel_work_sync(&pf->service_task); in i40e_remove()
16235 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_remove()
16236 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove()
16242 unregister_netdev(vsi->netdev); in i40e_remove()
16243 free_netdev(vsi->netdev); in i40e_remove()
16251 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_remove()
16259 if (!pf->veb[i]) in i40e_remove()
16262 if (pf->veb[i]->uplink_seid == pf->mac_seid || in i40e_remove()
16263 pf->veb[i]->uplink_seid == 0) in i40e_remove()
16264 i40e_switch_branch_release(pf->veb[i]); in i40e_remove()
16270 if (pf->vsi[pf->lan_vsi]) in i40e_remove()
16271 i40e_vsi_release(pf->vsi[pf->lan_vsi]); in i40e_remove()
16276 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_remove()
16279 dev_warn(&pdev->dev, "Failed to delete client device: %d\n", in i40e_remove()
16284 if (hw->hmc.hmc_obj) { in i40e_remove()
16287 dev_warn(&pdev->dev, in i40e_remove()
16294 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_remove()
16295 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_remove()
16296 free_irq(pf->pdev->irq, pf); in i40e_remove()
16302 mutex_destroy(&hw->aq.arq_mutex); in i40e_remove()
16303 mutex_destroy(&hw->aq.asq_mutex); in i40e_remove()
16308 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_remove()
16309 if (pf->vsi[i]) { in i40e_remove()
16310 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_remove()
16311 i40e_vsi_clear_rings(pf->vsi[i]); in i40e_remove()
16312 i40e_vsi_clear(pf->vsi[i]); in i40e_remove()
16313 pf->vsi[i] = NULL; in i40e_remove()
16319 kfree(pf->veb[i]); in i40e_remove()
16320 pf->veb[i] = NULL; in i40e_remove()
16323 kfree(pf->qp_pile); in i40e_remove()
16324 kfree(pf->vsi); in i40e_remove()
16326 iounmap(hw->hw_addr); in i40e_remove()
16335 * i40e_pci_error_detected - warning that something funky happened in PCI land
16348 dev_info(&pdev->dev, "%s: error %d\n", __func__, error); in i40e_pci_error_detected()
16351 dev_info(&pdev->dev, in i40e_pci_error_detected()
16352 "Cannot recover - error happened during device probe\n"); in i40e_pci_error_detected()
16357 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_detected()
16365 * i40e_pci_error_slot_reset - a PCI slot reset just happened
16379 dev_dbg(&pdev->dev, "%s\n", __func__); in i40e_pci_error_slot_reset()
16381 dev_info(&pdev->dev, in i40e_pci_error_slot_reset()
16382 "Cannot re-enable PCI device after reset.\n"); in i40e_pci_error_slot_reset()
16390 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_pci_error_slot_reset()
16401 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16412 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16419 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_pci_error_reset_done()
16426 * i40e_pci_error_resume - restart operations after PCI error recovery
16436 dev_dbg(&pdev->dev, "%s\n", __func__); in i40e_pci_error_resume()
16437 if (test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_resume()
16444 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16450 struct i40e_hw *hw = &pf->hw; in i40e_enable_mc_magic_wake()
16456 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { in i40e_enable_mc_magic_wake()
16458 pf->vsi[pf->lan_vsi]->netdev->dev_addr); in i40e_enable_mc_magic_wake()
16460 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16462 ether_addr_copy(mac_addr, hw->mac.addr); in i40e_enable_mc_magic_wake()
16471 if (hw->func_caps.flex10_enable && hw->partition_id != 1) in i40e_enable_mc_magic_wake()
16476 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16486 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16491 * i40e_shutdown - PCI callback for shutting down
16497 struct i40e_hw *hw = &pf->hw; in i40e_shutdown()
16499 set_bit(__I40E_SUSPENDED, pf->state); in i40e_shutdown()
16500 set_bit(__I40E_DOWN, pf->state); in i40e_shutdown()
16502 del_timer_sync(&pf->service_timer); in i40e_shutdown()
16503 cancel_work_sync(&pf->service_task); in i40e_shutdown()
16510 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_shutdown()
16512 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_shutdown()
16518 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_shutdown()
16520 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_shutdown()
16523 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_shutdown()
16524 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_shutdown()
16525 free_irq(pf->pdev->irq, pf); in i40e_shutdown()
16536 pci_wake_from_d3(pdev, pf->wol_en); in i40e_shutdown()
16542 * i40e_suspend - PM callback for moving to D3
16548 struct i40e_hw *hw = &pf->hw; in i40e_suspend()
16551 if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) in i40e_suspend()
16554 set_bit(__I40E_DOWN, pf->state); in i40e_suspend()
16557 del_timer_sync(&pf->service_timer); in i40e_suspend()
16558 cancel_work_sync(&pf->service_task); in i40e_suspend()
16563 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_suspend()
16565 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_suspend()
16576 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_suspend()
16577 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_suspend()
16592 * i40e_resume - PM callback for waking up from D3
16601 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_resume()
16618 clear_bit(__I40E_DOWN, pf->state); in i40e_resume()
16624 clear_bit(__I40E_SUSPENDED, pf->state); in i40e_resume()
16627 mod_timer(&pf->service_timer, in i40e_resume()
16628 round_jiffies(jiffies + pf->service_timer_period)); in i40e_resume()
16657 * i40e_init_module - Driver registration routine
16679 return -ENOMEM; in i40e_init_module()
16695 * i40e_exit_module - Driver exit cleanup routine