Matched lines from drivers/net/ethernet/intel/i40e/i40e_main.c (sparse excerpt: only lines hit by the search are shown)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
28 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
54 /* i40e_pci_tbl - PCI Device ID Table
92 static int debug = -1;
96 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
103 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
112 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_allocate_dma_mem_d()
114 mem->size = ALIGN(size, alignment); in i40e_allocate_dma_mem_d()
115 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, in i40e_allocate_dma_mem_d()
117 if (!mem->va) in i40e_allocate_dma_mem_d()
118 return -ENOMEM; in i40e_allocate_dma_mem_d()
124 * i40e_free_dma_mem_d - OS specific memory free for shared code
130 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_free_dma_mem_d()
132 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); in i40e_free_dma_mem_d()
133 mem->va = NULL; in i40e_free_dma_mem_d()
134 mem->pa = 0; in i40e_free_dma_mem_d()
135 mem->size = 0; in i40e_free_dma_mem_d()
141 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
149 mem->size = size; in i40e_allocate_virt_mem_d()
150 mem->va = kzalloc(size, GFP_KERNEL); in i40e_allocate_virt_mem_d()
152 if (!mem->va) in i40e_allocate_virt_mem_d()
153 return -ENOMEM; in i40e_allocate_virt_mem_d()
159 * i40e_free_virt_mem_d - OS specific memory free for shared code
166 kfree(mem->va); in i40e_free_virt_mem_d()
167 mem->va = NULL; in i40e_free_virt_mem_d()
168 mem->size = 0; in i40e_free_virt_mem_d()
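
The two hook pairs above exist so the hardware-agnostic shared code can get memory without knowing about Linux. A minimal sketch (not part of i40e_main.c) of the same coherent-DMA alloc/free pairing, using hypothetical demo_* names in place of the driver's types:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_dma_buf {
	void *va;	/* CPU virtual address */
	dma_addr_t pa;	/* bus address handed to the device */
	size_t size;	/* rounded-up size, needed again at free time */
};

static int demo_dma_buf_alloc(struct device *dev, struct demo_dma_buf *b,
			      size_t size, size_t align)
{
	b->size = ALIGN(size, align);
	b->va = dma_alloc_coherent(dev, b->size, &b->pa, GFP_KERNEL);
	return b->va ? 0 : -ENOMEM;
}

static void demo_dma_buf_free(struct device *dev, struct demo_dma_buf *b)
{
	dma_free_coherent(dev, b->size, b->va, b->pa);
	b->va = NULL;
	b->pa = 0;
	b->size = 0;
}
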
174 * i40e_get_lump - find a lump of free generic resource
182 * The search_hint trick and lack of advanced fit-finding only work
189 int ret = -ENOMEM; in i40e_get_lump()
193 dev_info(&pf->pdev->dev, in i40e_get_lump()
196 return -EINVAL; in i40e_get_lump()
200 i = pile->search_hint; in i40e_get_lump()
201 while (i < pile->num_entries) { in i40e_get_lump()
203 if (pile->list[i] & I40E_PILE_VALID_BIT) { in i40e_get_lump()
209 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { in i40e_get_lump()
210 if (pile->list[i+j] & I40E_PILE_VALID_BIT) in i40e_get_lump()
217 pile->list[i+j] = id | I40E_PILE_VALID_BIT; in i40e_get_lump()
219 pile->search_hint = i + j; in i40e_get_lump()
231 * i40e_put_lump - return a lump of generic resource
244 if (!pile || index >= pile->num_entries) in i40e_put_lump()
245 return -EINVAL; in i40e_put_lump()
248 i < pile->num_entries && pile->list[i] == valid_id; in i40e_put_lump()
250 pile->list[i] = 0; in i40e_put_lump()
254 if (count && index < pile->search_hint) in i40e_put_lump()
255 pile->search_hint = index; in i40e_put_lump()
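
A self-contained sketch of the "pile" allocator that i40e_get_lump()/i40e_put_lump() operate on: a flat u16 array where the high bit marks a slot as taken and the low bits carry the owner id, searched first-fit from a saved hint. The demo_* names and simplified struct are illustrative, not the driver's exact i40e_lump_tracking layout; 'needed' is assumed >= 1.

#include <linux/types.h>
#include <linux/errno.h>

#define DEMO_VALID_BIT 0x8000

struct demo_pile {
	u16 num_entries;
	u16 search_hint;	/* first index that might still be free */
	u16 list[];		/* owner id | DEMO_VALID_BIT when taken */
};

static int demo_get_lump(struct demo_pile *pile, u16 needed, u16 id)
{
	int i = pile->search_hint;

	while (i < pile->num_entries) {
		int j;

		/* skip entries that are already handed out */
		if (pile->list[i] & DEMO_VALID_BIT) {
			i++;
			continue;
		}

		/* is there a run of 'needed' free entries starting here? */
		for (j = 0; j < needed && (i + j) < pile->num_entries; j++)
			if (pile->list[i + j] & DEMO_VALID_BIT)
				break;

		if (j == needed) {
			/* claim the run and remember where to look next */
			for (j = 0; j < needed; j++)
				pile->list[i + j] = id | DEMO_VALID_BIT;
			pile->search_hint = i + j;
			return i;	/* base index of the lump */
		}

		/* run too short: continue looking past the conflict */
		i += j;
	}
	return -ENOMEM;
}
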
261 * i40e_find_vsi_from_id - searches for the vsi with the given id
269 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_find_vsi_from_id()
270 if (pf->vsi[i] && (pf->vsi[i]->id == id)) in i40e_find_vsi_from_id()
271 return pf->vsi[i]; in i40e_find_vsi_from_id()
277 * i40e_service_event_schedule - Schedule the service task to wake up
284 if ((!test_bit(__I40E_DOWN, pf->state) && in i40e_service_event_schedule()
285 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || in i40e_service_event_schedule()
286 test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_service_event_schedule()
287 queue_work(i40e_wq, &pf->service_task); in i40e_service_event_schedule()
291 * i40e_tx_timeout - Respond to a Tx Hang
302 struct i40e_vsi *vsi = np->vsi; in i40e_tx_timeout()
303 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout()
308 pf->tx_timeout_count++; in i40e_tx_timeout()
311 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_tx_timeout()
312 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { in i40e_tx_timeout()
314 vsi->tx_rings[i]->queue_index) { in i40e_tx_timeout()
315 tx_ring = vsi->tx_rings[i]; in i40e_tx_timeout()
321 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) in i40e_tx_timeout()
322 pf->tx_timeout_recovery_level = 1; /* reset after some time */ in i40e_tx_timeout()
324 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) in i40e_tx_timeout()
328 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) in i40e_tx_timeout()
334 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_tx_timeout()
335 val = rd32(&pf->hw, in i40e_tx_timeout()
336 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + in i40e_tx_timeout()
337 tx_ring->vsi->base_vector - 1)); in i40e_tx_timeout()
339 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); in i40e_tx_timeout()
342 vsi->seid, txqueue, tx_ring->next_to_clean, in i40e_tx_timeout()
343 head, tx_ring->next_to_use, in i40e_tx_timeout()
344 readl(tx_ring->tail), val); in i40e_tx_timeout()
347 pf->tx_timeout_last_recovery = jiffies; in i40e_tx_timeout()
349 pf->tx_timeout_recovery_level, txqueue); in i40e_tx_timeout()
351 switch (pf->tx_timeout_recovery_level) { in i40e_tx_timeout()
353 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
356 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
359 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
367 pf->tx_timeout_recovery_level++; in i40e_tx_timeout()
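
Pulling the i40e_tx_timeout() fragments together, a sketch of the recovery ladder they implement: each hang inside the watchdog window requests a progressively bigger reset, and 20 quiet seconds (HZ * 20) drop the level back to 1. Illustrative only; the real function also guards against re-entrant recovery and logs ring and interrupt state first.

static void demo_tx_hang_escalate(struct i40e_pf *pf)
{
	if (time_after(jiffies, pf->tx_timeout_last_recovery + HZ * 20))
		pf->tx_timeout_recovery_level = 1;	/* been quiet: start small */

	switch (pf->tx_timeout_recovery_level) {
	case 1:	/* mildest: reset only this physical function */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:	/* stronger: reset the device core */
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:	/* last resort: global reset */
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		return;	/* out of hammers; the hang is only reported */
	}

	pf->tx_timeout_last_recovery = jiffies;
	pf->tx_timeout_recovery_level++;
	i40e_service_event_schedule(pf);
}
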
371 * i40e_get_vsi_stats_struct - Get System Network Statistics
379 return &vsi->net_stats; in i40e_get_vsi_stats_struct()
383 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
394 start = u64_stats_fetch_begin_irq(&ring->syncp); in i40e_get_netdev_stats_struct_tx()
395 packets = ring->stats.packets; in i40e_get_netdev_stats_struct_tx()
396 bytes = ring->stats.bytes; in i40e_get_netdev_stats_struct_tx()
397 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in i40e_get_netdev_stats_struct_tx()
399 stats->tx_packets += packets; in i40e_get_netdev_stats_struct_tx()
400 stats->tx_bytes += bytes; in i40e_get_netdev_stats_struct_tx()
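
The do/while above is the standard u64_stats pattern: on 32-bit SMP kernels a 64-bit counter cannot be read atomically, so the reader snapshots the values and retries if the writer bumped the sequence count mid-read. A minimal sketch with a hypothetical demo_ring_stats type:

#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_read_ring_stats(struct demo_ring_stats *rs,
				 u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&rs->syncp);
		*packets = rs->packets;
		*bytes = rs->bytes;
	} while (u64_stats_fetch_retry_irq(&rs->syncp, start));
}
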
404 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
415 struct i40e_vsi *vsi = np->vsi; in i40e_get_netdev_stats_struct()
420 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_get_netdev_stats_struct()
423 if (!vsi->tx_rings) in i40e_get_netdev_stats_struct()
427 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_get_netdev_stats_struct()
431 ring = READ_ONCE(vsi->tx_rings[i]); in i40e_get_netdev_stats_struct()
437 ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_get_netdev_stats_struct()
443 ring = READ_ONCE(vsi->rx_rings[i]); in i40e_get_netdev_stats_struct()
447 start = u64_stats_fetch_begin_irq(&ring->syncp); in i40e_get_netdev_stats_struct()
448 packets = ring->stats.packets; in i40e_get_netdev_stats_struct()
449 bytes = ring->stats.bytes; in i40e_get_netdev_stats_struct()
450 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in i40e_get_netdev_stats_struct()
452 stats->rx_packets += packets; in i40e_get_netdev_stats_struct()
453 stats->rx_bytes += bytes; in i40e_get_netdev_stats_struct()
459 stats->multicast = vsi_stats->multicast; in i40e_get_netdev_stats_struct()
460 stats->tx_errors = vsi_stats->tx_errors; in i40e_get_netdev_stats_struct()
461 stats->tx_dropped = vsi_stats->tx_dropped; in i40e_get_netdev_stats_struct()
462 stats->rx_errors = vsi_stats->rx_errors; in i40e_get_netdev_stats_struct()
463 stats->rx_dropped = vsi_stats->rx_dropped; in i40e_get_netdev_stats_struct()
464 stats->rx_crc_errors = vsi_stats->rx_crc_errors; in i40e_get_netdev_stats_struct()
465 stats->rx_length_errors = vsi_stats->rx_length_errors; in i40e_get_netdev_stats_struct()
469 * i40e_vsi_reset_stats - Resets all stats of the given vsi
482 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); in i40e_vsi_reset_stats()
483 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); in i40e_vsi_reset_stats()
484 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); in i40e_vsi_reset_stats()
485 if (vsi->rx_rings && vsi->rx_rings[0]) { in i40e_vsi_reset_stats()
486 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_reset_stats()
487 memset(&vsi->rx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
488 sizeof(vsi->rx_rings[i]->stats)); in i40e_vsi_reset_stats()
489 memset(&vsi->rx_rings[i]->rx_stats, 0, in i40e_vsi_reset_stats()
490 sizeof(vsi->rx_rings[i]->rx_stats)); in i40e_vsi_reset_stats()
491 memset(&vsi->tx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
492 sizeof(vsi->tx_rings[i]->stats)); in i40e_vsi_reset_stats()
493 memset(&vsi->tx_rings[i]->tx_stats, 0, in i40e_vsi_reset_stats()
494 sizeof(vsi->tx_rings[i]->tx_stats)); in i40e_vsi_reset_stats()
497 vsi->stat_offsets_loaded = false; in i40e_vsi_reset_stats()
501 * i40e_pf_reset_stats - Reset all of the stats for the given PF
508 memset(&pf->stats, 0, sizeof(pf->stats)); in i40e_pf_reset_stats()
509 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); in i40e_pf_reset_stats()
510 pf->stat_offsets_loaded = false; in i40e_pf_reset_stats()
513 if (pf->veb[i]) { in i40e_pf_reset_stats()
514 memset(&pf->veb[i]->stats, 0, in i40e_pf_reset_stats()
515 sizeof(pf->veb[i]->stats)); in i40e_pf_reset_stats()
516 memset(&pf->veb[i]->stats_offsets, 0, in i40e_pf_reset_stats()
517 sizeof(pf->veb[i]->stats_offsets)); in i40e_pf_reset_stats()
518 memset(&pf->veb[i]->tc_stats, 0, in i40e_pf_reset_stats()
519 sizeof(pf->veb[i]->tc_stats)); in i40e_pf_reset_stats()
520 memset(&pf->veb[i]->tc_stats_offsets, 0, in i40e_pf_reset_stats()
521 sizeof(pf->veb[i]->tc_stats_offsets)); in i40e_pf_reset_stats()
522 pf->veb[i]->stat_offsets_loaded = false; in i40e_pf_reset_stats()
525 pf->hw_csum_rx_error = 0; in i40e_pf_reset_stats()
529 * i40e_stat_update48 - read and update a 48 bit stat from the chip
541 * the potential roll-over.
548 if (hw->device_id == I40E_DEV_ID_QEMU) { in i40e_stat_update48()
557 *stat = new_data - *offset; in i40e_stat_update48()
559 *stat = (new_data + BIT_ULL(48)) - *offset; in i40e_stat_update48()
564 * i40e_stat_update32 - read and update a 32 bit stat from the chip
580 *stat = (u32)(new_data - *offset); in i40e_stat_update32()
582 *stat = (u32)((new_data + BIT_ULL(32)) - *offset); in i40e_stat_update32()
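
i40e_stat_update48() and i40e_stat_update32() share one scheme: the hardware counters are free-running and persist across driver reloads, so the first read latches a baseline into *offset and later reads report the delta, handling wraparound modulo the counter width (the 32-bit variant additionally truncates the result to u32). Worked example for 48 bits: offset 0xFFFFFFFFFFF0 and a new reading of 0x10 give (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20. A combined sketch with a hypothetical demo_* name:

#include <linux/bits.h>

static void demo_stat_update(u64 raw, unsigned int width,
			     bool offset_loaded, u64 *offset, u64 *stat)
{
	if (!offset_loaded)
		*offset = raw;		/* first read: latch the baseline */
	if (raw >= *offset)
		*stat = raw - *offset;
	else				/* the counter wrapped since the baseline */
		*stat = (raw + BIT_ULL(width)) - *offset;
}
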
586 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
600 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
605 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); in i40e_update_eth_stats()
606 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats()
607 struct i40e_hw *hw = &pf->hw; in i40e_update_eth_stats()
611 es = &vsi->eth_stats; in i40e_update_eth_stats()
612 oes = &vsi->eth_stats_offsets; in i40e_update_eth_stats()
616 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
617 &oes->tx_errors, &es->tx_errors); in i40e_update_eth_stats()
619 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
620 &oes->rx_discards, &es->rx_discards); in i40e_update_eth_stats()
622 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
623 &oes->rx_unknown_protocol, &es->rx_unknown_protocol); in i40e_update_eth_stats()
627 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
628 &oes->rx_bytes, &es->rx_bytes); in i40e_update_eth_stats()
631 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
632 &oes->rx_unicast, &es->rx_unicast); in i40e_update_eth_stats()
635 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
636 &oes->rx_multicast, &es->rx_multicast); in i40e_update_eth_stats()
639 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
640 &oes->rx_broadcast, &es->rx_broadcast); in i40e_update_eth_stats()
644 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
645 &oes->tx_bytes, &es->tx_bytes); in i40e_update_eth_stats()
648 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
649 &oes->tx_unicast, &es->tx_unicast); in i40e_update_eth_stats()
652 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
653 &oes->tx_multicast, &es->tx_multicast); in i40e_update_eth_stats()
656 vsi->stat_offsets_loaded, in i40e_update_eth_stats()
657 &oes->tx_broadcast, &es->tx_broadcast); in i40e_update_eth_stats()
658 vsi->stat_offsets_loaded = true; in i40e_update_eth_stats()
662 * i40e_update_veb_stats - Update Switch component statistics
667 struct i40e_pf *pf = veb->pf; in i40e_update_veb_stats()
668 struct i40e_hw *hw = &pf->hw; in i40e_update_veb_stats()
675 idx = veb->stats_idx; in i40e_update_veb_stats()
676 es = &veb->stats; in i40e_update_veb_stats()
677 oes = &veb->stats_offsets; in i40e_update_veb_stats()
678 veb_es = &veb->tc_stats; in i40e_update_veb_stats()
679 veb_oes = &veb->tc_stats_offsets; in i40e_update_veb_stats()
683 veb->stat_offsets_loaded, in i40e_update_veb_stats()
684 &oes->tx_discards, &es->tx_discards); in i40e_update_veb_stats()
685 if (hw->revision_id > 0) in i40e_update_veb_stats()
687 veb->stat_offsets_loaded, in i40e_update_veb_stats()
688 &oes->rx_unknown_protocol, in i40e_update_veb_stats()
689 &es->rx_unknown_protocol); in i40e_update_veb_stats()
691 veb->stat_offsets_loaded, in i40e_update_veb_stats()
692 &oes->rx_bytes, &es->rx_bytes); in i40e_update_veb_stats()
694 veb->stat_offsets_loaded, in i40e_update_veb_stats()
695 &oes->rx_unicast, &es->rx_unicast); in i40e_update_veb_stats()
697 veb->stat_offsets_loaded, in i40e_update_veb_stats()
698 &oes->rx_multicast, &es->rx_multicast); in i40e_update_veb_stats()
700 veb->stat_offsets_loaded, in i40e_update_veb_stats()
701 &oes->rx_broadcast, &es->rx_broadcast); in i40e_update_veb_stats()
704 veb->stat_offsets_loaded, in i40e_update_veb_stats()
705 &oes->tx_bytes, &es->tx_bytes); in i40e_update_veb_stats()
707 veb->stat_offsets_loaded, in i40e_update_veb_stats()
708 &oes->tx_unicast, &es->tx_unicast); in i40e_update_veb_stats()
710 veb->stat_offsets_loaded, in i40e_update_veb_stats()
711 &oes->tx_multicast, &es->tx_multicast); in i40e_update_veb_stats()
713 veb->stat_offsets_loaded, in i40e_update_veb_stats()
714 &oes->tx_broadcast, &es->tx_broadcast); in i40e_update_veb_stats()
718 veb->stat_offsets_loaded, in i40e_update_veb_stats()
719 &veb_oes->tc_rx_packets[i], in i40e_update_veb_stats()
720 &veb_es->tc_rx_packets[i]); in i40e_update_veb_stats()
723 veb->stat_offsets_loaded, in i40e_update_veb_stats()
724 &veb_oes->tc_rx_bytes[i], in i40e_update_veb_stats()
725 &veb_es->tc_rx_bytes[i]); in i40e_update_veb_stats()
728 veb->stat_offsets_loaded, in i40e_update_veb_stats()
729 &veb_oes->tc_tx_packets[i], in i40e_update_veb_stats()
730 &veb_es->tc_tx_packets[i]); in i40e_update_veb_stats()
733 veb->stat_offsets_loaded, in i40e_update_veb_stats()
734 &veb_oes->tc_tx_bytes[i], in i40e_update_veb_stats()
735 &veb_es->tc_tx_bytes[i]); in i40e_update_veb_stats()
737 veb->stat_offsets_loaded = true; in i40e_update_veb_stats()
741 * i40e_update_vsi_stats - Update the vsi statistics counters.
752 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats()
768 if (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_update_vsi_stats()
769 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_update_vsi_stats()
773 ons = &vsi->net_stats_offsets; in i40e_update_vsi_stats()
774 es = &vsi->eth_stats; in i40e_update_vsi_stats()
775 oes = &vsi->eth_stats_offsets; in i40e_update_vsi_stats()
786 for (q = 0; q < vsi->num_queue_pairs; q++) { in i40e_update_vsi_stats()
788 p = READ_ONCE(vsi->tx_rings[q]); in i40e_update_vsi_stats()
793 start = u64_stats_fetch_begin_irq(&p->syncp); in i40e_update_vsi_stats()
794 packets = p->stats.packets; in i40e_update_vsi_stats()
795 bytes = p->stats.bytes; in i40e_update_vsi_stats()
796 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); in i40e_update_vsi_stats()
799 tx_restart += p->tx_stats.restart_queue; in i40e_update_vsi_stats()
800 tx_busy += p->tx_stats.tx_busy; in i40e_update_vsi_stats()
801 tx_linearize += p->tx_stats.tx_linearize; in i40e_update_vsi_stats()
802 tx_force_wb += p->tx_stats.tx_force_wb; in i40e_update_vsi_stats()
805 p = READ_ONCE(vsi->rx_rings[q]); in i40e_update_vsi_stats()
810 start = u64_stats_fetch_begin_irq(&p->syncp); in i40e_update_vsi_stats()
811 packets = p->stats.packets; in i40e_update_vsi_stats()
812 bytes = p->stats.bytes; in i40e_update_vsi_stats()
813 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); in i40e_update_vsi_stats()
816 rx_buf += p->rx_stats.alloc_buff_failed; in i40e_update_vsi_stats()
817 rx_page += p->rx_stats.alloc_page_failed; in i40e_update_vsi_stats()
821 p = READ_ONCE(vsi->xdp_rings[q]); in i40e_update_vsi_stats()
826 start = u64_stats_fetch_begin_irq(&p->syncp); in i40e_update_vsi_stats()
827 packets = p->stats.packets; in i40e_update_vsi_stats()
828 bytes = p->stats.bytes; in i40e_update_vsi_stats()
829 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); in i40e_update_vsi_stats()
832 tx_restart += p->tx_stats.restart_queue; in i40e_update_vsi_stats()
833 tx_busy += p->tx_stats.tx_busy; in i40e_update_vsi_stats()
834 tx_linearize += p->tx_stats.tx_linearize; in i40e_update_vsi_stats()
835 tx_force_wb += p->tx_stats.tx_force_wb; in i40e_update_vsi_stats()
839 vsi->tx_restart = tx_restart; in i40e_update_vsi_stats()
840 vsi->tx_busy = tx_busy; in i40e_update_vsi_stats()
841 vsi->tx_linearize = tx_linearize; in i40e_update_vsi_stats()
842 vsi->tx_force_wb = tx_force_wb; in i40e_update_vsi_stats()
843 vsi->rx_page_failed = rx_page; in i40e_update_vsi_stats()
844 vsi->rx_buf_failed = rx_buf; in i40e_update_vsi_stats()
846 ns->rx_packets = rx_p; in i40e_update_vsi_stats()
847 ns->rx_bytes = rx_b; in i40e_update_vsi_stats()
848 ns->tx_packets = tx_p; in i40e_update_vsi_stats()
849 ns->tx_bytes = tx_b; in i40e_update_vsi_stats()
853 ons->tx_errors = oes->tx_errors; in i40e_update_vsi_stats()
854 ns->tx_errors = es->tx_errors; in i40e_update_vsi_stats()
855 ons->multicast = oes->rx_multicast; in i40e_update_vsi_stats()
856 ns->multicast = es->rx_multicast; in i40e_update_vsi_stats()
857 ons->rx_dropped = oes->rx_discards; in i40e_update_vsi_stats()
858 ns->rx_dropped = es->rx_discards; in i40e_update_vsi_stats()
859 ons->tx_dropped = oes->tx_discards; in i40e_update_vsi_stats()
860 ns->tx_dropped = es->tx_discards; in i40e_update_vsi_stats()
863 if (vsi == pf->vsi[pf->lan_vsi]) { in i40e_update_vsi_stats()
864 ns->rx_crc_errors = pf->stats.crc_errors; in i40e_update_vsi_stats()
865 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; in i40e_update_vsi_stats()
866 ns->rx_length_errors = pf->stats.rx_length_errors; in i40e_update_vsi_stats()
871 * i40e_update_pf_stats - Update the PF statistics counters.
876 struct i40e_hw_port_stats *osd = &pf->stats_offsets; in i40e_update_pf_stats()
877 struct i40e_hw_port_stats *nsd = &pf->stats; in i40e_update_pf_stats()
878 struct i40e_hw *hw = &pf->hw; in i40e_update_pf_stats()
882 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), in i40e_update_pf_stats()
883 I40E_GLPRT_GORCL(hw->port), in i40e_update_pf_stats()
884 pf->stat_offsets_loaded, in i40e_update_pf_stats()
885 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); in i40e_update_pf_stats()
886 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), in i40e_update_pf_stats()
887 I40E_GLPRT_GOTCL(hw->port), in i40e_update_pf_stats()
888 pf->stat_offsets_loaded, in i40e_update_pf_stats()
889 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); in i40e_update_pf_stats()
890 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), in i40e_update_pf_stats()
891 pf->stat_offsets_loaded, in i40e_update_pf_stats()
892 &osd->eth.rx_discards, in i40e_update_pf_stats()
893 &nsd->eth.rx_discards); in i40e_update_pf_stats()
894 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), in i40e_update_pf_stats()
895 I40E_GLPRT_UPRCL(hw->port), in i40e_update_pf_stats()
896 pf->stat_offsets_loaded, in i40e_update_pf_stats()
897 &osd->eth.rx_unicast, in i40e_update_pf_stats()
898 &nsd->eth.rx_unicast); in i40e_update_pf_stats()
899 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), in i40e_update_pf_stats()
900 I40E_GLPRT_MPRCL(hw->port), in i40e_update_pf_stats()
901 pf->stat_offsets_loaded, in i40e_update_pf_stats()
902 &osd->eth.rx_multicast, in i40e_update_pf_stats()
903 &nsd->eth.rx_multicast); in i40e_update_pf_stats()
904 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), in i40e_update_pf_stats()
905 I40E_GLPRT_BPRCL(hw->port), in i40e_update_pf_stats()
906 pf->stat_offsets_loaded, in i40e_update_pf_stats()
907 &osd->eth.rx_broadcast, in i40e_update_pf_stats()
908 &nsd->eth.rx_broadcast); in i40e_update_pf_stats()
909 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), in i40e_update_pf_stats()
910 I40E_GLPRT_UPTCL(hw->port), in i40e_update_pf_stats()
911 pf->stat_offsets_loaded, in i40e_update_pf_stats()
912 &osd->eth.tx_unicast, in i40e_update_pf_stats()
913 &nsd->eth.tx_unicast); in i40e_update_pf_stats()
914 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), in i40e_update_pf_stats()
915 I40E_GLPRT_MPTCL(hw->port), in i40e_update_pf_stats()
916 pf->stat_offsets_loaded, in i40e_update_pf_stats()
917 &osd->eth.tx_multicast, in i40e_update_pf_stats()
918 &nsd->eth.tx_multicast); in i40e_update_pf_stats()
919 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), in i40e_update_pf_stats()
920 I40E_GLPRT_BPTCL(hw->port), in i40e_update_pf_stats()
921 pf->stat_offsets_loaded, in i40e_update_pf_stats()
922 &osd->eth.tx_broadcast, in i40e_update_pf_stats()
923 &nsd->eth.tx_broadcast); in i40e_update_pf_stats()
925 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), in i40e_update_pf_stats()
926 pf->stat_offsets_loaded, in i40e_update_pf_stats()
927 &osd->tx_dropped_link_down, in i40e_update_pf_stats()
928 &nsd->tx_dropped_link_down); in i40e_update_pf_stats()
930 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), in i40e_update_pf_stats()
931 pf->stat_offsets_loaded, in i40e_update_pf_stats()
932 &osd->crc_errors, &nsd->crc_errors); in i40e_update_pf_stats()
934 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), in i40e_update_pf_stats()
935 pf->stat_offsets_loaded, in i40e_update_pf_stats()
936 &osd->illegal_bytes, &nsd->illegal_bytes); in i40e_update_pf_stats()
938 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), in i40e_update_pf_stats()
939 pf->stat_offsets_loaded, in i40e_update_pf_stats()
940 &osd->mac_local_faults, in i40e_update_pf_stats()
941 &nsd->mac_local_faults); in i40e_update_pf_stats()
942 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), in i40e_update_pf_stats()
943 pf->stat_offsets_loaded, in i40e_update_pf_stats()
944 &osd->mac_remote_faults, in i40e_update_pf_stats()
945 &nsd->mac_remote_faults); in i40e_update_pf_stats()
947 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), in i40e_update_pf_stats()
948 pf->stat_offsets_loaded, in i40e_update_pf_stats()
949 &osd->rx_length_errors, in i40e_update_pf_stats()
950 &nsd->rx_length_errors); in i40e_update_pf_stats()
952 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), in i40e_update_pf_stats()
953 pf->stat_offsets_loaded, in i40e_update_pf_stats()
954 &osd->link_xon_rx, &nsd->link_xon_rx); in i40e_update_pf_stats()
955 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), in i40e_update_pf_stats()
956 pf->stat_offsets_loaded, in i40e_update_pf_stats()
957 &osd->link_xon_tx, &nsd->link_xon_tx); in i40e_update_pf_stats()
958 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), in i40e_update_pf_stats()
959 pf->stat_offsets_loaded, in i40e_update_pf_stats()
960 &osd->link_xoff_rx, &nsd->link_xoff_rx); in i40e_update_pf_stats()
961 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), in i40e_update_pf_stats()
962 pf->stat_offsets_loaded, in i40e_update_pf_stats()
963 &osd->link_xoff_tx, &nsd->link_xoff_tx); in i40e_update_pf_stats()
966 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), in i40e_update_pf_stats()
967 pf->stat_offsets_loaded, in i40e_update_pf_stats()
968 &osd->priority_xoff_rx[i], in i40e_update_pf_stats()
969 &nsd->priority_xoff_rx[i]); in i40e_update_pf_stats()
970 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), in i40e_update_pf_stats()
971 pf->stat_offsets_loaded, in i40e_update_pf_stats()
972 &osd->priority_xon_rx[i], in i40e_update_pf_stats()
973 &nsd->priority_xon_rx[i]); in i40e_update_pf_stats()
974 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), in i40e_update_pf_stats()
975 pf->stat_offsets_loaded, in i40e_update_pf_stats()
976 &osd->priority_xon_tx[i], in i40e_update_pf_stats()
977 &nsd->priority_xon_tx[i]); in i40e_update_pf_stats()
978 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), in i40e_update_pf_stats()
979 pf->stat_offsets_loaded, in i40e_update_pf_stats()
980 &osd->priority_xoff_tx[i], in i40e_update_pf_stats()
981 &nsd->priority_xoff_tx[i]); in i40e_update_pf_stats()
983 I40E_GLPRT_RXON2OFFCNT(hw->port, i), in i40e_update_pf_stats()
984 pf->stat_offsets_loaded, in i40e_update_pf_stats()
985 &osd->priority_xon_2_xoff[i], in i40e_update_pf_stats()
986 &nsd->priority_xon_2_xoff[i]); in i40e_update_pf_stats()
989 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), in i40e_update_pf_stats()
990 I40E_GLPRT_PRC64L(hw->port), in i40e_update_pf_stats()
991 pf->stat_offsets_loaded, in i40e_update_pf_stats()
992 &osd->rx_size_64, &nsd->rx_size_64); in i40e_update_pf_stats()
993 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), in i40e_update_pf_stats()
994 I40E_GLPRT_PRC127L(hw->port), in i40e_update_pf_stats()
995 pf->stat_offsets_loaded, in i40e_update_pf_stats()
996 &osd->rx_size_127, &nsd->rx_size_127); in i40e_update_pf_stats()
997 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), in i40e_update_pf_stats()
998 I40E_GLPRT_PRC255L(hw->port), in i40e_update_pf_stats()
999 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1000 &osd->rx_size_255, &nsd->rx_size_255); in i40e_update_pf_stats()
1001 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), in i40e_update_pf_stats()
1002 I40E_GLPRT_PRC511L(hw->port), in i40e_update_pf_stats()
1003 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1004 &osd->rx_size_511, &nsd->rx_size_511); in i40e_update_pf_stats()
1005 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), in i40e_update_pf_stats()
1006 I40E_GLPRT_PRC1023L(hw->port), in i40e_update_pf_stats()
1007 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1008 &osd->rx_size_1023, &nsd->rx_size_1023); in i40e_update_pf_stats()
1009 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), in i40e_update_pf_stats()
1010 I40E_GLPRT_PRC1522L(hw->port), in i40e_update_pf_stats()
1011 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1012 &osd->rx_size_1522, &nsd->rx_size_1522); in i40e_update_pf_stats()
1013 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), in i40e_update_pf_stats()
1014 I40E_GLPRT_PRC9522L(hw->port), in i40e_update_pf_stats()
1015 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1016 &osd->rx_size_big, &nsd->rx_size_big); in i40e_update_pf_stats()
1018 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), in i40e_update_pf_stats()
1019 I40E_GLPRT_PTC64L(hw->port), in i40e_update_pf_stats()
1020 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1021 &osd->tx_size_64, &nsd->tx_size_64); in i40e_update_pf_stats()
1022 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), in i40e_update_pf_stats()
1023 I40E_GLPRT_PTC127L(hw->port), in i40e_update_pf_stats()
1024 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1025 &osd->tx_size_127, &nsd->tx_size_127); in i40e_update_pf_stats()
1026 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), in i40e_update_pf_stats()
1027 I40E_GLPRT_PTC255L(hw->port), in i40e_update_pf_stats()
1028 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1029 &osd->tx_size_255, &nsd->tx_size_255); in i40e_update_pf_stats()
1030 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), in i40e_update_pf_stats()
1031 I40E_GLPRT_PTC511L(hw->port), in i40e_update_pf_stats()
1032 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1033 &osd->tx_size_511, &nsd->tx_size_511); in i40e_update_pf_stats()
1034 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), in i40e_update_pf_stats()
1035 I40E_GLPRT_PTC1023L(hw->port), in i40e_update_pf_stats()
1036 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1037 &osd->tx_size_1023, &nsd->tx_size_1023); in i40e_update_pf_stats()
1038 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), in i40e_update_pf_stats()
1039 I40E_GLPRT_PTC1522L(hw->port), in i40e_update_pf_stats()
1040 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1041 &osd->tx_size_1522, &nsd->tx_size_1522); in i40e_update_pf_stats()
1042 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), in i40e_update_pf_stats()
1043 I40E_GLPRT_PTC9522L(hw->port), in i40e_update_pf_stats()
1044 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1045 &osd->tx_size_big, &nsd->tx_size_big); in i40e_update_pf_stats()
1047 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), in i40e_update_pf_stats()
1048 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1049 &osd->rx_undersize, &nsd->rx_undersize); in i40e_update_pf_stats()
1050 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), in i40e_update_pf_stats()
1051 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1052 &osd->rx_fragments, &nsd->rx_fragments); in i40e_update_pf_stats()
1053 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), in i40e_update_pf_stats()
1054 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1055 &osd->rx_oversize, &nsd->rx_oversize); in i40e_update_pf_stats()
1056 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), in i40e_update_pf_stats()
1057 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1058 &osd->rx_jabber, &nsd->rx_jabber); in i40e_update_pf_stats()
1062 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)), in i40e_update_pf_stats()
1063 &nsd->fd_atr_match); in i40e_update_pf_stats()
1065 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)), in i40e_update_pf_stats()
1066 &nsd->fd_sb_match); in i40e_update_pf_stats()
1068 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)), in i40e_update_pf_stats()
1069 &nsd->fd_atr_tunnel_match); in i40e_update_pf_stats()
1072 nsd->tx_lpi_status = in i40e_update_pf_stats()
1075 nsd->rx_lpi_status = in i40e_update_pf_stats()
1079 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1080 &osd->tx_lpi_count, &nsd->tx_lpi_count); in i40e_update_pf_stats()
1082 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1083 &osd->rx_lpi_count, &nsd->rx_lpi_count); in i40e_update_pf_stats()
1085 if (pf->flags & I40E_FLAG_FD_SB_ENABLED && in i40e_update_pf_stats()
1086 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1087 nsd->fd_sb_status = true; in i40e_update_pf_stats()
1089 nsd->fd_sb_status = false; in i40e_update_pf_stats()
1091 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && in i40e_update_pf_stats()
1092 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1093 nsd->fd_atr_status = true; in i40e_update_pf_stats()
1095 nsd->fd_atr_status = false; in i40e_update_pf_stats()
1097 pf->stat_offsets_loaded = true; in i40e_update_pf_stats()
1101 * i40e_update_stats - Update the various statistics counters.
1108 struct i40e_pf *pf = vsi->back; in i40e_update_stats()
1110 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_update_stats()
1117 * i40e_count_filters - counts VSI mac filters
1129 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) in i40e_count_filters()
1136 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1153 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_filter()
1154 if ((ether_addr_equal(macaddr, f->macaddr)) && in i40e_find_filter()
1155 (vlan == f->vlan)) in i40e_find_filter()
1162 * i40e_find_mac - Find a mac addr in the macvlan filters list
1178 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { in i40e_find_mac()
1179 if ((ether_addr_equal(macaddr, f->macaddr))) in i40e_find_mac()
1186 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1194 if (vsi->info.pvid) in i40e_is_vsi_in_vlan()
1217 return vsi->has_vlan_filter; in i40e_is_vsi_in_vlan()
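
i40e_find_filter() and i40e_find_mac() above walk a kernel hashtable keyed by the MAC address packed into a u64, which is what i40e_addr_to_hkey() produces. A simplified, self-contained sketch using hypothetical demo_* names (the driver's i40e_mac_filter carries additional state):

#include <linux/hashtable.h>
#include <linux/etherdevice.h>

struct demo_mac_filter {
	struct hlist_node hlist;
	u8 macaddr[ETH_ALEN];
	s16 vlan;		/* 0 = untagged, -1 = any */
};

DEFINE_HASHTABLE(demo_mac_filter_hash, 8);	/* 256 buckets */

static u64 demo_addr_to_hkey(const u8 *macaddr)
{
	u64 key = 0;

	ether_addr_copy((u8 *)&key, macaddr);	/* 6 bytes into the low 48 bits */
	return key;
}

static struct demo_mac_filter *demo_find_filter(const u8 *macaddr, s16 vlan)
{
	struct demo_mac_filter *f;
	u64 key = demo_addr_to_hkey(macaddr);

	hash_for_each_possible(demo_mac_filter_hash, f, hlist, key) {
		if (ether_addr_equal(macaddr, f->macaddr) && f->vlan == vlan)
			return f;
	}
	return NULL;
}
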
1221 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1227 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1229 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1231 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1236 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1241 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1254 s16 pvid = le16_to_cpu(vsi->info.pvid); in i40e_correct_mac_vlan_filters()
1267 * which are marked as VLAN=-1 must be replaced with in i40e_correct_mac_vlan_filters()
1271 * marked as VLAN=-1 in i40e_correct_mac_vlan_filters()
1276 if (pvid && new->f->vlan != pvid) in i40e_correct_mac_vlan_filters()
1277 new->f->vlan = pvid; in i40e_correct_mac_vlan_filters()
1278 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) in i40e_correct_mac_vlan_filters()
1279 new->f->vlan = 0; in i40e_correct_mac_vlan_filters()
1280 else if (!vlan_filters && new->f->vlan == 0) in i40e_correct_mac_vlan_filters()
1281 new->f->vlan = I40E_VLAN_ANY; in i40e_correct_mac_vlan_filters()
1285 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_correct_mac_vlan_filters()
1291 if ((pvid && f->vlan != pvid) || in i40e_correct_mac_vlan_filters()
1292 (vlan_filters && f->vlan == I40E_VLAN_ANY) || in i40e_correct_mac_vlan_filters()
1293 (!vlan_filters && f->vlan == 0)) { in i40e_correct_mac_vlan_filters()
1303 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); in i40e_correct_mac_vlan_filters()
1305 return -ENOMEM; in i40e_correct_mac_vlan_filters()
1310 return -ENOMEM; in i40e_correct_mac_vlan_filters()
1312 new->f = add_head; in i40e_correct_mac_vlan_filters()
1313 new->state = add_head->state; in i40e_correct_mac_vlan_filters()
1316 hlist_add_head(&new->hlist, tmp_add_list); in i40e_correct_mac_vlan_filters()
1319 f->state = I40E_FILTER_REMOVE; in i40e_correct_mac_vlan_filters()
1320 hash_del(&f->hlist); in i40e_correct_mac_vlan_filters()
1321 hlist_add_head(&f->hlist, tmp_del_list); in i40e_correct_mac_vlan_filters()
1325 vsi->has_vlan_filter = !!vlan_filters; in i40e_correct_mac_vlan_filters()
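
Condensing the rules the loop above applies to every filter into one hypothetical helper (the real code must additionally delete and re-add the filter under the hash lock, as the surrounding lines show):

static s16 demo_correct_filter_vlan(s16 cur_vlan, s16 pvid, bool vlan_filters)
{
	if (pvid && cur_vlan != pvid)
		return pvid;		/* port VLAN overrides everything */
	if (vlan_filters && cur_vlan == I40E_VLAN_ANY)
		return 0;		/* VLAN mode: ANY would be too broad */
	if (!vlan_filters && cur_vlan == 0)
		return I40E_VLAN_ANY;	/* non-VLAN mode: catch all traffic */
	return cur_vlan;		/* already consistent */
}
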
1331 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1332 * @vsi: the PF Main VSI - inappropriate for any other VSI
1341 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter()
1344 if (vsi->type != I40E_VSI_MAIN) in i40e_rm_default_mac_filter()
1352 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1360 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1364 * i40e_add_filter - Add a mac/vlan filter to the VSI
1393 vsi->has_vlan_filter = true; in i40e_add_filter()
1395 ether_addr_copy(f->macaddr, macaddr); in i40e_add_filter()
1396 f->vlan = vlan; in i40e_add_filter()
1397 f->state = I40E_FILTER_NEW; in i40e_add_filter()
1398 INIT_HLIST_NODE(&f->hlist); in i40e_add_filter()
1401 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_add_filter()
1403 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_filter()
1404 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_add_filter()
1415 if (f->state == I40E_FILTER_REMOVE) in i40e_add_filter()
1416 f->state = I40E_FILTER_ACTIVE; in i40e_add_filter()
1422 * __i40e_del_filter - Remove a specific filter from the VSI
1445 if ((f->state == I40E_FILTER_FAILED) || in __i40e_del_filter()
1446 (f->state == I40E_FILTER_NEW)) { in __i40e_del_filter()
1447 hash_del(&f->hlist); in __i40e_del_filter()
1450 f->state = I40E_FILTER_REMOVE; in __i40e_del_filter()
1453 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in __i40e_del_filter()
1454 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in __i40e_del_filter()
1458 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1481 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1499 if (vsi->info.pvid) in i40e_add_mac_filter()
1501 le16_to_cpu(vsi->info.pvid)); in i40e_add_mac_filter()
1506 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_mac_filter()
1507 if (f->state == I40E_FILTER_REMOVE) in i40e_add_mac_filter()
1509 add = i40e_add_filter(vsi, macaddr, f->vlan); in i40e_add_mac_filter()
1518 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1534 lockdep_assert_held(&vsi->mac_filter_hash_lock); in i40e_del_mac_filter()
1535 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_del_mac_filter()
1536 if (ether_addr_equal(macaddr, f->macaddr)) { in i40e_del_mac_filter()
1545 return -ENOENT; in i40e_del_mac_filter()
1549 * i40e_set_mac - NDO callback to set mac address
1558 struct i40e_vsi *vsi = np->vsi; in i40e_set_mac()
1559 struct i40e_pf *pf = vsi->back; in i40e_set_mac()
1560 struct i40e_hw *hw = &pf->hw; in i40e_set_mac()
1563 if (!is_valid_ether_addr(addr->sa_data)) in i40e_set_mac()
1564 return -EADDRNOTAVAIL; in i40e_set_mac()
1566 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { in i40e_set_mac()
1568 addr->sa_data); in i40e_set_mac()
1572 if (test_bit(__I40E_DOWN, pf->state) || in i40e_set_mac()
1573 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_set_mac()
1574 return -EADDRNOTAVAIL; in i40e_set_mac()
1576 if (ether_addr_equal(hw->mac.addr, addr->sa_data)) in i40e_set_mac()
1578 hw->mac.addr); in i40e_set_mac()
1580 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); in i40e_set_mac()
1584 * - Remove old address from MAC filter in i40e_set_mac()
1585 * - Copy new address in i40e_set_mac()
1586 * - Add new address to MAC filter in i40e_set_mac()
1588 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1589 i40e_del_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1590 ether_addr_copy(netdev->dev_addr, addr->sa_data); in i40e_set_mac()
1591 i40e_add_mac_filter(vsi, netdev->dev_addr); in i40e_set_mac()
1592 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_mac()
1594 if (vsi->type == I40E_VSI_MAIN) { in i40e_set_mac()
1598 addr->sa_data, NULL); in i40e_set_mac()
1602 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_mac()
1613 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1622 struct i40e_pf *pf = vsi->back; in i40e_config_rss_aq()
1623 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_aq()
1629 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); in i40e_config_rss_aq()
1631 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1634 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_config_rss_aq()
1639 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_config_rss_aq()
1641 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_config_rss_aq()
1643 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1646 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_config_rss_aq()
1654 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1659 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_rss()
1664 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) in i40e_vsi_config_rss()
1666 if (!vsi->rss_size) in i40e_vsi_config_rss()
1667 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_vsi_config_rss()
1668 vsi->num_queue_pairs); in i40e_vsi_config_rss()
1669 if (!vsi->rss_size) in i40e_vsi_config_rss()
1670 return -EINVAL; in i40e_vsi_config_rss()
1671 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_config_rss()
1673 return -ENOMEM; in i40e_vsi_config_rss()
1678 if (vsi->rss_lut_user) in i40e_vsi_config_rss()
1679 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_vsi_config_rss()
1681 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_vsi_config_rss()
1682 if (vsi->rss_hkey_user) in i40e_vsi_config_rss()
1683 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_config_rss()
1686 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_config_rss()
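
When the user has not supplied a lookup table, i40e_fill_rss_lut() (called above) spreads the LUT entries over the active queues; to the best of my reading this is plain modulo striping. Sketch:

static void demo_fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
	u16 i;

	/* lut[i] names the queue that hash bucket i lands on */
	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;
}
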
1692 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio-based tc_config
1707 if (vsi->type != I40E_VSI_MAIN) in i40e_vsi_setup_queue_map_mqprio()
1708 return -EINVAL; in i40e_vsi_setup_queue_map_mqprio()
1711 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; in i40e_vsi_setup_queue_map_mqprio()
1712 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map_mqprio()
1713 num_qps = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1715 /* find the next higher power-of-2 of num queue pairs */ in i40e_vsi_setup_queue_map_mqprio()
1722 /* Setup queue offset/count for all TCs for given VSI */ in i40e_vsi_setup_queue_map_mqprio()
1723 max_qcount = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1726 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map_mqprio()
1727 offset = vsi->mqprio_qopt.qopt.offset[i]; in i40e_vsi_setup_queue_map_mqprio()
1728 qcount = vsi->mqprio_qopt.qopt.count[i]; in i40e_vsi_setup_queue_map_mqprio()
1731 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map_mqprio()
1732 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map_mqprio()
1733 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map_mqprio()
1739 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map_mqprio()
1740 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map_mqprio()
1741 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map_mqprio()
1746 vsi->num_queue_pairs = offset + qcount; in i40e_vsi_setup_queue_map_mqprio()
1749 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map_mqprio()
1750 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); in i40e_vsi_setup_queue_map_mqprio()
1751 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map_mqprio()
1752 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_vsi_setup_queue_map_mqprio()
1755 vsi->rss_size = max_qcount; in i40e_vsi_setup_queue_map_mqprio()
1758 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
1763 vsi->reconfig_rss = true; in i40e_vsi_setup_queue_map_mqprio()
1764 dev_dbg(&vsi->back->pdev->dev, in i40e_vsi_setup_queue_map_mqprio()
1770 override_q = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1771 if (override_q && override_q < vsi->num_queue_pairs) { in i40e_vsi_setup_queue_map_mqprio()
1772 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; in i40e_vsi_setup_queue_map_mqprio()
1773 vsi->next_base_queue = override_q; in i40e_vsi_setup_queue_map_mqprio()
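
The "next higher power-of-2" step noted above exists because the hardware queue map encodes each TC's queue count as a power-of-2 exponent, so a request for e.g. 6 queues must be described as 2^3 = 8 slots. A sketch of the rounding with helpers from <linux/log2.h>:

#include <linux/log2.h>

static u8 demo_qcount_pow(u16 num_qps)
{
	u8 pow = ilog2(num_qps);	/* floor(log2(num_qps)) */

	if (!is_power_of_2(num_qps))
		pow++;			/* e.g. 6 -> 3, i.e. 8 queue slots */
	return pow;
}
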
1779 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1782 * @enabled_tc: Enabled TCs bitmap
1792 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_queue_map()
1806 num_tc_qps = vsi->alloc_queue_pairs; in i40e_vsi_setup_queue_map()
1807 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_setup_queue_map()
1814 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); in i40e_vsi_setup_queue_map()
1822 vsi->tc_config.numtc = numtc; in i40e_vsi_setup_queue_map()
1823 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; in i40e_vsi_setup_queue_map()
1825 /* Do not allow using more TC queue pairs than MSI-X vectors exist */ in i40e_vsi_setup_queue_map()
1826 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
1827 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); in i40e_vsi_setup_queue_map()
1829 /* Setup queue offset/count for all TCs for given VSI */ in i40e_vsi_setup_queue_map()
1832 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_vsi_setup_queue_map()
1836 switch (vsi->type) { in i40e_vsi_setup_queue_map()
1838 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | in i40e_vsi_setup_queue_map()
1840 vsi->tc_config.enabled_tc != 1) { in i40e_vsi_setup_queue_map()
1841 qcount = min_t(int, pf->alloc_rss_size, in i40e_vsi_setup_queue_map()
1854 vsi->tc_config.tc_info[i].qoffset = offset; in i40e_vsi_setup_queue_map()
1855 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_setup_queue_map()
1857 /* find the next higher power-of-2 of num queue pairs */ in i40e_vsi_setup_queue_map()
1865 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; in i40e_vsi_setup_queue_map()
1876 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map()
1877 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_setup_queue_map()
1878 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map()
1882 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map()
1886 vsi->num_queue_pairs = offset; in i40e_vsi_setup_queue_map()
1887 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { in i40e_vsi_setup_queue_map()
1888 if (vsi->req_queue_pairs > 0) in i40e_vsi_setup_queue_map()
1889 vsi->num_queue_pairs = vsi->req_queue_pairs; in i40e_vsi_setup_queue_map()
1890 else if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
1891 vsi->num_queue_pairs = pf->num_lan_msix; in i40e_vsi_setup_queue_map()
1898 ctxt->info.up_enable_bits = enabled_tc; in i40e_vsi_setup_queue_map()
1900 if (vsi->type == I40E_VSI_SRIOV) { in i40e_vsi_setup_queue_map()
1901 ctxt->info.mapping_flags |= in i40e_vsi_setup_queue_map()
1903 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_setup_queue_map()
1904 ctxt->info.queue_mapping[i] = in i40e_vsi_setup_queue_map()
1905 cpu_to_le16(vsi->base_queue + i); in i40e_vsi_setup_queue_map()
1907 ctxt->info.mapping_flags |= in i40e_vsi_setup_queue_map()
1909 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map()
1911 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_vsi_setup_queue_map()
1915 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1925 struct i40e_vsi *vsi = np->vsi; in i40e_addr_sync()
1930 return -ENOMEM; in i40e_addr_sync()
1934 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1944 struct i40e_vsi *vsi = np->vsi; in i40e_addr_unsync()
1951 if (ether_addr_equal(addr, netdev->dev_addr)) in i40e_addr_unsync()
1960 * i40e_set_rx_mode - NDO callback to set the netdev filters
1966 struct i40e_vsi *vsi = np->vsi; in i40e_set_rx_mode()
1968 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
1973 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_set_rx_mode()
1976 if (vsi->current_netdev_flags != vsi->netdev->flags) { in i40e_set_rx_mode()
1977 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_set_rx_mode()
1978 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_set_rx_mode()
1983 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1985 * @from: Pointer to list which contains MAC filter entries - changes to
1997 u64 key = i40e_addr_to_hkey(f->macaddr); in i40e_undo_del_filter_entries()
2000 hlist_del(&f->hlist); in i40e_undo_del_filter_entries()
2001 hash_add(vsi->mac_filter_hash, &f->hlist, key); in i40e_undo_del_filter_entries()
2006 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2008 * @from: Pointer to list which contains MAC filter entries - changes to
2021 hlist_del(&new->hlist); in i40e_undo_add_filter_entries()
2027 * i40e_next_filter - Get the next non-broadcast filter from a list
2030 * Returns the next non-broadcast filter in the list. Required so that we
2038 if (!is_broadcast_ether_addr(next->f->macaddr)) in i40e_next_filter()
2046 * i40e_update_filter_state - Update filter state based on return data
2065 * the firmware return status because we pre-set the filter in i40e_update_filter_state()
2071 add_head->state = I40E_FILTER_FAILED; in i40e_update_filter_state()
2073 add_head->state = I40E_FILTER_ACTIVE; in i40e_update_filter_state()
2086 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2091 * @retval: Set to -EIO on failure to delete
2103 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_del_filters()
2107 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL); in i40e_aqc_del_filters()
2108 aq_err = hw->aq.asq_last_status; in i40e_aqc_del_filters()
2112 *retval = -EIO; in i40e_aqc_del_filters()
2113 dev_info(&vsi->back->pdev->dev, in i40e_aqc_del_filters()
2121 * i40e_aqc_add_filters - Request firmware to add a set of filters
2129 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2138 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_add_filters()
2141 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL); in i40e_aqc_add_filters()
2142 aq_err = hw->aq.asq_last_status; in i40e_aqc_add_filters()
2146 if (vsi->type == I40E_VSI_MAIN) { in i40e_aqc_add_filters()
2147 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_add_filters()
2148 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2151 } else if (vsi->type == I40E_VSI_SRIOV || in i40e_aqc_add_filters()
2152 vsi->type == I40E_VSI_VMDQ1 || in i40e_aqc_add_filters()
2153 vsi->type == I40E_VSI_VMDQ2) { in i40e_aqc_add_filters()
2154 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2158 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_add_filters()
2160 i40e_aq_str(hw, aq_err), vsi_name, vsi->type); in i40e_aqc_add_filters()
2166 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2181 bool enable = f->state == I40E_FILTER_NEW; in i40e_aqc_broadcast_filter()
2182 struct i40e_hw *hw = &vsi->back->hw; in i40e_aqc_broadcast_filter()
2185 if (f->vlan == I40E_VLAN_ANY) { in i40e_aqc_broadcast_filter()
2187 vsi->seid, in i40e_aqc_broadcast_filter()
2192 vsi->seid, in i40e_aqc_broadcast_filter()
2194 f->vlan, in i40e_aqc_broadcast_filter()
2199 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_aqc_broadcast_filter()
2200 dev_warn(&vsi->back->pdev->dev, in i40e_aqc_broadcast_filter()
2202 i40e_aq_str(hw, hw->aq.asq_last_status), in i40e_aqc_broadcast_filter()
2210 * i40e_set_promiscuous - set promiscuous mode
2220 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_set_promiscuous()
2221 struct i40e_hw *hw = &pf->hw; in i40e_set_promiscuous()
2224 if (vsi->type == I40E_VSI_MAIN && in i40e_set_promiscuous()
2225 pf->lan_veb != I40E_NO_VEB && in i40e_set_promiscuous()
2226 !(pf->flags & I40E_FLAG_MFP_ENABLED)) { in i40e_set_promiscuous()
2234 vsi->seid, in i40e_set_promiscuous()
2238 vsi->seid, in i40e_set_promiscuous()
2241 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2244 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_promiscuous()
2249 vsi->seid, in i40e_set_promiscuous()
2253 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2256 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_promiscuous()
2260 vsi->seid, in i40e_set_promiscuous()
2263 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2266 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_promiscuous()
2271 pf->cur_promisc = promisc; in i40e_set_promiscuous()
2277 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2289 struct i40e_hw *hw = &vsi->back->hw; in i40e_sync_vsi_filters()
2310 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) in i40e_sync_vsi_filters()
2312 pf = vsi->back; in i40e_sync_vsi_filters()
2314 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2316 if (vsi->netdev) { in i40e_sync_vsi_filters()
2317 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in i40e_sync_vsi_filters()
2318 vsi->current_netdev_flags = vsi->netdev->flags; in i40e_sync_vsi_filters()
2324 if (vsi->type == I40E_VSI_SRIOV) in i40e_sync_vsi_filters()
2325 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); in i40e_sync_vsi_filters()
2326 else if (vsi->type != I40E_VSI_MAIN) in i40e_sync_vsi_filters()
2327 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); in i40e_sync_vsi_filters()
2329 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { in i40e_sync_vsi_filters()
2330 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2332 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2334 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_sync_vsi_filters()
2335 if (f->state == I40E_FILTER_REMOVE) { in i40e_sync_vsi_filters()
2337 hash_del(&f->hlist); in i40e_sync_vsi_filters()
2338 hlist_add_head(&f->hlist, &tmp_del_list); in i40e_sync_vsi_filters()
2343 if (f->state == I40E_FILTER_NEW) { in i40e_sync_vsi_filters()
2350 new->f = f; in i40e_sync_vsi_filters()
2351 new->state = f->state; in i40e_sync_vsi_filters()
2354 hlist_add_head(&new->hlist, &tmp_add_list); in i40e_sync_vsi_filters()
2361 if (f->vlan > 0) in i40e_sync_vsi_filters()
2372 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2377 filter_list_len = hw->aq.asq_buf_size / in i40e_sync_vsi_filters()
2391 if (is_broadcast_ether_addr(f->macaddr)) { in i40e_sync_vsi_filters()
2394 hlist_del(&f->hlist); in i40e_sync_vsi_filters()
2400 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); in i40e_sync_vsi_filters()
2401 if (f->vlan == I40E_VLAN_ANY) { in i40e_sync_vsi_filters()
2406 cpu_to_le16((u16)(f->vlan)); in i40e_sync_vsi_filters()
2423 hlist_del(&f->hlist); in i40e_sync_vsi_filters()
2438 filter_list_len = hw->aq.asq_buf_size / in i40e_sync_vsi_filters()
2451 if (is_broadcast_ether_addr(new->f->macaddr)) { in i40e_sync_vsi_filters()
2453 new->f)) in i40e_sync_vsi_filters()
2454 new->state = I40E_FILTER_FAILED; in i40e_sync_vsi_filters()
2456 new->state = I40E_FILTER_ACTIVE; in i40e_sync_vsi_filters()
2465 new->f->macaddr); in i40e_sync_vsi_filters()
2466 if (new->f->vlan == I40E_VLAN_ANY) { in i40e_sync_vsi_filters()
2471 cpu_to_le16((u16)(new->f->vlan)); in i40e_sync_vsi_filters()
2495 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2498 if (new->f->state == I40E_FILTER_NEW) in i40e_sync_vsi_filters()
2499 new->f->state = new->state; in i40e_sync_vsi_filters()
2500 hlist_del(&new->hlist); in i40e_sync_vsi_filters()
2503 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2509 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2510 vsi->active_filters = 0; in i40e_sync_vsi_filters()
2511 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { in i40e_sync_vsi_filters()
2512 if (f->state == I40E_FILTER_ACTIVE) in i40e_sync_vsi_filters()
2513 vsi->active_filters++; in i40e_sync_vsi_filters()
2514 else if (f->state == I40E_FILTER_FAILED) in i40e_sync_vsi_filters()
2517 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2524 vsi->active_filters < vsi->promisc_threshold) { in i40e_sync_vsi_filters()
2525 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2528 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2529 vsi->promisc_threshold = 0; in i40e_sync_vsi_filters()
2533 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { in i40e_sync_vsi_filters()
2534 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2538 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_sync_vsi_filters()
2544 vsi->promisc_threshold = (vsi->active_filters * 3) / 4; in i40e_sync_vsi_filters()
2550 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); in i40e_sync_vsi_filters()
2551 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, in i40e_sync_vsi_filters()
2552 vsi->seid, in i40e_sync_vsi_filters()
2557 hw->aq.asq_last_status); in i40e_sync_vsi_filters()
2558 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2562 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_sync_vsi_filters()
2564 dev_info(&pf->pdev->dev, "%s allmulti mode.\n", in i40e_sync_vsi_filters()
2572 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || in i40e_sync_vsi_filters()
2577 hw->aq.asq_last_status); in i40e_sync_vsi_filters()
2578 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2583 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_sync_vsi_filters()
2589 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2591 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2596 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2600 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2602 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2603 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2604 return -ENOMEM; in i40e_sync_vsi_filters()
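
One sizing detail in the sync above: AdminQ command buffers are fixed-size, so filter_list_len caps how many add or delete elements fit in a single firmware command, and the tmp lists are flushed to firmware in chunks of that size. A sketch of the delete-side sizing, assuming the element struct from i40e_adminq_cmd.h:

static u16 demo_del_batch_len(struct i40e_hw *hw)
{
	/* e.g. a 4096-byte ASQ buffer holds 4096 / 16 = 256 elements */
	return hw->aq.asq_buf_size /
	       sizeof(struct i40e_aqc_remove_macvlan_element_data);
}
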
2608 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2617 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) in i40e_sync_filters_subtask()
2619 if (test_bit(__I40E_VF_DISABLE, pf->state)) { in i40e_sync_filters_subtask()
2620 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_sync_filters_subtask()
2624 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_sync_filters_subtask()
2625 if (pf->vsi[v] && in i40e_sync_filters_subtask()
2626 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { in i40e_sync_filters_subtask()
2627 int ret = i40e_sync_vsi_filters(pf->vsi[v]); in i40e_sync_filters_subtask()
2632 pf->state); in i40e_sync_filters_subtask()
2640 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2645 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) in i40e_max_xdp_frame_size()
2652 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2661 struct i40e_vsi *vsi = np->vsi; in i40e_change_mtu()
2662 struct i40e_pf *pf = vsi->back; in i40e_change_mtu()
2668 return -EINVAL; in i40e_change_mtu()
2672 netdev->mtu, new_mtu); in i40e_change_mtu()
2673 netdev->mtu = new_mtu; in i40e_change_mtu()
2676 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_change_mtu()
2677 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_change_mtu()
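
i40e_change_mtu rejects any MTU whose resulting on-wire frame would exceed the VSI's maximum frame size. A sketch of the usual layered-header arithmetic; treating the padding as Ethernet header plus FCS plus two VLAN tags is an assumption here, not a quote of the driver's exact constant:

#include <stdio.h>

#define ETH_HLEN	14	/* dst MAC + src MAC + EtherType */
#define ETH_FCS_LEN	4	/* trailing CRC */
#define VLAN_HLEN	4	/* one 802.1Q tag */

/* Assumed: max L2 frame = MTU + Ethernet header + FCS + two VLAN tags. */
static int frame_size_for_mtu(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
}

int main(void)
{
	printf("%d\n", frame_size_for_mtu(1500));	/* 1526 */
	printf("%d\n", frame_size_for_mtu(9000));	/* 9026 */
	return 0;
}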
2682 * i40e_ioctl - Access the hwtstamp interface
2690 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl()
2698 return -EOPNOTSUPP; in i40e_ioctl()
2703 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2712 if (vsi->info.pvid) in i40e_vlan_stripping_enable()
2715 if ((vsi->info.valid_sections & in i40e_vlan_stripping_enable()
2717 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) in i40e_vlan_stripping_enable()
2720 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_enable()
2721 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_enable()
2724 ctxt.seid = vsi->seid; in i40e_vlan_stripping_enable()
2725 ctxt.info = vsi->info; in i40e_vlan_stripping_enable()
2726 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_enable()
2728 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_enable()
2730 i40e_stat_str(&vsi->back->hw, ret), in i40e_vlan_stripping_enable()
2731 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_enable()
2732 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_enable()
2737 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2746 if (vsi->info.pvid) in i40e_vlan_stripping_disable()
2749 if ((vsi->info.valid_sections & in i40e_vlan_stripping_disable()
2751 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == in i40e_vlan_stripping_disable()
2755 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_disable()
2756 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_disable()
2759 ctxt.seid = vsi->seid; in i40e_vlan_stripping_disable()
2760 ctxt.info = vsi->info; in i40e_vlan_stripping_disable()
2761 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_disable()
2763 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_disable()
2765 i40e_stat_str(&vsi->back->hw, ret), in i40e_vlan_stripping_disable()
2766 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_disable()
2767 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_disable()
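
The enable/disable pair above differs only in the EMOD bits written into port_vlan_flags: stripping on tells the VSI to pull the tag out of the packet, stripping off leaves it in place. A sketch with illustrative encodings only; the real I40E_AQ_VSI_PVLAN_* values live in the admin queue headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative encodings, not the driver's actual constants. */
#define PVLAN_MODE_ALL		0x3
#define PVLAN_EMOD_SHIFT	3
#define PVLAN_EMOD_STR_BOTH	(0x0 << PVLAN_EMOD_SHIFT)	/* strip tag */
#define PVLAN_EMOD_NOTHING	(0x3 << PVLAN_EMOD_SHIFT)	/* keep tag  */

int main(void)
{
	uint8_t strip_on  = PVLAN_MODE_ALL | PVLAN_EMOD_STR_BOTH;
	uint8_t strip_off = PVLAN_MODE_ALL | PVLAN_EMOD_NOTHING;

	printf("enable=0x%02x disable=0x%02x\n", strip_on, strip_off);
	return 0;
}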
2772 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2774 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2790 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vlan_all_mac()
2791 if (f->state == I40E_FILTER_REMOVE) in i40e_add_vlan_all_mac()
2793 add_f = i40e_add_filter(vsi, f->macaddr, vid); in i40e_add_vlan_all_mac()
2795 dev_info(&vsi->back->pdev->dev, in i40e_add_vlan_all_mac()
2797 vid, f->macaddr); in i40e_add_vlan_all_mac()
2798 return -ENOMEM; in i40e_add_vlan_all_mac()
2806 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2814 if (vsi->info.pvid) in i40e_vsi_add_vlan()
2815 return -EINVAL; in i40e_vsi_add_vlan()
2829 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
2831 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
2838 i40e_service_event_schedule(vsi->back); in i40e_vsi_add_vlan()
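
Adding VLAN membership fans out to one MAC/VLAN filter per existing MAC address, failing with -ENOMEM if a filter cannot be allocated. A simplified, hypothetical flat-table sketch of that fan-out:

#include <stdio.h>

/* Hypothetical, simplified filter table: each MAC is paired with each VID. */
struct filter { unsigned char mac[6]; short vid; };

static int add_vlan_all_mac(struct filter *tbl, int n, int cap, short vid)
{
	int added = 0;

	for (int i = 0; i < n; i++) {
		if (n + added >= cap)
			return -1;	/* -ENOMEM in the driver */
		tbl[n + added] = tbl[i];
		tbl[n + added].vid = vid;
		added++;
	}
	return added;
}

int main(void)
{
	struct filter tbl[8] = {
		{ { 0, 1, 2, 3, 4, 5 }, 0 },
		{ { 0, 1, 2, 3, 4, 6 }, 0 },
	};
	int added = add_vlan_all_mac(tbl, 2, 8, 100);

	printf("added %d filters for VID 100\n", added);	/* 2 */
	return 0;
}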
2843 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2845 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2861 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_rm_vlan_all_mac()
2862 if (f->vlan == vid) in i40e_rm_vlan_all_mac()
2868 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2874 if (!vid || vsi->info.pvid) in i40e_vsi_kill_vlan()
2877 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
2879 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
2884 i40e_service_event_schedule(vsi->back); in i40e_vsi_kill_vlan()
2888 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2899 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid()
2903 return -EINVAL; in i40e_vlan_rx_add_vid()
2907 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid()
2913 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2922 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid_up()
2926 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid_up()
2930 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2941 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_kill_vid()
2949 clear_bit(vid, vsi->active_vlans); in i40e_vlan_rx_kill_vid()
2955 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2962 if (!vsi->netdev) in i40e_restore_vlan()
2965 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in i40e_restore_vlan()
2970 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) in i40e_restore_vlan()
2971 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), in i40e_restore_vlan()
2976 * i40e_vsi_add_pvid - Add pvid for the VSI
2985 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vsi_add_pvid()
2986 vsi->info.pvid = cpu_to_le16(vid); in i40e_vsi_add_pvid()
2987 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | in i40e_vsi_add_pvid()
2991 ctxt.seid = vsi->seid; in i40e_vsi_add_pvid()
2992 ctxt.info = vsi->info; in i40e_vsi_add_pvid()
2993 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vsi_add_pvid()
2995 dev_info(&vsi->back->pdev->dev, in i40e_vsi_add_pvid()
2997 i40e_stat_str(&vsi->back->hw, ret), in i40e_vsi_add_pvid()
2998 i40e_aq_str(&vsi->back->hw, in i40e_vsi_add_pvid()
2999 vsi->back->hw.aq.asq_last_status)); in i40e_vsi_add_pvid()
3000 return -ENOENT; in i40e_vsi_add_pvid()
3007 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3014 vsi->info.pvid = 0; in i40e_vsi_remove_pvid()
3020 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3033 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3034 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); in i40e_vsi_setup_tx_resources()
3039 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3040 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); in i40e_vsi_setup_tx_resources()
3046 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3055 if (vsi->tx_rings) { in i40e_vsi_free_tx_resources()
3056 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3057 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in i40e_vsi_free_tx_resources()
3058 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_vsi_free_tx_resources()
3061 if (vsi->xdp_rings) { in i40e_vsi_free_tx_resources()
3062 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3063 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in i40e_vsi_free_tx_resources()
3064 i40e_free_tx_resources(vsi->xdp_rings[i]); in i40e_vsi_free_tx_resources()
3069 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3082 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_rx_resources()
3083 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); in i40e_vsi_setup_rx_resources()
3088 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3097 if (!vsi->rx_rings) in i40e_vsi_free_rx_resources()
3100 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_rx_resources()
3101 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in i40e_vsi_free_rx_resources()
3102 i40e_free_rx_resources(vsi->rx_rings[i]); in i40e_vsi_free_rx_resources()
3106 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3110 * based on the TCs enabled for the VSI that ring belongs to.
3116 if (!ring->q_vector || !ring->netdev || ring->ch) in i40e_config_xps_tx_ring()
3120 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) in i40e_config_xps_tx_ring()
3123 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); in i40e_config_xps_tx_ring()
3124 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), in i40e_config_xps_tx_ring()
3125 ring->queue_index); in i40e_config_xps_tx_ring()
3129 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC are enabled
3136 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); in i40e_xsk_pool()
3137 int qid = ring->queue_index; in i40e_xsk_pool()
3140 qid -= ring->vsi->alloc_queue_pairs; in i40e_xsk_pool()
3142 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) in i40e_xsk_pool()
3145 return xsk_get_pool_from_qid(ring->vsi->netdev, qid); in i40e_xsk_pool()
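
Because XDP Tx rings are laid out after the regular queue pairs, an XDP ring's queue_index must have alloc_queue_pairs subtracted before it can be used as a per-pair qid, which is exactly the adjustment above. A sketch:

#include <stdbool.h>
#include <stdio.h>

/* Assumed layout: queues [0, n) are regular pairs, [n, 2n) are XDP Tx. */
static int normalize_qid(int queue_index, int alloc_queue_pairs, bool is_xdp)
{
	return is_xdp ? queue_index - alloc_queue_pairs : queue_index;
}

int main(void)
{
	/* With 8 allocated pairs, XDP ring 10 serves queue pair 2. */
	printf("%d\n", normalize_qid(10, 8, true));	/* 2 */
	printf("%d\n", normalize_qid(3, 8, false));	/* 3 */
	return 0;
}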
3149 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of the environment
3156 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_tx_ring()
3157 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_tx_ring()
3158 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_tx_ring()
3164 ring->xsk_pool = i40e_xsk_pool(ring); in i40e_configure_tx_ring()
3167 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { in i40e_configure_tx_ring()
3168 ring->atr_sample_rate = vsi->back->atr_sample_rate; in i40e_configure_tx_ring()
3169 ring->atr_count = 0; in i40e_configure_tx_ring()
3171 ring->atr_sample_rate = 0; in i40e_configure_tx_ring()
3181 tx_ctx.base = (ring->dma / 128); in i40e_configure_tx_ring()
3182 tx_ctx.qlen = ring->count; in i40e_configure_tx_ring()
3183 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | in i40e_configure_tx_ring()
3185 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); in i40e_configure_tx_ring()
3187 if (vsi->type != I40E_VSI_FDIR) in i40e_configure_tx_ring()
3189 tx_ctx.head_wb_addr = ring->dma + in i40e_configure_tx_ring()
3190 (ring->count * sizeof(struct i40e_tx_desc)); in i40e_configure_tx_ring()
3203 if (ring->ch) in i40e_configure_tx_ring()
3205 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3208 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3215 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3217 ring->queue_index, pf_q, err); in i40e_configure_tx_ring()
3218 return -ENOMEM; in i40e_configure_tx_ring()
3224 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3226 ring->queue_index, pf_q, err); in i40e_configure_tx_ring()
3227 return -ENOMEM; in i40e_configure_tx_ring()
3231 if (ring->ch) { in i40e_configure_tx_ring()
3232 if (ring->ch->type == I40E_VSI_VMDQ2) in i40e_configure_tx_ring()
3235 return -EINVAL; in i40e_configure_tx_ring()
3237 qtx_ctl |= (ring->ch->vsi_number << in i40e_configure_tx_ring()
3241 if (vsi->type == I40E_VSI_VMDQ2) { in i40e_configure_tx_ring()
3243 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & in i40e_configure_tx_ring()
3250 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & in i40e_configure_tx_ring()
3256 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); in i40e_configure_tx_ring()
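
Two pieces of arithmetic above deserve a note: the HMC context stores the ring base in 128-byte units (hence the divide), and the head write-back slot is placed immediately after the last descriptor. A sketch assuming 16-byte Tx descriptors:

#include <stdint.h>
#include <stdio.h>

#define TX_DESC_SIZE	16	/* assumed sizeof(struct i40e_tx_desc) */

int main(void)
{
	uint64_t dma = 0x100000;	/* ring base, 128-byte aligned */
	uint16_t count = 512;		/* descriptors in the ring */

	uint64_t ctx_base = dma / 128;			/* 128B units   */
	uint64_t head_wb  = dma + count * TX_DESC_SIZE;	/* just past end */

	printf("base=%llu head_wb=0x%llx\n",
	       (unsigned long long)ctx_base, (unsigned long long)head_wb);
	return 0;
}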
3262 * i40e_rx_offset - Return expected offset into page to access data
3273 * i40e_configure_rx_ring - Configure a receive ring context
3280 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_rx_ring()
3281 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; in i40e_configure_rx_ring()
3282 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_rx_ring()
3283 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_rx_ring()
3289 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); in i40e_configure_rx_ring()
3294 if (ring->vsi->type == I40E_VSI_MAIN) in i40e_configure_rx_ring()
3295 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in i40e_configure_rx_ring()
3297 kfree(ring->rx_bi); in i40e_configure_rx_ring()
3298 ring->xsk_pool = i40e_xsk_pool(ring); in i40e_configure_rx_ring()
3299 if (ring->xsk_pool) { in i40e_configure_rx_ring()
3303 ring->rx_buf_len = in i40e_configure_rx_ring()
3304 xsk_pool_get_rx_frame_size(ring->xsk_pool); in i40e_configure_rx_ring()
3307 * handling in the fast-path. in i40e_configure_rx_ring()
3310 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in i40e_configure_rx_ring()
3315 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3317 ring->queue_index); in i40e_configure_rx_ring()
3323 ring->rx_buf_len = vsi->rx_buf_len; in i40e_configure_rx_ring()
3324 if (ring->vsi->type == I40E_VSI_MAIN) { in i40e_configure_rx_ring()
3325 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in i40e_configure_rx_ring()
3333 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, in i40e_configure_rx_ring()
3336 rx_ctx.base = (ring->dma / 128); in i40e_configure_rx_ring()
3337 rx_ctx.qlen = ring->count; in i40e_configure_rx_ring()
3347 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); in i40e_configure_rx_ring()
3348 if (hw->revision_id == 0) in i40e_configure_rx_ring()
3362 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3364 ring->queue_index, pf_q, err); in i40e_configure_rx_ring()
3365 return -ENOMEM; in i40e_configure_rx_ring()
3371 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3373 ring->queue_index, pf_q, err); in i40e_configure_rx_ring()
3374 return -ENOMEM; in i40e_configure_rx_ring()
3378 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) in i40e_configure_rx_ring()
3383 ring->rx_offset = i40e_rx_offset(ring); in i40e_configure_rx_ring()
3386 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); in i40e_configure_rx_ring()
3387 writel(0, ring->tail); in i40e_configure_rx_ring()
3389 if (ring->xsk_pool) { in i40e_configure_rx_ring()
3390 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in i40e_configure_rx_ring()
3399 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3401 ring->xsk_pool ? "AF_XDP ZC enabled " : "", in i40e_configure_rx_ring()
3402 ring->queue_index, pf_q); in i40e_configure_rx_ring()
3409 * i40e_vsi_configure_tx - Configure the VSI for Tx
3419 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3420 err = i40e_configure_tx_ring(vsi->tx_rings[i]); in i40e_vsi_configure_tx()
3425 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3426 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); in i40e_vsi_configure_tx()
3432 * i40e_vsi_configure_rx - Configure the VSI for Rx
3442 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { in i40e_vsi_configure_rx()
3443 vsi->max_frame = I40E_MAX_RXBUFFER; in i40e_vsi_configure_rx()
3444 vsi->rx_buf_len = I40E_RXBUFFER_2048; in i40e_vsi_configure_rx()
3447 (vsi->netdev->mtu <= ETH_DATA_LEN)) { in i40e_vsi_configure_rx()
3448 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; in i40e_vsi_configure_rx()
3449 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; in i40e_vsi_configure_rx()
3452 vsi->max_frame = I40E_MAX_RXBUFFER; in i40e_vsi_configure_rx()
3453 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 : in i40e_vsi_configure_rx()
3458 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_configure_rx()
3459 err = i40e_configure_rx_ring(vsi->rx_rings[i]); in i40e_vsi_configure_rx()
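
The sizing logic above falls into three buckets: legacy Rx always gets 2K buffers, small-MTU non-legacy setups get a 1536-byte buffer minus NET_IP_ALIGN, and everything else depends on page size. A compact restatement, with the usual i40e buffer constants assumed rather than quoted:

#include <stdbool.h>
#include <stdio.h>

#define RXBUF_1536	1536
#define RXBUF_2048	2048
#define RXBUF_3072	3072
#define NET_IP_ALIGN	2
#define ETH_DATA_LEN	1500
#define PAGE_SZ		4096	/* assumed 4K pages */

/* Mirrors the three cases in i40e_vsi_configure_rx (values assumed). */
static int pick_rx_buf_len(bool legacy_rx, int mtu)
{
	if (legacy_rx)
		return RXBUF_2048;
	if (mtu <= ETH_DATA_LEN)
		return RXBUF_1536 - NET_IP_ALIGN;
	return PAGE_SZ < 8192 ? RXBUF_3072 : RXBUF_2048;
}

int main(void)
{
	printf("%d\n", pick_rx_buf_len(false, 1500));	/* 1534 */
	printf("%d\n", pick_rx_buf_len(false, 9000));	/* 3072 on 4K pages */
	return 0;
}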
3465 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3474 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_config_dcb_rings()
3476 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_config_dcb_rings()
3477 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3478 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3479 rx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3480 tx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3486 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) in i40e_vsi_config_dcb_rings()
3489 qoffset = vsi->tc_config.tc_info[n].qoffset; in i40e_vsi_config_dcb_rings()
3490 qcount = vsi->tc_config.tc_info[n].qcount; in i40e_vsi_config_dcb_rings()
3492 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3493 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3494 rx_ring->dcb_tc = n; in i40e_vsi_config_dcb_rings()
3495 tx_ring->dcb_tc = n; in i40e_vsi_config_dcb_rings()
3501 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3506 if (vsi->netdev) in i40e_set_vsi_rx_mode()
3507 i40e_set_rx_mode(vsi->netdev); in i40e_set_vsi_rx_mode()
3511 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3518 pf->fd_tcp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3519 pf->fd_udp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3520 pf->fd_sctp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3521 pf->fd_ip4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3522 pf->fd_tcp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3523 pf->fd_udp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3524 pf->fd_sctp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3525 pf->fd_ip6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3529 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3538 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore()
3541 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_filter_restore()
3548 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_restore()
3554 * i40e_vsi_configure - Set up the VSI for action
3572 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3578 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix()
3579 struct i40e_hw *hw = &pf->hw; in i40e_vsi_configure_msix()
3586 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) in i40e_vsi_configure_msix()
3588 qp = vsi->base_queue; in i40e_vsi_configure_msix()
3589 vector = vsi->base_vector; in i40e_vsi_configure_msix()
3590 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { in i40e_vsi_configure_msix()
3591 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; in i40e_vsi_configure_msix()
3593 q_vector->rx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3594 q_vector->rx.target_itr = in i40e_vsi_configure_msix()
3595 ITR_TO_REG(vsi->rx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3596 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), in i40e_vsi_configure_msix()
3597 q_vector->rx.target_itr >> 1); in i40e_vsi_configure_msix()
3598 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_vsi_configure_msix()
3600 q_vector->tx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3601 q_vector->tx.target_itr = in i40e_vsi_configure_msix()
3602 ITR_TO_REG(vsi->tx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3603 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), in i40e_vsi_configure_msix()
3604 q_vector->tx.target_itr >> 1); in i40e_vsi_configure_msix()
3605 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_vsi_configure_msix()
3607 wr32(hw, I40E_PFINT_RATEN(vector - 1), in i40e_vsi_configure_msix()
3608 i40e_intrl_usec_to_reg(vsi->int_rate_limit)); in i40e_vsi_configure_msix()
3611 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); in i40e_vsi_configure_msix()
3612 for (q = 0; q < q_vector->num_ringpairs; q++) { in i40e_vsi_configure_msix()
3613 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; in i40e_vsi_configure_msix()
3644 if (q == (q_vector->num_ringpairs - 1)) in i40e_vsi_configure_msix()
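
The target_itr >> 1 writes above convert a microsecond interval into what is, by all appearances, the register's 2-microsecond tick granularity (an assumption worth flagging); the rate limit gets its own encoding via i40e_intrl_usec_to_reg. A sketch of the conversion:

#include <stdint.h>
#include <stdio.h>

/* Assumed: the ITR register counts in 2 us ticks, so a microsecond
 * value is halved before being written. */
static uint16_t itr_usecs_to_reg(uint16_t usecs)
{
	return usecs >> 1;
}

int main(void)
{
	printf("%u\n", itr_usecs_to_reg(50));	/* 25 ticks = 50 us, ~20k ints/s */
	printf("%u\n", itr_usecs_to_reg(8));	/* 4 ticks = 8 us */
	return 0;
}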
3657 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3662 struct i40e_hw *hw = &pf->hw; in i40e_enable_misc_int_causes()
3678 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_enable_misc_int_causes()
3681 if (pf->flags & I40E_FLAG_PTP) in i40e_enable_misc_int_causes()
3695 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3700 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0; in i40e_configure_msi_and_legacy()
3701 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_configure_msi_and_legacy()
3702 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy()
3703 struct i40e_hw *hw = &pf->hw; in i40e_configure_msi_and_legacy()
3707 q_vector->rx.next_update = jiffies + 1; in i40e_configure_msi_and_legacy()
3708 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
3709 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1); in i40e_configure_msi_and_legacy()
3710 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_configure_msi_and_legacy()
3711 q_vector->tx.next_update = jiffies + 1; in i40e_configure_msi_and_legacy()
3712 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
3713 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1); in i40e_configure_msi_and_legacy()
3714 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_configure_msi_and_legacy()
3747 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3752 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_disable_icr0()
3760 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3765 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_enable_icr0()
3777 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3785 if (!q_vector->tx.ring && !q_vector->rx.ring) in i40e_msix_clean_rings()
3788 napi_schedule_irqoff(&q_vector->napi); in i40e_msix_clean_rings()
3794 * i40e_irq_affinity_notify - Callback for affinity changes
3807 cpumask_copy(&q_vector->affinity_mask, mask); in i40e_irq_affinity_notify()
3811 * i40e_irq_affinity_release - Callback for affinity notifier release
3821 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3825 * Allocates MSI-X vectors and requests interrupts from the kernel.
3829 int q_vectors = vsi->num_q_vectors; in i40e_vsi_request_irq_msix()
3830 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix()
3831 int base = vsi->base_vector; in i40e_vsi_request_irq_msix()
3839 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; in i40e_vsi_request_irq_msix()
3841 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
3843 if (q_vector->tx.ring && q_vector->rx.ring) { in i40e_vsi_request_irq_msix()
3844 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
3845 "%s-%s-%d", basename, "TxRx", rx_int_idx++); in i40e_vsi_request_irq_msix()
3847 } else if (q_vector->rx.ring) { in i40e_vsi_request_irq_msix()
3848 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
3849 "%s-%s-%d", basename, "rx", rx_int_idx++); in i40e_vsi_request_irq_msix()
3850 } else if (q_vector->tx.ring) { in i40e_vsi_request_irq_msix()
3851 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
3852 "%s-%s-%d", basename, "tx", tx_int_idx++); in i40e_vsi_request_irq_msix()
3858 vsi->irq_handler, in i40e_vsi_request_irq_msix()
3860 q_vector->name, in i40e_vsi_request_irq_msix()
3863 dev_info(&pf->pdev->dev, in i40e_vsi_request_irq_msix()
3869 q_vector->affinity_notify.notify = i40e_irq_affinity_notify; in i40e_vsi_request_irq_msix()
3870 q_vector->affinity_notify.release = i40e_irq_affinity_release; in i40e_vsi_request_irq_msix()
3871 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); in i40e_vsi_request_irq_msix()
3878 cpu = cpumask_local_spread(q_vector->v_idx, -1); in i40e_vsi_request_irq_msix()
3882 vsi->irqs_ready = true; in i40e_vsi_request_irq_msix()
3887 vector--; in i40e_vsi_request_irq_msix()
3888 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
3891 free_irq(irq_num, &vsi->q_vectors[vector]); in i40e_vsi_request_irq_msix()
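
The vector names built in the loop above follow a "<basename>-<kind>-<index>" pattern so the entries read naturally in /proc/interrupts. A sketch:

#include <stdio.h>

int main(void)
{
	char name[32];
	const char *basename = "eth0";	/* hypothetical netdev name */

	/* Same pattern as the snprintf calls above. */
	snprintf(name, sizeof(name) - 1, "%s-%s-%d", basename, "TxRx", 0);
	printf("%s\n", name);		/* eth0-TxRx-0 */
	return 0;
}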
3897 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3898 * @vsi: the VSI being un-configured
3902 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq()
3903 struct i40e_hw *hw = &pf->hw; in i40e_vsi_disable_irq()
3904 int base = vsi->base_vector; in i40e_vsi_disable_irq()
3908 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_disable_irq()
3911 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
3913 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
3915 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
3917 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
3921 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
3925 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_disable_irq()
3926 for (i = vsi->base_vector; in i40e_vsi_disable_irq()
3927 i < (vsi->num_q_vectors + vsi->base_vector); i++) in i40e_vsi_disable_irq()
3928 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); in i40e_vsi_disable_irq()
3931 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_disable_irq()
3932 synchronize_irq(pf->msix_entries[i + base].vector); in i40e_vsi_disable_irq()
3934 /* Legacy and MSI mode - this stops all interrupt handling */ in i40e_vsi_disable_irq()
3938 synchronize_irq(pf->pdev->irq); in i40e_vsi_disable_irq()
3943 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3948 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq()
3951 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_enable_irq()
3952 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_enable_irq()
3958 i40e_flush(&pf->hw); in i40e_vsi_enable_irq()
3963 * i40e_free_misc_vector - Free the vector that handles non-queue events
3969 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); in i40e_free_misc_vector()
3970 i40e_flush(&pf->hw); in i40e_free_misc_vector()
3972 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { in i40e_free_misc_vector()
3973 synchronize_irq(pf->msix_entries[0].vector); in i40e_free_misc_vector()
3974 free_irq(pf->msix_entries[0].vector, pf); in i40e_free_misc_vector()
3975 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_free_misc_vector()
3980 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3985 * with both queue and non-queue interrupts. This is also used in
3986 * MSIX mode to handle the non-queue interrupts.
3991 struct i40e_hw *hw = &pf->hw; in i40e_intr()
4006 pf->sw_int_count++; in i40e_intr()
4008 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_intr()
4011 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); in i40e_intr()
4012 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_intr()
4017 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_intr()
4018 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_intr()
4026 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_intr()
4027 napi_schedule_irqoff(&q_vector->napi); in i40e_intr()
4032 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_intr()
4033 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); in i40e_intr()
4038 set_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_intr()
4043 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { in i40e_intr()
4050 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); in i40e_intr()
4055 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_intr()
4056 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4062 pf->corer_count++; in i40e_intr()
4064 pf->globr_count++; in i40e_intr()
4066 pf->empr_count++; in i40e_intr()
4067 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4073 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); in i40e_intr()
4074 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", in i40e_intr()
4083 schedule_work(&pf->ptp_extts0_work); in i40e_intr()
4097 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", in i40e_intr()
4102 dev_info(&pf->pdev->dev, "device will be reset\n"); in i40e_intr()
4103 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_intr()
4111 /* re-enable interrupt causes */ in i40e_intr()
4113 if (!test_bit(__I40E_DOWN, pf->state) || in i40e_intr()
4114 test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_intr()
4123 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4131 struct i40e_vsi *vsi = tx_ring->vsi; in i40e_clean_fdir_tx_irq()
4132 u16 i = tx_ring->next_to_clean; in i40e_clean_fdir_tx_irq()
4136 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_fdir_tx_irq()
4138 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4141 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; in i40e_clean_fdir_tx_irq()
4151 if (!(eop_desc->cmd_type_offset_bsz & in i40e_clean_fdir_tx_irq()
4156 tx_buf->next_to_watch = NULL; in i40e_clean_fdir_tx_irq()
4158 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4159 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4165 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4166 tx_buf = tx_ring->tx_bi; in i40e_clean_fdir_tx_irq()
4170 dma_unmap_single(tx_ring->dev, in i40e_clean_fdir_tx_irq()
4174 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) in i40e_clean_fdir_tx_irq()
4175 kfree(tx_buf->raw_buf); in i40e_clean_fdir_tx_irq()
4177 tx_buf->raw_buf = NULL; in i40e_clean_fdir_tx_irq()
4178 tx_buf->tx_flags = 0; in i40e_clean_fdir_tx_irq()
4179 tx_buf->next_to_watch = NULL; in i40e_clean_fdir_tx_irq()
4181 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4182 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4189 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4190 tx_buf = tx_ring->tx_bi; in i40e_clean_fdir_tx_irq()
4195 budget--; in i40e_clean_fdir_tx_irq()
4198 i += tx_ring->count; in i40e_clean_fdir_tx_irq()
4199 tx_ring->next_to_clean = i; in i40e_clean_fdir_tx_irq()
4201 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) in i40e_clean_fdir_tx_irq()
4202 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); in i40e_clean_fdir_tx_irq()
4208 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4217 if (!q_vector->tx.ring) in i40e_fdir_clean_ring()
4220 vsi = q_vector->tx.ring->vsi; in i40e_fdir_clean_ring()
4221 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); in i40e_fdir_clean_ring()
4227 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4234 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_map_vector_to_qp()
4235 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; in i40e_map_vector_to_qp()
4236 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; in i40e_map_vector_to_qp()
4238 tx_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4239 tx_ring->next = q_vector->tx.ring; in i40e_map_vector_to_qp()
4240 q_vector->tx.ring = tx_ring; in i40e_map_vector_to_qp()
4241 q_vector->tx.count++; in i40e_map_vector_to_qp()
4245 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; in i40e_map_vector_to_qp()
4247 xdp_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4248 xdp_ring->next = q_vector->tx.ring; in i40e_map_vector_to_qp()
4249 q_vector->tx.ring = xdp_ring; in i40e_map_vector_to_qp()
4250 q_vector->tx.count++; in i40e_map_vector_to_qp()
4253 rx_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4254 rx_ring->next = q_vector->rx.ring; in i40e_map_vector_to_qp()
4255 q_vector->rx.ring = rx_ring; in i40e_map_vector_to_qp()
4256 q_vector->rx.count++; in i40e_map_vector_to_qp()
4260 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4263 * This function maps descriptor rings to the queue-specific vectors
4264 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4270 int qp_remaining = vsi->num_queue_pairs; in i40e_vsi_map_rings_to_vectors()
4271 int q_vectors = vsi->num_q_vectors; in i40e_vsi_map_rings_to_vectors()
4276 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to in i40e_vsi_map_rings_to_vectors()
4284 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; in i40e_vsi_map_rings_to_vectors()
4286 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); in i40e_vsi_map_rings_to_vectors()
4288 q_vector->num_ringpairs = num_ringpairs; in i40e_vsi_map_rings_to_vectors()
4289 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; in i40e_vsi_map_rings_to_vectors()
4291 q_vector->rx.count = 0; in i40e_vsi_map_rings_to_vectors()
4292 q_vector->tx.count = 0; in i40e_vsi_map_rings_to_vectors()
4293 q_vector->rx.ring = NULL; in i40e_vsi_map_rings_to_vectors()
4294 q_vector->tx.ring = NULL; in i40e_vsi_map_rings_to_vectors()
4296 while (num_ringpairs--) { in i40e_vsi_map_rings_to_vectors()
4299 qp_remaining--; in i40e_vsi_map_rings_to_vectors()
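
With fewer vectors than queue pairs, DIV_ROUND_UP over the vectors still remaining spreads the pairs as evenly as possible. A worked example, 10 pairs over 4 vectors:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int qp_remaining = 10, q_vectors = 4;

	/* 10 pairs over 4 vectors -> 3, 3, 2, 2 */
	for (int v = 0; v < q_vectors; v++) {
		int n = DIV_ROUND_UP(qp_remaining, q_vectors - v);

		printf("vector %d: %d pairs\n", v, n);
		qp_remaining -= n;
	}
	return 0;
}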
4305 * i40e_vsi_request_irq - Request IRQ from the OS
4311 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq()
4314 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_request_irq()
4316 else if (pf->flags & I40E_FLAG_MSI_ENABLED) in i40e_vsi_request_irq()
4317 err = request_irq(pf->pdev->irq, i40e_intr, 0, in i40e_vsi_request_irq()
4318 pf->int_name, pf); in i40e_vsi_request_irq()
4320 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, in i40e_vsi_request_irq()
4321 pf->int_name, pf); in i40e_vsi_request_irq()
4324 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); in i40e_vsi_request_irq()
4331 * i40e_netpoll - A Polling 'interrupt' handler
4334 * This is used by netconsole to send skbs without having to re-enable
4340 struct i40e_vsi *vsi = np->vsi; in i40e_netpoll()
4341 struct i40e_pf *pf = vsi->back; in i40e_netpoll()
4345 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_netpoll()
4348 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_netpoll()
4349 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_netpoll()
4350 i40e_msix_clean_rings(0, vsi->q_vectors[i]); in i40e_netpoll()
4352 i40e_intr(pf->pdev->irq, netdev); in i40e_netpoll()
4360 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4365 * This routine will wait for the given Tx queue of the PF to reach the
4367 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4376 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); in i40e_pf_txq_wait()
4383 return -ETIMEDOUT; in i40e_pf_txq_wait()
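
Both wait helpers poll a queue-enable register until its status bit matches the requested state, sleeping between reads and timing out after a bounded number of retries. A userspace-flavoured sketch with invented constants and a stubbed register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_RETRIES	10	/* hypothetical retry budget */
#define WAIT_DELAY_US	100	/* hypothetical delay between reads */
#define QENA_STAT	0x4	/* illustrative status bit */

static unsigned int read_qena(void) { return QENA_STAT; }	/* stub for rd32 */

static int txq_wait(bool enable)
{
	for (int i = 0; i < WAIT_RETRIES; i++) {
		bool on = read_qena() & QENA_STAT;

		if (on == enable)
			return 0;
		usleep(WAIT_DELAY_US);
	}
	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("%d\n", txq_wait(true));
	return 0;
}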
4389 * i40e_control_tx_q - Start or stop a particular Tx queue
4400 struct i40e_hw *hw = &pf->hw; in i40e_control_tx_q()
4405 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); in i40e_control_tx_q()
4433 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4447 /* wait for the change to finish */ in i40e_control_wait_tx_q()
4450 dev_info(&pf->pdev->dev, in i40e_control_wait_tx_q()
4460 * i40e_vsi_enable_tx - Start a VSI's rings
4465 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_tx()
4468 pf_q = vsi->base_queue; in i40e_vsi_enable_tx()
4469 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_tx()
4470 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4479 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4480 pf_q + vsi->alloc_queue_pairs, in i40e_vsi_enable_tx()
4489 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4494 * This routine will wait for the given Rx queue of the PF to reach the
4496 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4505 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); in i40e_pf_rxq_wait()
4512 return -ETIMEDOUT; in i40e_pf_rxq_wait()
4518 * i40e_control_rx_q - Start or stop a particular Rx queue
4529 struct i40e_hw *hw = &pf->hw; in i40e_control_rx_q()
4570 /* wait for the change to finish */ in i40e_control_wait_rx_q()
4579 * i40e_vsi_enable_rx - Start a VSI's rings
4584 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_rx()
4587 pf_q = vsi->base_queue; in i40e_vsi_enable_rx()
4588 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_rx()
4591 dev_info(&pf->pdev->dev, in i40e_vsi_enable_rx()
4593 vsi->seid, pf_q); in i40e_vsi_enable_rx()
4602 * i40e_vsi_start_rings - Start a VSI's rings
4621 * i40e_vsi_stop_rings - Stop a VSI's rings
4626 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings()
4629 /* When port TX is suspended, don't wait */ in i40e_vsi_stop_rings()
4630 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) in i40e_vsi_stop_rings()
4633 q_end = vsi->base_queue + vsi->num_queue_pairs; in i40e_vsi_stop_rings()
4634 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) in i40e_vsi_stop_rings()
4635 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); in i40e_vsi_stop_rings()
4637 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { in i40e_vsi_stop_rings()
4640 dev_info(&pf->pdev->dev, in i40e_vsi_stop_rings()
4642 vsi->seid, pf_q); in i40e_vsi_stop_rings()
4646 pf_q = vsi->base_queue; in i40e_vsi_stop_rings()
4647 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) in i40e_vsi_stop_rings()
4648 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); in i40e_vsi_stop_rings()
4654 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4666 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait()
4669 pf_q = vsi->base_queue; in i40e_vsi_stop_rings_no_wait()
4670 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_stop_rings_no_wait()
4677 * i40e_vsi_free_irq - Free the irq association with the OS
4682 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq()
4683 struct i40e_hw *hw = &pf->hw; in i40e_vsi_free_irq()
4684 int base = vsi->base_vector; in i40e_vsi_free_irq()
4688 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_free_irq()
4689 if (!vsi->q_vectors) in i40e_vsi_free_irq()
4692 if (!vsi->irqs_ready) in i40e_vsi_free_irq()
4695 vsi->irqs_ready = false; in i40e_vsi_free_irq()
4696 for (i = 0; i < vsi->num_q_vectors; i++) { in i40e_vsi_free_irq()
4701 irq_num = pf->msix_entries[vector].vector; in i40e_vsi_free_irq()
4704 if (!vsi->q_vectors[i] || in i40e_vsi_free_irq()
4705 !vsi->q_vectors[i]->num_ringpairs) in i40e_vsi_free_irq()
4713 free_irq(irq_num, vsi->q_vectors[i]); in i40e_vsi_free_irq()
4722 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); in i40e_vsi_free_irq()
4727 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); in i40e_vsi_free_irq()
4762 free_irq(pf->pdev->irq, pf); in i40e_vsi_free_irq()
4797 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4807 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_free_q_vector()
4814 i40e_for_each_ring(ring, q_vector->tx) in i40e_free_q_vector()
4815 ring->q_vector = NULL; in i40e_free_q_vector()
4817 i40e_for_each_ring(ring, q_vector->rx) in i40e_free_q_vector()
4818 ring->q_vector = NULL; in i40e_free_q_vector()
4821 if (vsi->netdev) in i40e_free_q_vector()
4822 netif_napi_del(&q_vector->napi); in i40e_free_q_vector()
4824 vsi->q_vectors[v_idx] = NULL; in i40e_free_q_vector()
4830 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4831 * @vsi: the VSI being un-configured
4840 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) in i40e_vsi_free_q_vectors()
4845 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4851 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_reset_interrupt_capability()
4852 pci_disable_msix(pf->pdev); in i40e_reset_interrupt_capability()
4853 kfree(pf->msix_entries); in i40e_reset_interrupt_capability()
4854 pf->msix_entries = NULL; in i40e_reset_interrupt_capability()
4855 kfree(pf->irq_pile); in i40e_reset_interrupt_capability()
4856 pf->irq_pile = NULL; in i40e_reset_interrupt_capability()
4857 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { in i40e_reset_interrupt_capability()
4858 pci_disable_msi(pf->pdev); in i40e_reset_interrupt_capability()
4860 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_reset_interrupt_capability()
4864 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4868 * to pre-load conditions
4874 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) in i40e_clear_interrupt_scheme()
4877 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, in i40e_clear_interrupt_scheme()
4880 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); in i40e_clear_interrupt_scheme()
4881 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_clear_interrupt_scheme()
4882 if (pf->vsi[i]) in i40e_clear_interrupt_scheme()
4883 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_clear_interrupt_scheme()
4888 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4895 if (!vsi->netdev) in i40e_napi_enable_all()
4898 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_enable_all()
4899 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_enable_all()
4901 if (q_vector->rx.ring || q_vector->tx.ring) in i40e_napi_enable_all()
4902 napi_enable(&q_vector->napi); in i40e_napi_enable_all()
4907 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4914 if (!vsi->netdev) in i40e_napi_disable_all()
4917 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_disable_all()
4918 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_disable_all()
4920 if (q_vector->rx.ring || q_vector->tx.ring) in i40e_napi_disable_all()
4921 napi_disable(&q_vector->napi); in i40e_napi_disable_all()
4926 * i40e_vsi_close - Shut down a VSI
4931 struct i40e_pf *pf = vsi->back; in i40e_vsi_close()
4932 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_close()
4937 vsi->current_netdev_flags = 0; in i40e_vsi_close()
4938 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_vsi_close()
4939 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_vsi_close()
4940 set_bit(__I40E_CLIENT_RESET, pf->state); in i40e_vsi_close()
4944 * i40e_quiesce_vsi - Pause a given VSI
4949 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_quiesce_vsi()
4952 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); in i40e_quiesce_vsi()
4953 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_quiesce_vsi()
4954 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); in i40e_quiesce_vsi()
4960 * i40e_unquiesce_vsi - Resume a given VSI
4965 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) in i40e_unquiesce_vsi()
4968 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_unquiesce_vsi()
4969 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); in i40e_unquiesce_vsi()
4975 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4982 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_quiesce_all_vsi()
4983 if (pf->vsi[v]) in i40e_pf_quiesce_all_vsi()
4984 i40e_quiesce_vsi(pf->vsi[v]); in i40e_pf_quiesce_all_vsi()
4989 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4996 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_unquiesce_all_vsi()
4997 if (pf->vsi[v]) in i40e_pf_unquiesce_all_vsi()
4998 i40e_unquiesce_vsi(pf->vsi[v]); in i40e_pf_unquiesce_all_vsi()
5003 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5006 * Wait until all queues on a given VSI have been disabled.
5010 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled()
5013 pf_q = vsi->base_queue; in i40e_vsi_wait_queues_disabled()
5014 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_wait_queues_disabled()
5015 /* Check and wait for the Tx queue */ in i40e_vsi_wait_queues_disabled()
5018 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5020 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5027 /* Check and wait for the XDP Tx queue */ in i40e_vsi_wait_queues_disabled()
5028 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
5031 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5033 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5037 /* Check and wait for the Rx queue */ in i40e_vsi_wait_queues_disabled()
5040 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5042 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5052 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5062 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { in i40e_pf_wait_queues_disabled()
5063 if (pf->vsi[v]) { in i40e_pf_wait_queues_disabled()
5064 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); in i40e_pf_wait_queues_disabled()
5076 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5085 struct i40e_hw *hw = &pf->hw; in i40e_get_iscsi_tc_map()
5089 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_get_iscsi_tc_map()
5091 for (i = 0; i < dcbcfg->numapps; i++) { in i40e_get_iscsi_tc_map()
5092 app = dcbcfg->app[i]; in i40e_get_iscsi_tc_map()
5095 tc = dcbcfg->etscfg.prioritytable[app.priority]; in i40e_get_iscsi_tc_map()
5105 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5108 * Return the number of TCs from given DCBx configuration
5118 * and create a bitmask of enabled TCs in i40e_dcb_get_num_tc()
5121 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); in i40e_dcb_get_num_tc()
5124 * contiguous TCs starting with TC0 in i40e_dcb_get_num_tc()
5131 pr_err("Non-contiguous TC - Disabling DCB\n"); in i40e_dcb_get_num_tc()
5147 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5166 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5174 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_mqprio_get_enabled_tc()
5175 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; in i40e_mqprio_get_enabled_tc()
5184 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5191 struct i40e_hw *hw = &pf->hw; in i40e_pf_get_num_tc()
5194 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_pf_get_num_tc()
5196 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_pf_get_num_tc()
5197 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5200 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_num_tc()
5203 /* SFP mode will be enabled for all TCs on port */ in i40e_pf_get_num_tc()
5204 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_num_tc()
5207 /* MFP mode return count of enabled TCs for this PF */ in i40e_pf_get_num_tc()
5208 if (pf->hw.func_caps.iscsi) in i40e_pf_get_num_tc()
5221 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5228 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_pf_get_tc_map()
5234 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_tc_map()
5237 /* SFP mode we want PF to be enabled for all TCs */ in i40e_pf_get_tc_map()
5238 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_tc_map()
5239 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); in i40e_pf_get_tc_map()
5242 if (pf->hw.func_caps.iscsi) in i40e_pf_get_tc_map()
5249 * i40e_vsi_get_bw_info - Query VSI BW Information
5258 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info()
5259 struct i40e_hw *hw = &pf->hw; in i40e_vsi_get_bw_info()
5265 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); in i40e_vsi_get_bw_info()
5267 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5269 i40e_stat_str(&pf->hw, ret), in i40e_vsi_get_bw_info()
5270 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5271 return -EINVAL; in i40e_vsi_get_bw_info()
5275 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, in i40e_vsi_get_bw_info()
5278 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5280 i40e_stat_str(&pf->hw, ret), in i40e_vsi_get_bw_info()
5281 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5282 return -EINVAL; in i40e_vsi_get_bw_info()
5286 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5287 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", in i40e_vsi_get_bw_info()
5293 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); in i40e_vsi_get_bw_info()
5294 vsi->bw_max_quanta = bw_config.max_bw; in i40e_vsi_get_bw_info()
5298 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; in i40e_vsi_get_bw_info()
5299 vsi->bw_ets_limit_credits[i] = in i40e_vsi_get_bw_info()
5302 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); in i40e_vsi_get_bw_info()
5309 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5320 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc()
5325 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_vsi_configure_bw_alloc()
5327 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_configure_bw_alloc()
5328 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); in i40e_vsi_configure_bw_alloc()
5330 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5331 "Failed to reset tx rate for vsi->seid %u\n", in i40e_vsi_configure_bw_alloc()
5332 vsi->seid); in i40e_vsi_configure_bw_alloc()
5340 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5342 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5344 pf->hw.aq.asq_last_status); in i40e_vsi_configure_bw_alloc()
5345 return -EINVAL; in i40e_vsi_configure_bw_alloc()
5349 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_vsi_configure_bw_alloc()
5355 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5362 struct net_device *netdev = vsi->netdev; in i40e_vsi_config_netdev_tc()
5363 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc()
5364 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_netdev_tc()
5367 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_vsi_config_netdev_tc()
5377 /* Set up actual enabled TCs on the VSI */ in i40e_vsi_config_netdev_tc()
5378 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) in i40e_vsi_config_netdev_tc()
5383 /* Only set TC queues for enabled tcs in i40e_vsi_config_netdev_tc()
5390 if (vsi->tc_config.enabled_tc & BIT(i)) in i40e_vsi_config_netdev_tc()
5392 vsi->tc_config.tc_info[i].netdev_tc, in i40e_vsi_config_netdev_tc()
5393 vsi->tc_config.tc_info[i].qcount, in i40e_vsi_config_netdev_tc()
5394 vsi->tc_config.tc_info[i].qoffset); in i40e_vsi_config_netdev_tc()
5397 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_vsi_config_netdev_tc()
5403 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; in i40e_vsi_config_netdev_tc()
5405 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; in i40e_vsi_config_netdev_tc()
5411 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5422 vsi->info.mapping_flags = ctxt->info.mapping_flags; in i40e_vsi_update_queue_map()
5423 memcpy(&vsi->info.queue_mapping, in i40e_vsi_update_queue_map()
5424 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); in i40e_vsi_update_queue_map()
5425 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, in i40e_vsi_update_queue_map()
5426 sizeof(vsi->info.tc_mapping)); in i40e_vsi_update_queue_map()
5430 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5434 * This configures a particular VSI for TCs that are mapped to the
5435 * given TC bitmap. It uses default bandwidth share for TCs across
5445 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc()
5446 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_tc()
5451 /* Check if enabled_tc is same as existing or new TCs */ in i40e_vsi_config_tc()
5452 if (vsi->tc_config.enabled_tc == enabled_tc && in i40e_vsi_config_tc()
5453 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in i40e_vsi_config_tc()
5456 /* Enable ETS TCs with equal BW Share for now across all VSIs */ in i40e_vsi_config_tc()
5466 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5468 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5469 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, in i40e_vsi_config_tc()
5472 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5475 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5485 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5493 dev_err(&pf->pdev->dev, in i40e_vsi_config_tc()
5495 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5501 ctxt.seid = vsi->seid; in i40e_vsi_config_tc()
5502 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_vsi_config_tc()
5504 ctxt.uplink_seid = vsi->uplink_seid; in i40e_vsi_config_tc()
5505 ctxt.info = vsi->info; in i40e_vsi_config_tc()
5506 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { in i40e_vsi_config_tc()
5514 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled in i40e_vsi_config_tc()
5517 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { in i40e_vsi_config_tc()
5518 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, in i40e_vsi_config_tc()
5519 vsi->num_queue_pairs); in i40e_vsi_config_tc()
5522 dev_info(&vsi->back->pdev->dev, in i40e_vsi_config_tc()
5526 vsi->reconfig_rss = false; in i40e_vsi_config_tc()
5528 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_vsi_config_tc()
5534 /* Update the VSI after updating the VSI queue-mapping in i40e_vsi_config_tc()
5539 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5542 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5547 vsi->info.valid_sections = 0; in i40e_vsi_config_tc()
5552 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5555 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5566 * i40e_get_link_speed - Returns link speed for the interface
5572 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed()
5574 switch (pf->hw.phy.link_info.link_speed) { in i40e_get_link_speed()
5586 return -EINVAL; in i40e_get_link_speed()
5591 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5600 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit()
5607 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5610 return -EINVAL; in i40e_set_bw_limit()
5613 dev_warn(&pf->pdev->dev, in i40e_set_bw_limit()
5621 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, in i40e_set_bw_limit()
5624 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5625 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", in i40e_set_bw_limit()
5626 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), in i40e_set_bw_limit()
5627 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_set_bw_limit()
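
The admin queue programs the limit in credits rather than Mbps; assuming the usual 50 Mbps-per-credit granularity (the apparent reason the code warns when the requested rate is not a multiple of 50), the rate is effectively rounded down to a credit boundary:

#include <stdio.h>

#define BW_CREDIT_DIVISOR	50	/* assumed: 50 Mbps per credit */

int main(void)
{
	unsigned long long max_tx_rate = 525;	/* Mbps */
	unsigned long long credits = max_tx_rate / BW_CREDIT_DIVISOR;

	/* 525 Mbps -> 10 credits -> effectively 500 Mbps */
	printf("credits=%llu effective=%llu Mbps\n",
	       credits, credits * BW_CREDIT_DIVISOR);
	return 0;
}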
5632 * i40e_remove_queue_channels - Remove queue channels for the TCs
5635 * Remove queue channels for the TCs
5642 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels()
5647 * channel VSIs with non-power-of-2 queue count. in i40e_remove_queue_channels()
5649 vsi->current_rss_size = 0; in i40e_remove_queue_channels()
5652 if (list_empty(&vsi->ch_list)) in i40e_remove_queue_channels()
5655 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_remove_queue_channels()
5658 list_del(&ch->list); in i40e_remove_queue_channels()
5659 p_vsi = ch->parent_vsi; in i40e_remove_queue_channels()
5660 if (!p_vsi || !ch->initialized) { in i40e_remove_queue_channels()
5665 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_remove_queue_channels()
5669 pf_q = ch->base_queue + i; in i40e_remove_queue_channels()
5670 tx_ring = vsi->tx_rings[pf_q]; in i40e_remove_queue_channels()
5671 tx_ring->ch = NULL; in i40e_remove_queue_channels()
5673 rx_ring = vsi->rx_rings[pf_q]; in i40e_remove_queue_channels()
5674 rx_ring->ch = NULL; in i40e_remove_queue_channels()
5678 ret = i40e_set_bw_limit(vsi, ch->seid, 0); in i40e_remove_queue_channels()
5680 dev_info(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
5681 "Failed to reset tx rate for ch->seid %u\n", in i40e_remove_queue_channels()
5682 ch->seid); in i40e_remove_queue_channels()
5686 &pf->cloud_filter_list, cloud_node) { in i40e_remove_queue_channels()
5687 if (cfilter->seid != ch->seid) in i40e_remove_queue_channels()
5690 hash_del(&cfilter->cloud_node); in i40e_remove_queue_channels()
5691 if (cfilter->dst_port) in i40e_remove_queue_channels()
5698 last_aq_status = pf->hw.aq.asq_last_status; in i40e_remove_queue_channels()
5700 dev_info(&pf->pdev->dev, in i40e_remove_queue_channels()
5702 i40e_stat_str(&pf->hw, ret), in i40e_remove_queue_channels()
5703 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_remove_queue_channels()
5708 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_remove_queue_channels()
5711 dev_err(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
5713 ch->seid, p_vsi->seid); in i40e_remove_queue_channels()
5716 INIT_LIST_HEAD(&vsi->ch_list); in i40e_remove_queue_channels()
5720 * i40e_is_any_channel - check if any channel exists
5729 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_is_any_channel()
5730 if (ch->initialized) in i40e_is_any_channel()
5742 * channels/TCs created.
5749 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_get_max_queues_for_channel()
5750 if (!ch->initialized) in i40e_get_max_queues_for_channel()
5752 if (ch->num_queue_pairs > max) in i40e_get_max_queues_for_channel()
5753 max = ch->num_queue_pairs; in i40e_get_max_queues_for_channel()
5760 * i40e_validate_num_queues - validate num_queues w.r.t. the channel
5776 return -EINVAL; in i40e_validate_num_queues()
5779 if (vsi->current_rss_size) { in i40e_validate_num_queues()
5780 if (num_queues > vsi->current_rss_size) { in i40e_validate_num_queues()
5781 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
5783 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
5784 return -EINVAL; in i40e_validate_num_queues()
5785 } else if ((num_queues < vsi->current_rss_size) && in i40e_validate_num_queues()
5787 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
5789 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
5790 return -EINVAL; in i40e_validate_num_queues()
5802 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
5805 return -EINVAL; in i40e_validate_num_queues()
5814 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5822 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss()
5824 struct i40e_hw *hw = &pf->hw; in i40e_vsi_reconfig_rss()
5829 if (!vsi->rss_size) in i40e_vsi_reconfig_rss()
5830 return -EINVAL; in i40e_vsi_reconfig_rss()
5832 if (rss_size > vsi->rss_size) in i40e_vsi_reconfig_rss()
5833 return -EINVAL; in i40e_vsi_reconfig_rss()
5835 local_rss_size = min_t(int, vsi->rss_size, rss_size); in i40e_vsi_reconfig_rss()
5836 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_reconfig_rss()
5838 return -ENOMEM; in i40e_vsi_reconfig_rss()
5841 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
5846 if (vsi->rss_hkey_user) in i40e_vsi_reconfig_rss()
5847 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_reconfig_rss()
5851 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_reconfig_rss()
5853 dev_info(&pf->pdev->dev, in i40e_vsi_reconfig_rss()
5856 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_reconfig_rss()
5863 if (!vsi->orig_rss_size) in i40e_vsi_reconfig_rss()
5864 vsi->orig_rss_size = vsi->rss_size; in i40e_vsi_reconfig_rss()
5865 vsi->current_rss_size = local_rss_size; in i40e_vsi_reconfig_rss()
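/* Editorial sketch: i40e_fill_rss_lut() (called above) spreads the
 * lookup table round-robin across rss_size queues -- the whole idea
 * in userspace form:
 */
static void fill_rss_lut(unsigned char *lut, unsigned int lut_size,
			 unsigned int rss_size)
{
	unsigned int i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;	/* hash buckets cycle over queues */
}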
5871 * i40e_channel_setup_queue_map - Setup a channel queue map
5889 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); in i40e_channel_setup_queue_map()
5890 ch->num_queue_pairs = qcount; in i40e_channel_setup_queue_map()
5892 /* find the next higher power-of-2 of num queue pairs */ in i40e_channel_setup_queue_map()
5901 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_channel_setup_queue_map()
5903 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ in i40e_channel_setup_queue_map()
5904 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); in i40e_channel_setup_queue_map()
5905 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); in i40e_channel_setup_queue_map()
5906 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_channel_setup_queue_map()
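/* Editorial sketch: the elided lines above pack the 16-bit TC0 queue
 * map.  Shift values are assumptions modeled on the i40e AQ layout
 * (queue offset in the low bits, log2 of the queue count above it):
 */
#include <stdint.h>

#define QUE_OFFSET_SHIFT 0	/* assumed I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT */
#define QUE_NUMBER_SHIFT 9	/* assumed I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT */

static uint16_t tc0_qmap(uint16_t offset, uint16_t qcount)
{
	uint16_t pow = 0;

	while ((1u << pow) < qcount)	/* next power-of-2 >= qcount */
		pow++;
	return (offset << QUE_OFFSET_SHIFT) | (pow << QUE_NUMBER_SHIFT);
}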
5910 * i40e_add_channel - add a channel by adding VSI
5920 struct i40e_hw *hw = &pf->hw; in i40e_add_channel()
5925 if (ch->type != I40E_VSI_VMDQ2) { in i40e_add_channel()
5926 dev_info(&pf->pdev->dev, in i40e_add_channel()
5927 "add new vsi failed, ch->type %d\n", ch->type); in i40e_add_channel()
5928 return -EINVAL; in i40e_add_channel()
5932 ctxt.pf_num = hw->pf_id; in i40e_add_channel()
5936 if (ch->type == I40E_VSI_VMDQ2) in i40e_add_channel()
5939 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { in i40e_add_channel()
5952 dev_info(&pf->pdev->dev, in i40e_add_channel()
5954 i40e_stat_str(&pf->hw, ret), in i40e_add_channel()
5955 i40e_aq_str(&pf->hw, in i40e_add_channel()
5956 pf->hw.aq.asq_last_status)); in i40e_add_channel()
5957 return -ENOENT; in i40e_add_channel()
5963 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; in i40e_add_channel()
5964 ch->seid = ctxt.seid; in i40e_add_channel()
5965 ch->vsi_number = ctxt.vsi_number; in i40e_add_channel()
5966 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx); in i40e_add_channel()
5972 ch->info.mapping_flags = ctxt.info.mapping_flags; in i40e_add_channel()
5973 memcpy(&ch->info.queue_mapping, in i40e_add_channel()
5975 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping, in i40e_add_channel()
5989 bw_data.tc_valid_bits = ch->enabled_tc; in i40e_channel_config_bw()
5993 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, in i40e_channel_config_bw()
5996 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_bw()
5997 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n", in i40e_channel_config_bw()
5998 vsi->back->hw.aq.asq_last_status, ch->seid); in i40e_channel_config_bw()
5999 return -EINVAL; in i40e_channel_config_bw()
6003 ch->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_channel_config_bw()
6009 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6025 /* Enable ETS TCs with equal BW Share for now across all VSIs */ in i40e_channel_config_tx_ring()
6027 if (ch->enabled_tc & BIT(i)) in i40e_channel_config_tx_ring()
6034 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_tx_ring()
6036 ch->enabled_tc, ch->seid); in i40e_channel_config_tx_ring()
6040 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_channel_config_tx_ring()
6044 pf_q = ch->base_queue + i; in i40e_channel_config_tx_ring()
6046 /* Get to TX ring ptr of main VSI, for re-setup TX queue in i40e_channel_config_tx_ring()
6049 tx_ring = vsi->tx_rings[pf_q]; in i40e_channel_config_tx_ring()
6050 tx_ring->ch = ch; in i40e_channel_config_tx_ring()
6053 rx_ring = vsi->rx_rings[pf_q]; in i40e_channel_config_tx_ring()
6054 rx_ring->ch = ch; in i40e_channel_config_tx_ring()
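/* Editorial sketch: "equal BW share" above means every enabled TC gets
 * the same relative credit (1); the hardware arbiter then divides the
 * available bandwidth evenly.  Hypothetical helper:
 */
#define MAX_TC 8

static void equal_bw_share(unsigned char enabled_tc, unsigned char *share)
{
	int i;

	for (i = 0; i < MAX_TC; i++)
		share[i] = (enabled_tc & (1u << i)) ? 1 : 0;
}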
6061 * i40e_setup_hw_channel - setup new channel
6078 ch->initialized = false; in i40e_setup_hw_channel()
6079 ch->base_queue = vsi->next_base_queue; in i40e_setup_hw_channel()
6080 ch->type = type; in i40e_setup_hw_channel()
6085 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6092 ch->initialized = true; in i40e_setup_hw_channel()
6097 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6099 ch->seid); in i40e_setup_hw_channel()
6104 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; in i40e_setup_hw_channel()
6105 dev_dbg(&pf->pdev->dev, in i40e_setup_hw_channel()
6106 …"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base… in i40e_setup_hw_channel()
6107 ch->seid, ch->vsi_number, ch->stat_counter_idx, in i40e_setup_hw_channel()
6108 ch->num_queue_pairs, in i40e_setup_hw_channel()
6109 vsi->next_base_queue); in i40e_setup_hw_channel()
6114 * i40e_setup_channel - setup new channel using uplink element
6129 if (vsi->type == I40E_VSI_MAIN) { in i40e_setup_channel()
6132 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", in i40e_setup_channel()
6133 vsi->type); in i40e_setup_channel()
6138 seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_setup_channel()
6143 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); in i40e_setup_channel()
6147 return ch->initialized; in i40e_setup_channel()
6151 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6160 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode()
6161 struct i40e_hw *hw = &pf->hw; in i40e_validate_and_set_switch_mode()
6166 return -EINVAL; in i40e_validate_and_set_switch_mode()
6168 if (hw->dev_caps.switch_mode) { in i40e_validate_and_set_switch_mode()
6169 /* if switch mode is set, support mode2 (non-tunneled for in i40e_validate_and_set_switch_mode()
6172 u32 switch_mode = hw->dev_caps.switch_mode & in i40e_validate_and_set_switch_mode()
6177 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6178 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", in i40e_validate_and_set_switch_mode()
6179 hw->dev_caps.switch_mode); in i40e_validate_and_set_switch_mode()
6180 return -EINVAL; in i40e_validate_and_set_switch_mode()
6194 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, in i40e_validate_and_set_switch_mode()
6195 pf->last_sw_conf_valid_flags, in i40e_validate_and_set_switch_mode()
6197 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) in i40e_validate_and_set_switch_mode()
6198 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6202 hw->aq.asq_last_status)); in i40e_validate_and_set_switch_mode()
6208 * i40e_create_queue_channel - function to create channel
6218 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel()
6223 return -EINVAL; in i40e_create_queue_channel()
6225 if (!ch->num_queue_pairs) { in i40e_create_queue_channel()
6226 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", in i40e_create_queue_channel()
6227 ch->num_queue_pairs); in i40e_create_queue_channel()
6228 return -EINVAL; in i40e_create_queue_channel()
6232 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6235 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", in i40e_create_queue_channel()
6236 ch->num_queue_pairs); in i40e_create_queue_channel()
6237 return -EINVAL; in i40e_create_queue_channel()
6243 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || in i40e_create_queue_channel()
6245 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { in i40e_create_queue_channel()
6246 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6248 vsi->tc_config.tc_info[0].qcount); in i40e_create_queue_channel()
6249 return -EINVAL; in i40e_create_queue_channel()
6252 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_create_queue_channel()
6253 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_create_queue_channel()
6255 if (vsi->type == I40E_VSI_MAIN) { in i40e_create_queue_channel()
6256 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_create_queue_channel()
6269 /* By this time, vsi->cnt_q_avail shall be set to non-zero and in i40e_create_queue_channel()
6272 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { in i40e_create_queue_channel()
6273 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6275 vsi->cnt_q_avail, ch->num_queue_pairs); in i40e_create_queue_channel()
6276 return -EINVAL; in i40e_create_queue_channel()
6280 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { in i40e_create_queue_channel()
6281 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); in i40e_create_queue_channel()
6283 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6285 ch->num_queue_pairs); in i40e_create_queue_channel()
6286 return -EINVAL; in i40e_create_queue_channel()
6291 dev_info(&pf->pdev->dev, "Failed to setup channel\n"); in i40e_create_queue_channel()
6292 return -EINVAL; in i40e_create_queue_channel()
6295 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6297 ch->seid, ch->num_queue_pairs); in i40e_create_queue_channel()
6300 if (ch->max_tx_rate) { in i40e_create_queue_channel()
6301 u64 credits = ch->max_tx_rate; in i40e_create_queue_channel()
6303 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) in i40e_create_queue_channel()
6304 return -EINVAL; in i40e_create_queue_channel()
6307 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6308 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_create_queue_channel()
6309 ch->max_tx_rate, in i40e_create_queue_channel()
6311 ch->seid); in i40e_create_queue_channel()
6315 ch->parent_vsi = vsi; in i40e_create_queue_channel()
6318 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_create_queue_channel()
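/* Editorial sketch: as the dev_dbg text above says, tx rate limits are
 * programmed in credits of 50 Mbps; the elided do_div() amounts to:
 */
#include <stdint.h>

static uint64_t tx_rate_credits(uint64_t max_tx_rate_mbps)
{
	return max_tx_rate_mbps / 50;	/* 1 credit == 50 Mbps */
}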
6324 * i40e_configure_queue_channels - Add queue channel for the given TCs
6327 * Configures queue channel mapping to the given TCs
6335 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */ in i40e_configure_queue_channels()
6336 vsi->tc_seid_map[0] = vsi->seid; in i40e_configure_queue_channels()
6338 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_configure_queue_channels()
6341 ret = -ENOMEM; in i40e_configure_queue_channels()
6345 INIT_LIST_HEAD(&ch->list); in i40e_configure_queue_channels()
6346 ch->num_queue_pairs = in i40e_configure_queue_channels()
6347 vsi->tc_config.tc_info[i].qcount; in i40e_configure_queue_channels()
6348 ch->base_queue = in i40e_configure_queue_channels()
6349 vsi->tc_config.tc_info[i].qoffset; in i40e_configure_queue_channels()
6354 max_rate = vsi->mqprio_qopt.max_rate[i]; in i40e_configure_queue_channels()
6356 ch->max_tx_rate = max_rate; in i40e_configure_queue_channels()
6358 list_add_tail(&ch->list, &vsi->ch_list); in i40e_configure_queue_channels()
6362 dev_err(&vsi->back->pdev->dev, in i40e_configure_queue_channels()
6364 i, ch->num_queue_pairs); in i40e_configure_queue_channels()
6367 vsi->tc_seid_map[i] = ch->seid; in i40e_configure_queue_channels()
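/* Editorial sketch: mqprio hands max_rate in bytes/second while the
 * channel stores Mbps; the elided do_div() divides by 125000
 * (assumed I40E_BW_MBPS_DIVISOR), i.e. (bytes/s * 8) / 10^6:
 */
#include <stdint.h>

static uint64_t bytes_per_sec_to_mbps(uint64_t bytes_per_sec)
{
	return bytes_per_sec / 125000;	/* 125000 B/s == 1 Mbit/s */
}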
6378 * i40e_veb_config_tc - Configure TCs for given VEB
6387 struct i40e_pf *pf = veb->pf; in i40e_veb_config_tc()
6391 /* No TCs or already enabled TCs just return */ in i40e_veb_config_tc()
6392 if (!enabled_tc || veb->enabled_tc == enabled_tc) in i40e_veb_config_tc()
6398 /* Enable ETS TCs with equal BW Share for now */ in i40e_veb_config_tc()
6404 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, in i40e_veb_config_tc()
6407 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6409 i40e_stat_str(&pf->hw, ret), in i40e_veb_config_tc()
6410 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6417 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6419 i40e_stat_str(&pf->hw, ret), in i40e_veb_config_tc()
6420 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6429 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6442 /* Enable the TCs available on PF to all VEBs */ in i40e_dcb_reconfigure()
6448 if (!pf->veb[v]) in i40e_dcb_reconfigure()
6450 ret = i40e_veb_config_tc(pf->veb[v], tc_map); in i40e_dcb_reconfigure()
6452 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6454 pf->veb[v]->seid); in i40e_dcb_reconfigure()
6460 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_dcb_reconfigure()
6461 if (!pf->vsi[v]) in i40e_dcb_reconfigure()
6464 /* - Enable all TCs for the LAN VSI in i40e_dcb_reconfigure()
6465 * - For all others keep them at TC0 for now in i40e_dcb_reconfigure()
6467 if (v == pf->lan_vsi) in i40e_dcb_reconfigure()
6472 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); in i40e_dcb_reconfigure()
6474 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6476 pf->vsi[v]->seid); in i40e_dcb_reconfigure()
6479 /* Re-configure VSI vectors based on updated TC map */ in i40e_dcb_reconfigure()
6480 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); in i40e_dcb_reconfigure()
6481 if (pf->vsi[v]->netdev) in i40e_dcb_reconfigure()
6482 i40e_dcbnl_set_all(pf->vsi[v]); in i40e_dcb_reconfigure()
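/* Editorial sketch: a "TC map" in the loops above is a bitmask with
 * one bit per enabled traffic class; for N contiguous TCs that is
 * simply (1 << N) - 1 (a simplification of the elided helpers):
 */
static unsigned char tc_map_from_count(unsigned int num_tc)
{
	return (unsigned char)((1u << num_tc) - 1);	/* num_tc <= 8 */
}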
6488 * i40e_resume_port_tx - Resume port Tx
6496 struct i40e_hw *hw = &pf->hw; in i40e_resume_port_tx()
6501 dev_info(&pf->pdev->dev, in i40e_resume_port_tx()
6503 i40e_stat_str(&pf->hw, ret), in i40e_resume_port_tx()
6504 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_resume_port_tx()
6506 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_resume_port_tx()
6514 * i40e_suspend_port_tx - Suspend port Tx
6521 struct i40e_hw *hw = &pf->hw; in i40e_suspend_port_tx()
6524 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); in i40e_suspend_port_tx()
6526 dev_info(&pf->pdev->dev, in i40e_suspend_port_tx()
6528 i40e_stat_str(&pf->hw, ret), in i40e_suspend_port_tx()
6529 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_suspend_port_tx()
6531 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_suspend_port_tx()
6539 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6549 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config; in i40e_hw_set_dcb_config()
6554 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n"); in i40e_hw_set_dcb_config()
6563 old_cfg->etsrec = old_cfg->etscfg; in i40e_hw_set_dcb_config()
6564 ret = i40e_set_dcb_config(&pf->hw); in i40e_hw_set_dcb_config()
6566 dev_info(&pf->pdev->dev, in i40e_hw_set_dcb_config()
6568 i40e_stat_str(&pf->hw, ret), in i40e_hw_set_dcb_config()
6569 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_set_dcb_config()
6577 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { in i40e_hw_set_dcb_config()
6578 /* Re-start the VSIs if disabled */ in i40e_hw_set_dcb_config()
6590 * i40e_hw_dcb_config - Program new DCBX settings into HW
6605 struct i40e_hw *hw = &pf->hw; in i40e_hw_dcb_config()
6606 u8 num_ports = hw->num_ports; in i40e_hw_dcb_config()
6608 int ret = -EINVAL; in i40e_hw_dcb_config()
6614 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n"); in i40e_hw_dcb_config()
6615 /* Un-pack information to Program ETS HW via shared API in i40e_hw_dcb_config()
6618 * ETS/NON-ETS arbiter mode in i40e_hw_dcb_config()
6621 * PFC priority bit-map in i40e_hw_dcb_config()
6625 * TSA table (ETS or non-ETS) in i40e_hw_dcb_config()
6635 switch (new_cfg->etscfg.tsatable[i]) { in i40e_hw_dcb_config()
6639 new_cfg->etscfg.tcbwtable[i]; in i40e_hw_dcb_config()
6654 old_cfg = &hw->local_dcbx_config; in i40e_hw_dcb_config()
6664 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_hw_dcb_config()
6666 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_hw_dcb_config()
6668 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
6680 (hw, pf->mac_seid, &ets_data, in i40e_hw_dcb_config()
6683 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
6685 i40e_stat_str(&pf->hw, ret), in i40e_hw_dcb_config()
6686 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
6698 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode, in i40e_hw_dcb_config()
6700 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable, in i40e_hw_dcb_config()
6701 new_cfg->etscfg.prioritytable); in i40e_hw_dcb_config()
6702 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable); in i40e_hw_dcb_config()
6706 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; in i40e_hw_dcb_config()
6711 false, new_cfg->pfc.pfcenable, in i40e_hw_dcb_config()
6713 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg); in i40e_hw_dcb_config()
6716 pf->pb_cfg = pb_cfg; in i40e_hw_dcb_config()
6719 ret = i40e_aq_dcb_updated(&pf->hw, NULL); in i40e_hw_dcb_config()
6721 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
6723 i40e_stat_str(&pf->hw, ret), in i40e_hw_dcb_config()
6724 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
6734 /* Re-start the VSIs if disabled */ in i40e_hw_dcb_config()
6738 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
6743 /* Wait for the PF's queues to be disabled */ in i40e_hw_dcb_config()
6747 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_hw_dcb_config()
6752 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_hw_dcb_config()
6753 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_hw_dcb_config()
6756 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) in i40e_hw_dcb_config()
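/* Editorial sketch: the TSA unpacking earlier in i40e_hw_dcb_config()
 * splits TCs into ETS (weighted: take the BW-table entry) and
 * strict-priority (collected into a bitmask).  The 802.1Qaz selection
 * values used here are assumptions (strict = 0, ETS = 2):
 */
#define TSA_STRICT 0
#define TSA_ETS    2

static void unpack_tsa(const unsigned char *tsa, const unsigned char *bw,
		       unsigned char *ets_share, unsigned char *strict_map)
{
	int i;

	*strict_map = 0;
	for (i = 0; i < 8; i++) {
		if (tsa[i] == TSA_ETS)
			ets_share[i] = bw[i];	/* weighted TC */
		else
			*strict_map |= 1u << i;	/* strict-priority TC */
	}
}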
6765 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
6772 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config; in i40e_dcb_sw_default_config()
6774 struct i40e_hw *hw = &pf->hw; in i40e_dcb_sw_default_config()
6777 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) { in i40e_dcb_sw_default_config()
6779 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); in i40e_dcb_sw_default_config()
6780 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
6781 pf->tmp_cfg.etscfg.maxtcs = 0; in i40e_dcb_sw_default_config()
6782 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
6783 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; in i40e_dcb_sw_default_config()
6784 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING; in i40e_dcb_sw_default_config()
6785 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
6787 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS; in i40e_dcb_sw_default_config()
6788 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE; in i40e_dcb_sw_default_config()
6789 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO; in i40e_dcb_sw_default_config()
6790 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE; in i40e_dcb_sw_default_config()
6792 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg); in i40e_dcb_sw_default_config()
6802 (hw, pf->mac_seid, &ets_data, in i40e_dcb_sw_default_config()
6805 dev_info(&pf->pdev->dev, in i40e_dcb_sw_default_config()
6807 i40e_stat_str(&pf->hw, err), in i40e_dcb_sw_default_config()
6808 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_dcb_sw_default_config()
6809 err = -ENOENT; in i40e_dcb_sw_default_config()
6814 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
6815 dcb_cfg->etscfg.cbs = 0; in i40e_dcb_sw_default_config()
6816 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
6817 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
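/* Editorial sketch: the SW-DCB default built above is "one willing ETS
 * TC carrying 100% of the bandwidth".  In struct form, with
 * hypothetical field names:
 */
struct ets_defaults {
	unsigned char willing;	/* accept the link partner's config */
	unsigned char maxtcs;
	unsigned char tcbw[8];	/* percent per TC, sums to 100 */
};

static struct ets_defaults sw_dcb_defaults(void)
{
	struct ets_defaults d = { .willing = 1, .maxtcs = 0 };

	d.tcbw[0] = 100;	/* everything on TC0 by default */
	return d;
}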
6824 * i40e_init_pf_dcb - Initialize DCB configuration
6832 struct i40e_hw *hw = &pf->hw; in i40e_init_pf_dcb()
6838 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { in i40e_init_pf_dcb()
6839 dev_info(&pf->pdev->dev, "DCB is not supported.\n"); in i40e_init_pf_dcb()
6843 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { in i40e_init_pf_dcb()
6844 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); in i40e_init_pf_dcb()
6847 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n"); in i40e_init_pf_dcb()
6850 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n"); in i40e_init_pf_dcb()
6851 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_init_pf_dcb()
6854 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_init_pf_dcb()
6855 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
6861 if ((!hw->func_caps.dcb) || in i40e_init_pf_dcb()
6862 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { in i40e_init_pf_dcb()
6863 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
6867 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | in i40e_init_pf_dcb()
6870 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_init_pf_dcb()
6874 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) in i40e_init_pf_dcb()
6875 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
6877 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
6878 dev_dbg(&pf->pdev->dev, in i40e_init_pf_dcb()
6881 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_init_pf_dcb()
6882 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); in i40e_init_pf_dcb()
6883 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; in i40e_init_pf_dcb()
6885 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
6887 i40e_stat_str(&pf->hw, err), in i40e_init_pf_dcb()
6888 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_init_pf_dcb()
6897 * i40e_print_link_message - print link up or down
6904 struct i40e_pf *pf = vsi->back; in i40e_print_link_message()
6912 new_speed = pf->hw.phy.link_info.link_speed; in i40e_print_link_message()
6916 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) in i40e_print_link_message()
6918 vsi->current_isup = isup; in i40e_print_link_message()
6919 vsi->current_speed = new_speed; in i40e_print_link_message()
6921 netdev_info(vsi->netdev, "NIC Link is Down\n"); in i40e_print_link_message()
6928 if (pf->hw.func_caps.npar_enable && in i40e_print_link_message()
6929 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || in i40e_print_link_message()
6930 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) in i40e_print_link_message()
6931 netdev_warn(vsi->netdev, in i40e_print_link_message()
6934 switch (pf->hw.phy.link_info.link_speed) { in i40e_print_link_message()
6963 switch (pf->hw.fc.current_mode) { in i40e_print_link_message()
6978 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { in i40e_print_link_message()
6983 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
6986 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
6988 fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
6989 else if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
6991 fec = "CL108 RS-FEC"; in i40e_print_link_message()
6993 /* 'CL108 RS-FEC' should be displayed when RS is requested, or in i40e_print_link_message()
6996 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
6998 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7000 req_fec = "CL108 RS-FEC"; in i40e_print_link_message()
7002 req_fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7004 netdev_info(vsi->netdev, in i40e_print_link_message()
7007 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { in i40e_print_link_message()
7012 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7015 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7017 fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7019 if (pf->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7021 req_fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7023 netdev_info(vsi->netdev, in i40e_print_link_message()
7027 netdev_info(vsi->netdev, in i40e_print_link_message()
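/* Editorial sketch of the FEC-string precedence above: CL108 RS-FEC
 * wins over CL74 FC-FEC/BASE-R when both are requested.  The bit
 * values are illustrative stand-ins for the AQ req_fec_info bits:
 */
#define REQ_FEC_KR (1u << 0)	/* CL74 FC-FEC/BASE-R requested */
#define REQ_FEC_RS (1u << 1)	/* CL108 RS-FEC requested */

static const char *req_fec_str(unsigned int req)
{
	if (req & REQ_FEC_RS)
		return "CL108 RS-FEC";
	if (req & REQ_FEC_KR)
		return "CL74 FC-FEC/BASE-R";
	return "None";
}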
7035 * i40e_up_complete - Finish the last steps of bringing up a connection
7040 struct i40e_pf *pf = vsi->back; in i40e_up_complete()
7043 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_up_complete()
7053 clear_bit(__I40E_VSI_DOWN, vsi->state); in i40e_up_complete()
7057 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && in i40e_up_complete()
7058 (vsi->netdev)) { in i40e_up_complete()
7060 netif_tx_start_all_queues(vsi->netdev); in i40e_up_complete()
7061 netif_carrier_on(vsi->netdev); in i40e_up_complete()
7065 if (vsi->type == I40E_VSI_FDIR) { in i40e_up_complete()
7067 pf->fd_add_err = 0; in i40e_up_complete()
7068 pf->fd_atr_cnt = 0; in i40e_up_complete()
7075 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_up_complete()
7082 * i40e_vsi_reinit_locked - Reset the VSI
7090 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked()
7092 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_vsi_reinit_locked()
7097 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_vsi_reinit_locked()
7101 * i40e_force_link_state - Force the link status
7110 struct i40e_hw *hw = &pf->hw; in i40e_force_link_state()
7124 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7127 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_force_link_state()
7136 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7139 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_force_link_state()
7147 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) in i40e_force_link_state()
7163 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { in i40e_force_link_state()
7181 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7183 i40e_stat_str(&pf->hw, err), in i40e_force_link_state()
7184 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_force_link_state()
7191 /* Wait a little bit (on 40G cards it sometimes takes a really in i40e_force_link_state()
7205 * i40e_up - Bring the connection back up after being down
7212 if (vsi->type == I40E_VSI_MAIN && in i40e_up()
7213 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || in i40e_up()
7214 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) in i40e_up()
7215 i40e_force_link_state(vsi->back, true); in i40e_up()
7225 * i40e_down - Shutdown the connection processing
7233 * sets the vsi->state __I40E_VSI_DOWN bit. in i40e_down()
7235 if (vsi->netdev) { in i40e_down()
7236 netif_carrier_off(vsi->netdev); in i40e_down()
7237 netif_tx_disable(vsi->netdev); in i40e_down()
7241 if (vsi->type == I40E_VSI_MAIN && in i40e_down()
7242 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || in i40e_down()
7243 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) in i40e_down()
7244 i40e_force_link_state(vsi->back, false); in i40e_down()
7247 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_down()
7248 i40e_clean_tx_ring(vsi->tx_rings[i]); in i40e_down()
7250 /* Make sure that in-progress ndo_xdp_xmit and in i40e_down()
7254 i40e_clean_tx_ring(vsi->xdp_rings[i]); in i40e_down()
7256 i40e_clean_rx_ring(vsi->rx_rings[i]); in i40e_down()
7262 * i40e_validate_mqprio_qopt - validate queue mapping info in i40e_validate_mqprio_qopt()
7273 if (mqprio_qopt->qopt.offset[0] != 0 || in i40e_validate_mqprio_qopt()
7274 mqprio_qopt->qopt.num_tc < 1 || in i40e_validate_mqprio_qopt()
7275 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS) in i40e_validate_mqprio_qopt()
7276 return -EINVAL; in i40e_validate_mqprio_qopt()
7278 if (!mqprio_qopt->qopt.count[i]) in i40e_validate_mqprio_qopt()
7279 return -EINVAL; in i40e_validate_mqprio_qopt()
7280 if (mqprio_qopt->min_rate[i]) { in i40e_validate_mqprio_qopt()
7281 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7283 return -EINVAL; in i40e_validate_mqprio_qopt()
7285 max_rate = mqprio_qopt->max_rate[i]; in i40e_validate_mqprio_qopt()
7289 if (i >= mqprio_qopt->qopt.num_tc - 1) in i40e_validate_mqprio_qopt()
7291 if (mqprio_qopt->qopt.offset[i + 1] != in i40e_validate_mqprio_qopt()
7292 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) in i40e_validate_mqprio_qopt()
7293 return -EINVAL; in i40e_validate_mqprio_qopt()
7295 if (vsi->num_queue_pairs < in i40e_validate_mqprio_qopt()
7296 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { in i40e_validate_mqprio_qopt()
7297 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7299 return -EINVAL; in i40e_validate_mqprio_qopt()
7302 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7304 return -EINVAL; in i40e_validate_mqprio_qopt()
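/* Editorial sketch: the core mqprio constraint checked above -- the
 * per-TC queue ranges must tile [0, num_queues) with no gaps or
 * overlap:
 */
#include <stdbool.h>

static bool tcs_are_contiguous(const unsigned int *offset,
			       const unsigned int *count,
			       int num_tc, unsigned int num_queues)
{
	int i;

	if (num_tc < 1 || offset[0] != 0)
		return false;
	for (i = 0; i < num_tc - 1; i++)
		if (offset[i + 1] != offset[i] + count[i])
			return false;	/* gap or overlap between TCs */
	return offset[num_tc - 1] + count[num_tc - 1] <= num_queues;
}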
7310 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7319 vsi->tc_config.numtc = 1; in i40e_vsi_set_default_tc_config()
7320 vsi->tc_config.enabled_tc = 1; in i40e_vsi_set_default_tc_config()
7321 qcount = min_t(int, vsi->alloc_queue_pairs, in i40e_vsi_set_default_tc_config()
7322 i40e_pf_get_max_q_per_tc(vsi->back)); in i40e_vsi_set_default_tc_config()
7327 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_set_default_tc_config()
7329 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_set_default_tc_config()
7331 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_set_default_tc_config()
7332 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_set_default_tc_config()
7357 *aq_err = hw->aq.asq_last_status; in i40e_del_macvlan_filter()
7386 *aq_err = hw->aq.asq_last_status; in i40e_add_macvlan_filter()
7392 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7402 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_reset_ch_rings()
7403 pf_q = ch->base_queue + i; in i40e_reset_ch_rings()
7404 tx_ring = vsi->tx_rings[pf_q]; in i40e_reset_ch_rings()
7405 tx_ring->ch = NULL; in i40e_reset_ch_rings()
7406 rx_ring = vsi->rx_rings[pf_q]; in i40e_reset_ch_rings()
7407 rx_ring->ch = NULL; in i40e_reset_ch_rings()
7424 if (list_empty(&vsi->macvlan_list)) in i40e_free_macvlan_channels()
7427 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_free_macvlan_channels()
7432 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_free_macvlan_channels()
7433 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); in i40e_free_macvlan_channels()
7434 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_free_macvlan_channels()
7435 kfree(ch->fwd); in i40e_free_macvlan_channels()
7436 ch->fwd = NULL; in i40e_free_macvlan_channels()
7439 list_del(&ch->list); in i40e_free_macvlan_channels()
7440 parent_vsi = ch->parent_vsi; in i40e_free_macvlan_channels()
7441 if (!parent_vsi || !ch->initialized) { in i40e_free_macvlan_channels()
7447 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_free_macvlan_channels()
7450 dev_err(&vsi->back->pdev->dev, in i40e_free_macvlan_channels()
7452 ch->seid, parent_vsi->seid); in i40e_free_macvlan_channels()
7455 vsi->macvlan_cnt = 0; in i40e_free_macvlan_channels()
7459 * i40e_fwd_ring_up - bring the macvlan device up
7469 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up()
7470 struct i40e_hw *hw = &pf->hw; in i40e_fwd_ring_up()
7472 if (list_empty(&vsi->macvlan_list)) in i40e_fwd_ring_up()
7473 return -EINVAL; in i40e_fwd_ring_up()
7476 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_ring_up()
7478 ch->fwd = fwd; in i40e_fwd_ring_up()
7481 netdev_bind_sb_channel_queue(vsi->netdev, vdev, in i40e_fwd_ring_up()
7483 ch->num_queue_pairs, in i40e_fwd_ring_up()
7484 ch->base_queue); in i40e_fwd_ring_up()
7485 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7489 pf_q = ch->base_queue + i; in i40e_fwd_ring_up()
7492 tx_ring = vsi->tx_rings[pf_q]; in i40e_fwd_ring_up()
7493 tx_ring->ch = ch; in i40e_fwd_ring_up()
7496 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7497 rx_ring->ch = ch; in i40e_fwd_ring_up()
7509 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); in i40e_fwd_ring_up()
7513 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7517 pf_q = ch->base_queue + i; in i40e_fwd_ring_up()
7518 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7519 rx_ring->netdev = NULL; in i40e_fwd_ring_up()
7521 dev_info(&pf->pdev->dev, in i40e_fwd_ring_up()
7532 * i40e_setup_macvlans - create the channels which will be macvlans
7541 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans()
7542 struct i40e_hw *hw = &pf->hw; in i40e_setup_macvlans()
7549 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) in i40e_setup_macvlans()
7550 return -EINVAL; in i40e_setup_macvlans()
7552 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); in i40e_setup_macvlans()
7554 /* find the next higher power-of-2 of num queue pairs */ in i40e_setup_macvlans()
7555 pow = fls(roundup_pow_of_two(num_qps) - 1); in i40e_setup_macvlans()
7564 ctxt.seid = vsi->seid; in i40e_setup_macvlans()
7565 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_setup_macvlans()
7567 ctxt.uplink_seid = vsi->uplink_seid; in i40e_setup_macvlans()
7568 ctxt.info = vsi->info; in i40e_setup_macvlans()
7571 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_setup_macvlans()
7575 vsi->rss_size = max_t(u16, num_qps, qcnt); in i40e_setup_macvlans()
7578 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7580 vsi->rss_size); in i40e_setup_macvlans()
7583 vsi->reconfig_rss = true; in i40e_setup_macvlans()
7584 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_macvlans()
7585 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); in i40e_setup_macvlans()
7586 vsi->next_base_queue = num_qps; in i40e_setup_macvlans()
7587 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; in i40e_setup_macvlans()
7589 /* Update the VSI after updating the VSI queue-mapping in i40e_setup_macvlans()
7594 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7597 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_setup_macvlans()
7602 vsi->info.valid_sections = 0; in i40e_setup_macvlans()
7605 INIT_LIST_HEAD(&vsi->macvlan_list); in i40e_setup_macvlans()
7609 ret = -ENOMEM; in i40e_setup_macvlans()
7612 INIT_LIST_HEAD(&ch->list); in i40e_setup_macvlans()
7613 ch->num_queue_pairs = qcnt; in i40e_setup_macvlans()
7615 ret = -EINVAL; in i40e_setup_macvlans()
7619 ch->parent_vsi = vsi; in i40e_setup_macvlans()
7620 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_setup_macvlans()
7621 vsi->macvlan_cnt++; in i40e_setup_macvlans()
7622 list_add_tail(&ch->list, &vsi->macvlan_list); in i40e_setup_macvlans()
7628 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); in i40e_setup_macvlans()
7635 * i40e_fwd_add - configure macvlans
7643 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_add()
7644 struct i40e_pf *pf = vsi->back; in i40e_fwd_add()
7648 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_fwd_add()
7650 return ERR_PTR(-EINVAL); in i40e_fwd_add()
7652 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { in i40e_fwd_add()
7654 return ERR_PTR(-EINVAL); in i40e_fwd_add()
7656 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { in i40e_fwd_add()
7658 return ERR_PTR(-EINVAL); in i40e_fwd_add()
7665 return ERR_PTR(-ERANGE); in i40e_fwd_add()
7667 if (!vsi->macvlan_cnt) { in i40e_fwd_add()
7669 set_bit(0, vsi->fwd_bitmask); in i40e_fwd_add()
7675 vectors = pf->num_lan_msix; in i40e_fwd_add()
7679 macvlan_cnt = (vectors - 32) / 4; in i40e_fwd_add()
7683 macvlan_cnt = (vectors - 16) / 2; in i40e_fwd_add()
7687 macvlan_cnt = vectors - 16; in i40e_fwd_add()
7691 macvlan_cnt = vectors - 8; in i40e_fwd_add()
7695 macvlan_cnt = vectors - 1; in i40e_fwd_add()
7699 return ERR_PTR(-EBUSY); in i40e_fwd_add()
7713 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, in i40e_fwd_add()
7714 vsi->macvlan_cnt); in i40e_fwd_add()
7716 return ERR_PTR(-EBUSY); in i40e_fwd_add()
7721 return ERR_PTR(-ENOMEM); in i40e_fwd_add()
7723 set_bit(avail_macvlan, vsi->fwd_bitmask); in i40e_fwd_add()
7724 fwd->bit_no = avail_macvlan; in i40e_fwd_add()
7726 fwd->netdev = vdev; in i40e_fwd_add()
7739 return ERR_PTR(-EINVAL); in i40e_fwd_add()
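/* Editorial sketch: the vector tiering above trades MSI-X vectors
 * between the PF and macvlan offload -- more vectors buy more (and
 * wider) macvlan channels.  The per-tier queue counts (qcnt) are
 * reconstructed assumptions; the cap at the device maximum is elided:
 */
static void size_macvlans(int vectors, int *macvlan_cnt, int *qcnt)
{
	if (vectors > 64)      { *macvlan_cnt = (vectors - 32) / 4; *qcnt = 4; }
	else if (vectors > 32) { *macvlan_cnt = (vectors - 16) / 2; *qcnt = 2; }
	else if (vectors > 16) { *macvlan_cnt = vectors - 16; *qcnt = 1; }
	else if (vectors > 8)  { *macvlan_cnt = vectors - 8;  *qcnt = 1; }
	else                   { *macvlan_cnt = vectors - 1;  *qcnt = 1; }
}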
7746 * i40e_del_all_macvlans - Delete all the mac filters on the channels
7752 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans()
7753 struct i40e_hw *hw = &pf->hw; in i40e_del_all_macvlans()
7756 if (list_empty(&vsi->macvlan_list)) in i40e_del_all_macvlans()
7759 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_del_all_macvlans()
7761 ret = i40e_del_macvlan_filter(hw, ch->seid, in i40e_del_all_macvlans()
7767 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_del_all_macvlans()
7768 netdev_unbind_sb_channel(vsi->netdev, in i40e_del_all_macvlans()
7769 ch->fwd->netdev); in i40e_del_all_macvlans()
7770 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_del_all_macvlans()
7771 kfree(ch->fwd); in i40e_del_all_macvlans()
7772 ch->fwd = NULL; in i40e_del_all_macvlans()
7779 * i40e_fwd_del - delete macvlan interfaces
7788 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_del()
7789 struct i40e_pf *pf = vsi->back; in i40e_fwd_del()
7790 struct i40e_hw *hw = &pf->hw; in i40e_fwd_del()
7794 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_del()
7797 fwd->netdev->dev_addr)) { in i40e_fwd_del()
7798 ret = i40e_del_macvlan_filter(hw, ch->seid, in i40e_fwd_del()
7804 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_fwd_del()
7805 netdev_unbind_sb_channel(netdev, fwd->netdev); in i40e_fwd_del()
7806 netdev_set_sb_channel(fwd->netdev, 0); in i40e_fwd_del()
7807 kfree(ch->fwd); in i40e_fwd_del()
7808 ch->fwd = NULL; in i40e_fwd_del()
7810 dev_info(&pf->pdev->dev, in i40e_fwd_del()
7821 * i40e_setup_tc - configure multiple traffic classes
7829 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc()
7830 struct i40e_pf *pf = vsi->back; in i40e_setup_tc()
7834 int ret = -EINVAL; in i40e_setup_tc()
7838 old_queue_pairs = vsi->num_queue_pairs; in i40e_setup_tc()
7839 num_tc = mqprio_qopt->qopt.num_tc; in i40e_setup_tc()
7840 hw = mqprio_qopt->qopt.hw; in i40e_setup_tc()
7841 mode = mqprio_qopt->mode; in i40e_setup_tc()
7843 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
7844 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in i40e_setup_tc()
7849 if (pf->flags & I40E_FLAG_MFP_ENABLED) { in i40e_setup_tc()
7856 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
7859 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_setup_tc()
7873 if (pf->flags & I40E_FLAG_DCB_ENABLED) { in i40e_setup_tc()
7878 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_setup_tc()
7883 memcpy(&vsi->mqprio_qopt, mqprio_qopt, in i40e_setup_tc()
7885 pf->flags |= I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
7886 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_setup_tc()
7889 return -EINVAL; in i40e_setup_tc()
7898 if (enabled_tc == vsi->tc_config.enabled_tc && in i40e_setup_tc()
7905 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO)) in i40e_setup_tc()
7908 /* Configure VSI for enabled TCs */ in i40e_setup_tc()
7912 vsi->seid); in i40e_setup_tc()
7916 dev_info(&vsi->back->pdev->dev, in i40e_setup_tc()
7918 vsi->seid, vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
7921 if (pf->flags & I40E_FLAG_TC_MQPRIO) { in i40e_setup_tc()
7922 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_setup_tc()
7923 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; in i40e_setup_tc()
7926 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_setup_tc()
7931 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_tc()
7932 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_setup_tc()
7935 vsi->seid); in i40e_setup_tc()
7943 vsi->num_queue_pairs = old_queue_pairs; in i40e_setup_tc()
7964 * i40e_set_cld_element - sets cloud filter element data
7978 ether_addr_copy(cld->outer_mac, filter->dst_mac); in i40e_set_cld_element()
7979 ether_addr_copy(cld->inner_mac, filter->src_mac); in i40e_set_cld_element()
7981 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) in i40e_set_cld_element()
7984 if (filter->n_proto == ETH_P_IPV6) { in i40e_set_cld_element()
7985 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) in i40e_set_cld_element()
7986 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) { in i40e_set_cld_element()
7987 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); in i40e_set_cld_element()
7989 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa); in i40e_set_cld_element()
7992 ipa = be32_to_cpu(filter->dst_ipv4); in i40e_set_cld_element()
7994 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); in i40e_set_cld_element()
7997 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id)); in i40e_set_cld_element()
8000 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id) in i40e_set_cld_element()
8002 if (filter->tenant_id) in i40e_set_cld_element()
8007 * i40e_add_del_cloud_filter - Add/del cloud filter
8019 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter()
8038 if (filter->flags >= ARRAY_SIZE(flag_table)) in i40e_add_del_cloud_filter()
8046 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) in i40e_add_del_cloud_filter()
8047 cld_filter.flags = cpu_to_le16(filter->tunnel_type << in i40e_add_del_cloud_filter()
8050 if (filter->n_proto == ETH_P_IPV6) in i40e_add_del_cloud_filter()
8051 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | in i40e_add_del_cloud_filter()
8054 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | in i40e_add_del_cloud_filter()
8058 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8061 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8064 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8066 add ? "add" : "delete", filter->dst_port, ret, in i40e_add_del_cloud_filter()
8067 pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter()
8069 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8071 add ? "Added" : "Deleted", filter->seid); in i40e_add_del_cloud_filter()
8076 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8089 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf()
8093 if ((is_valid_ether_addr(filter->dst_mac) && in i40e_add_del_cloud_filter_big_buf()
8094 is_valid_ether_addr(filter->src_mac)) || in i40e_add_del_cloud_filter_big_buf()
8095 (is_multicast_ether_addr(filter->dst_mac) && in i40e_add_del_cloud_filter_big_buf()
8096 is_multicast_ether_addr(filter->src_mac))) in i40e_add_del_cloud_filter_big_buf()
8097 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8099 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP in i40e_add_del_cloud_filter_big_buf()
8102 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) in i40e_add_del_cloud_filter_big_buf()
8103 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8106 if (filter->src_port || in i40e_add_del_cloud_filter_big_buf()
8107 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) || in i40e_add_del_cloud_filter_big_buf()
8108 !ipv6_addr_any(&filter->ip.v6.src_ip6)) in i40e_add_del_cloud_filter_big_buf()
8109 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8116 if (is_valid_ether_addr(filter->dst_mac) || in i40e_add_del_cloud_filter_big_buf()
8117 is_valid_ether_addr(filter->src_mac) || in i40e_add_del_cloud_filter_big_buf()
8118 is_multicast_ether_addr(filter->dst_mac) || in i40e_add_del_cloud_filter_big_buf()
8119 is_multicast_ether_addr(filter->src_mac)) { in i40e_add_del_cloud_filter_big_buf()
8121 if (filter->dst_ipv4) in i40e_add_del_cloud_filter_big_buf()
8122 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8131 if (filter->vlan_id) { in i40e_add_del_cloud_filter_big_buf()
8136 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) || in i40e_add_del_cloud_filter_big_buf()
8137 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { in i40e_add_del_cloud_filter_big_buf()
8140 if (filter->n_proto == ETH_P_IPV6) in i40e_add_del_cloud_filter_big_buf()
8147 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8149 return -EINVAL; in i40e_add_del_cloud_filter_big_buf()
8154 be16_to_cpu(filter->dst_port); in i40e_add_del_cloud_filter_big_buf()
8160 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8166 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8169 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8174 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8176 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter_big_buf()
8178 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8180 add ? "add" : "delete", filter->seid, in i40e_add_del_cloud_filter_big_buf()
8181 ntohs(filter->dst_port)); in i40e_add_del_cloud_filter_big_buf()
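/* Editorial sketch: big-buffer cloud filters key on either
 * MAC(+VLAN)+L4 port or IP+L4 port, never both, and the L4 dest port
 * is mandatory -- the shape of the checks above as one predicate:
 */
#include <stdbool.h>

enum bb_flavor { BB_MAC_VLAN_PORT, BB_IP_PORT, BB_UNSUPPORTED };

static enum bb_flavor bb_classify(bool has_mac, bool has_ip, bool has_port)
{
	if (!has_port)		/* L4 destination port is mandatory */
		return BB_UNSUPPORTED;
	if (has_mac && has_ip)	/* MAC and IP keys are mutually exclusive */
		return BB_UNSUPPORTED;
	return has_mac ? BB_MAC_VLAN_PORT : BB_IP_PORT;
}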
8186 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8197 struct flow_dissector *dissector = rule->match.dissector; in i40e_parse_cls_flower()
8199 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower()
8202 if (dissector->used_keys & in i40e_parse_cls_flower()
8211 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", in i40e_parse_cls_flower()
8212 dissector->used_keys); in i40e_parse_cls_flower()
8213 return -EOPNOTSUPP; in i40e_parse_cls_flower()
8220 if (match.mask->keyid != 0) in i40e_parse_cls_flower()
8223 filter->tenant_id = be32_to_cpu(match.key->keyid); in i40e_parse_cls_flower()
8230 n_proto_key = ntohs(match.key->n_proto); in i40e_parse_cls_flower()
8231 n_proto_mask = ntohs(match.mask->n_proto); in i40e_parse_cls_flower()
8237 filter->n_proto = n_proto_key & n_proto_mask; in i40e_parse_cls_flower()
8238 filter->ip_proto = match.key->ip_proto; in i40e_parse_cls_flower()
8247 if (!is_zero_ether_addr(match.mask->dst)) { in i40e_parse_cls_flower()
8248 if (is_broadcast_ether_addr(match.mask->dst)) { in i40e_parse_cls_flower()
8251 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", in i40e_parse_cls_flower()
8252 match.mask->dst); in i40e_parse_cls_flower()
8257 if (!is_zero_ether_addr(match.mask->src)) { in i40e_parse_cls_flower()
8258 if (is_broadcast_ether_addr(match.mask->src)) { in i40e_parse_cls_flower()
8261 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", in i40e_parse_cls_flower()
8262 match.mask->src); in i40e_parse_cls_flower()
8266 ether_addr_copy(filter->dst_mac, match.key->dst); in i40e_parse_cls_flower()
8267 ether_addr_copy(filter->src_mac, match.key->src); in i40e_parse_cls_flower()
8274 if (match.mask->vlan_id) { in i40e_parse_cls_flower()
8275 if (match.mask->vlan_id == VLAN_VID_MASK) { in i40e_parse_cls_flower()
8279 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", in i40e_parse_cls_flower()
8280 match.mask->vlan_id); in i40e_parse_cls_flower()
8285 filter->vlan_id = cpu_to_be16(match.key->vlan_id); in i40e_parse_cls_flower()
8292 addr_type = match.key->addr_type; in i40e_parse_cls_flower()
8299 if (match.mask->dst) { in i40e_parse_cls_flower()
8300 if (match.mask->dst == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8303 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", in i40e_parse_cls_flower()
8304 &match.mask->dst); in i40e_parse_cls_flower()
8309 if (match.mask->src) { in i40e_parse_cls_flower()
8310 if (match.mask->src == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8313 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", in i40e_parse_cls_flower()
8314 &match.mask->src); in i40e_parse_cls_flower()
8320 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); in i40e_parse_cls_flower()
8323 filter->dst_ipv4 = match.key->dst; in i40e_parse_cls_flower()
8324 filter->src_ipv4 = match.key->src; in i40e_parse_cls_flower()
8335 if (ipv6_addr_loopback(&match.key->dst) || in i40e_parse_cls_flower()
8336 ipv6_addr_loopback(&match.key->src)) { in i40e_parse_cls_flower()
8337 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8341 if (!ipv6_addr_any(&match.mask->dst) || in i40e_parse_cls_flower()
8342 !ipv6_addr_any(&match.mask->src)) in i40e_parse_cls_flower()
8345 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, in i40e_parse_cls_flower()
8346 sizeof(filter->src_ipv6)); in i40e_parse_cls_flower()
8347 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, in i40e_parse_cls_flower()
8348 sizeof(filter->dst_ipv6)); in i40e_parse_cls_flower()
8355 if (match.mask->src) { in i40e_parse_cls_flower()
8356 if (match.mask->src == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8359 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", in i40e_parse_cls_flower()
8360 be16_to_cpu(match.mask->src)); in i40e_parse_cls_flower()
8365 if (match.mask->dst) { in i40e_parse_cls_flower()
8366 if (match.mask->dst == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8369 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", in i40e_parse_cls_flower()
8370 be16_to_cpu(match.mask->dst)); in i40e_parse_cls_flower()
8375 filter->dst_port = match.key->dst; in i40e_parse_cls_flower()
8376 filter->src_port = match.key->src; in i40e_parse_cls_flower()
8378 switch (filter->ip_proto) { in i40e_parse_cls_flower()
8383 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8385 return -EINVAL; in i40e_parse_cls_flower()
8388 filter->flags = field_flags; in i40e_parse_cls_flower()
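/* Editorial sketch: every key parsed above accepts only exact-match
 * masks -- all-ones selects a field, all-zeros ignores it, anything
 * in between is rejected:
 */
#include <stdbool.h>
#include <stdint.h>

static bool mask_ok_u32(uint32_t mask)
{
	return mask == 0 || mask == 0xffffffffu;
}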
8406 filter->seid = vsi->seid; in i40e_handle_tclass()
8408 } else if (vsi->tc_config.enabled_tc & BIT(tc)) { in i40e_handle_tclass()
8409 if (!filter->dst_port) { in i40e_handle_tclass()
8410 dev_err(&vsi->back->pdev->dev, in i40e_handle_tclass()
8412 return -EINVAL; in i40e_handle_tclass()
8414 if (list_empty(&vsi->ch_list)) in i40e_handle_tclass()
8415 return -EINVAL; in i40e_handle_tclass()
8416 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, in i40e_handle_tclass()
8418 if (ch->seid == vsi->tc_seid_map[tc]) in i40e_handle_tclass()
8419 filter->seid = ch->seid; in i40e_handle_tclass()
8423 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); in i40e_handle_tclass()
8424 return -EINVAL; in i40e_handle_tclass()
8428 * i40e_configure_clsflower - Configure tc flower filters
8436 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); in i40e_configure_clsflower()
8438 struct i40e_pf *pf = vsi->back; in i40e_configure_clsflower()
8442 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); in i40e_configure_clsflower()
8443 return -EOPNOTSUPP; in i40e_configure_clsflower()
8446 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_configure_clsflower()
8447 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_configure_clsflower()
8448 return -EBUSY; in i40e_configure_clsflower()
8450 if (pf->fdir_pf_active_filters || in i40e_configure_clsflower()
8451 (!hlist_empty(&pf->fdir_filter_list))) { in i40e_configure_clsflower()
8452 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8454 return -EINVAL; in i40e_configure_clsflower()
8457 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_configure_clsflower()
8458 dev_err(&vsi->back->pdev->dev, in i40e_configure_clsflower()
8459 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n"); in i40e_configure_clsflower()
8460 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_configure_clsflower()
8461 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_configure_clsflower()
8466 return -ENOMEM; in i40e_configure_clsflower()
8468 filter->cookie = cls_flower->cookie; in i40e_configure_clsflower()
8479 if (filter->dst_port) in i40e_configure_clsflower()
8485 dev_err(&pf->pdev->dev, in i40e_configure_clsflower()
8487 i40e_stat_str(&pf->hw, err)); in i40e_configure_clsflower()
8492 INIT_HLIST_NODE(&filter->cloud_node); in i40e_configure_clsflower()
8494 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); in i40e_configure_clsflower()
8496 pf->num_cloud_filters++; in i40e_configure_clsflower()
8505 * i40e_find_cloud_filter - Find the cloud filter in the list in i40e_find_cloud_filter()
8517 &vsi->back->cloud_filter_list, cloud_node) in i40e_find_cloud_filter()
8518 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) in i40e_find_cloud_filter()
8524 * i40e_delete_clsflower - Remove tc flower filters
8533 struct i40e_pf *pf = vsi->back; in i40e_delete_clsflower()
8536 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie); in i40e_delete_clsflower()
8539 return -EINVAL; in i40e_delete_clsflower()
8541 hash_del(&filter->cloud_node); in i40e_delete_clsflower()
8543 if (filter->dst_port) in i40e_delete_clsflower()
8550 dev_err(&pf->pdev->dev, in i40e_delete_clsflower()
8552 i40e_stat_str(&pf->hw, err)); in i40e_delete_clsflower()
8553 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); in i40e_delete_clsflower()
8556 pf->num_cloud_filters--; in i40e_delete_clsflower()
8557 if (!pf->num_cloud_filters) in i40e_delete_clsflower()
8558 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_delete_clsflower()
8559 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_delete_clsflower()
8560 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_delete_clsflower()
8561 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_delete_clsflower()
8562 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_delete_clsflower()
8568 * i40e_setup_tc_cls_flower - flower classifier offloads
8575 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc_cls_flower()
8577 switch (cls_flower->command) { in i40e_setup_tc_cls_flower()
8583 return -EOPNOTSUPP; in i40e_setup_tc_cls_flower()
8585 return -EOPNOTSUPP; in i40e_setup_tc_cls_flower()
8594 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data)) in i40e_setup_tc_block_cb()
8595 return -EOPNOTSUPP; in i40e_setup_tc_block_cb()
8602 return -EOPNOTSUPP; in i40e_setup_tc_block_cb()
8622 return -EOPNOTSUPP; in __i40e_setup_tc()
8627 * i40e_open - Called when a network interface is made active
8641 struct i40e_vsi *vsi = np->vsi; in i40e_open()
8642 struct i40e_pf *pf = vsi->back; in i40e_open()
8646 if (test_bit(__I40E_TESTING, pf->state) || in i40e_open()
8647 test_bit(__I40E_BAD_EEPROM, pf->state)) in i40e_open()
8648 return -EBUSY; in i40e_open()
8653 return -EAGAIN; in i40e_open()
8660 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
8662 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
8665 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); in i40e_open()
8672 * i40e_vsi_open - bring up a VSI and set up its resources in i40e_vsi_open()
8683 struct i40e_pf *pf = vsi->back; in i40e_vsi_open()
8699 if (vsi->netdev) { in i40e_vsi_open()
8700 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", in i40e_vsi_open()
8701 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); in i40e_vsi_open()
8707 err = netif_set_real_num_tx_queues(vsi->netdev, in i40e_vsi_open()
8708 vsi->num_queue_pairs); in i40e_vsi_open()
8712 err = netif_set_real_num_rx_queues(vsi->netdev, in i40e_vsi_open()
8713 vsi->num_queue_pairs); in i40e_vsi_open()
8717 } else if (vsi->type == I40E_VSI_FDIR) { in i40e_vsi_open()
8718 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", in i40e_vsi_open()
8719 dev_driver_string(&pf->pdev->dev), in i40e_vsi_open()
8720 dev_name(&pf->pdev->dev)); in i40e_vsi_open()
8726 err = -EINVAL; in i40e_vsi_open()
8744 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_open()
8751 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8764 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_exit()
8765 hlist_del(&filter->fdir_node); in i40e_fdir_filter_exit()
8769 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { in i40e_fdir_filter_exit()
8770 list_del(&pit_entry->list); in i40e_fdir_filter_exit()
8773 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_fdir_filter_exit()
8775 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { in i40e_fdir_filter_exit()
8776 list_del(&pit_entry->list); in i40e_fdir_filter_exit()
8779 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_fdir_filter_exit()
8781 pf->fdir_pf_active_filters = 0; in i40e_fdir_filter_exit()
8830 * i40e_cloud_filter_exit - Cleans up the cloud filters
8842 &pf->cloud_filter_list, cloud_node) { in i40e_cloud_filter_exit()
8843 hlist_del(&cfilter->cloud_node); in i40e_cloud_filter_exit()
8846 pf->num_cloud_filters = 0; in i40e_cloud_filter_exit()
8848 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_cloud_filter_exit()
8849 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_cloud_filter_exit()
8850 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_cloud_filter_exit()
8851 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_cloud_filter_exit()
8852 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_cloud_filter_exit()
8857 * i40e_close - Disables a network interface
8860 * The close entry point is called when an interface is de-activated
8869 struct i40e_vsi *vsi = np->vsi; in i40e_close()
8877 * i40e_do_reset - Start a PF or Core Reset sequence
8902 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); in i40e_do_reset()
8903 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
8905 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
8913 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); in i40e_do_reset()
8914 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
8916 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
8917 i40e_flush(&pf->hw); in i40e_do_reset()
8923 * Resets only the PF-specific registers in i40e_do_reset()
8925 * This goes directly to the tear-down and rebuild of in i40e_do_reset()
8929 dev_dbg(&pf->pdev->dev, "PFR requested\n"); in i40e_do_reset()
8939 dev_info(&pf->pdev->dev, in i40e_do_reset()
8940 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? in i40e_do_reset()
8947 /* Find the VSI(s) that requested a re-init */ in i40e_do_reset()
8948 dev_info(&pf->pdev->dev, in i40e_do_reset()
8950 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
8951 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
8955 vsi->state)) in i40e_do_reset()
8956 i40e_vsi_reinit_locked(pf->vsi[v]); in i40e_do_reset()
8962 dev_info(&pf->pdev->dev, "VSI down requested\n"); in i40e_do_reset()
8963 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
8964 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
8968 vsi->state)) { in i40e_do_reset()
8969 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_do_reset()
8974 dev_info(&pf->pdev->dev, in i40e_do_reset()
8981 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8993 if (memcmp(&new_cfg->etscfg, in i40e_dcb_need_reconfig()
8994 &old_cfg->etscfg, in i40e_dcb_need_reconfig()
8995 sizeof(new_cfg->etscfg))) { in i40e_dcb_need_reconfig()
8997 if (memcmp(&new_cfg->etscfg.prioritytable, in i40e_dcb_need_reconfig()
8998 &old_cfg->etscfg.prioritytable, in i40e_dcb_need_reconfig()
8999 sizeof(new_cfg->etscfg.prioritytable))) { in i40e_dcb_need_reconfig()
9001 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); in i40e_dcb_need_reconfig()
9004 if (memcmp(&new_cfg->etscfg.tcbwtable, in i40e_dcb_need_reconfig()
9005 &old_cfg->etscfg.tcbwtable, in i40e_dcb_need_reconfig()
9006 sizeof(new_cfg->etscfg.tcbwtable))) in i40e_dcb_need_reconfig()
9007 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); in i40e_dcb_need_reconfig()
9009 if (memcmp(&new_cfg->etscfg.tsatable, in i40e_dcb_need_reconfig()
9010 &old_cfg->etscfg.tsatable, in i40e_dcb_need_reconfig()
9011 sizeof(new_cfg->etscfg.tsatable))) in i40e_dcb_need_reconfig()
9012 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); in i40e_dcb_need_reconfig()
9016 if (memcmp(&new_cfg->pfc, in i40e_dcb_need_reconfig()
9017 &old_cfg->pfc, in i40e_dcb_need_reconfig()
9018 sizeof(new_cfg->pfc))) { in i40e_dcb_need_reconfig()
9020 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); in i40e_dcb_need_reconfig()
9024 if (memcmp(&new_cfg->app, in i40e_dcb_need_reconfig()
9025 &old_cfg->app, in i40e_dcb_need_reconfig()
9026 sizeof(new_cfg->app))) { in i40e_dcb_need_reconfig()
9028 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); in i40e_dcb_need_reconfig()
9031 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); in i40e_dcb_need_reconfig()
9036 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9044 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; in i40e_handle_lldp_event()
9045 struct i40e_hw *hw = &pf->hw; in i40e_handle_lldp_event()
9051 /* X710-T*L 2.5G and 5G speeds don't support DCB */ in i40e_handle_lldp_event()
9052 if (I40E_IS_X710TL_DEVICE(hw->device_id) && in i40e_handle_lldp_event()
9053 (hw->phy.link_info.link_speed & in i40e_handle_lldp_event()
9055 !(pf->flags & I40E_FLAG_DCB_CAPABLE)) in i40e_handle_lldp_event()
9057 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_handle_lldp_event()
9060 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) in i40e_handle_lldp_event()
9064 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) in i40e_handle_lldp_event()
9066 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); in i40e_handle_lldp_event()
9071 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; in i40e_handle_lldp_event()
9072 dev_dbg(&pf->pdev->dev, in i40e_handle_lldp_event()
9078 &hw->remote_dcbx_config); in i40e_handle_lldp_event()
9083 tmp_dcbx_cfg = hw->local_dcbx_config; in i40e_handle_lldp_event()
9086 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); in i40e_handle_lldp_event()
9088 ret = i40e_get_dcb_config(&pf->hw); in i40e_handle_lldp_event()
9090 /* X710-T*L 2.5G and 5G speeds don't support DCB */ in i40e_handle_lldp_event()
9091 if (I40E_IS_X710TL_DEVICE(hw->device_id) && in i40e_handle_lldp_event()
9092 (hw->phy.link_info.link_speed & in i40e_handle_lldp_event()
9094 dev_warn(&pf->pdev->dev, in i40e_handle_lldp_event()
9095 "DCB is not supported for X710-T*L 2.5/5G speeds\n"); in i40e_handle_lldp_event()
9096 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_handle_lldp_event()
9098 dev_info(&pf->pdev->dev, in i40e_handle_lldp_event()
9100 i40e_stat_str(&pf->hw, ret), in i40e_handle_lldp_event()
9101 i40e_aq_str(&pf->hw, in i40e_handle_lldp_event()
9102 pf->hw.aq.asq_last_status)); in i40e_handle_lldp_event()
9108 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, in i40e_handle_lldp_event()
9110 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); in i40e_handle_lldp_event()
9115 &hw->local_dcbx_config); in i40e_handle_lldp_event()
9117 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); in i40e_handle_lldp_event()
9123 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) in i40e_handle_lldp_event()
9124 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
9126 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
9128 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9137 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
9142 /* Wait for the PF's queues to be disabled */ in i40e_handle_lldp_event()
9146 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_handle_lldp_event()
9150 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_handle_lldp_event()
9151 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_handle_lldp_event()
9160 * i40e_do_reset_safe - Protected reset path for userland calls.
9173 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9184 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; in i40e_handle_lan_overflow_event()
9185 u32 queue = le32_to_cpu(data->prtdcb_rupto); in i40e_handle_lan_overflow_event()
9186 u32 qtx_ctl = le32_to_cpu(data->otx_ctl); in i40e_handle_lan_overflow_event()
9187 struct i40e_hw *hw = &pf->hw; in i40e_handle_lan_overflow_event()
9191 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", in i40e_handle_lan_overflow_event()
9199 vf_id -= hw->func_caps.vf_base_id; in i40e_handle_lan_overflow_event()
9200 vf = &pf->vf[vf_id]; in i40e_handle_lan_overflow_event()
9209 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9216 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_cur_guaranteed_fd_count()
9222 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9229 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_current_fd_count()
9237 * i40e_get_global_fd_count - Get total FD filters programmed on device
9244 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); in i40e_get_global_fd_count()
9252 * i40e_reenable_fdir_sb - Restore FDir SB capability
9257 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_reenable_fdir_sb()
9258 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_reenable_fdir_sb()
9259 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_sb()
9260 …dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now… in i40e_reenable_fdir_sb()
9264 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9269 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { in i40e_reenable_fdir_atr()
9279 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_reenable_fdir_atr()
9280 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_atr()
9281 …dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no c… in i40e_reenable_fdir_atr()
9286 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9294 pf->fdir_pf_active_filters--; in i40e_delete_invalid_filter()
9295 pf->fd_inv = 0; in i40e_delete_invalid_filter()
9297 switch (filter->flow_type) { in i40e_delete_invalid_filter()
9299 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9302 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9305 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9308 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9311 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9314 pf->fd_sctp6_filter_cnt--; in i40e_delete_invalid_filter()
9317 switch (filter->ipl4_proto) { in i40e_delete_invalid_filter()
9319 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
9322 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
9325 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
9328 pf->fd_ip4_filter_cnt--; in i40e_delete_invalid_filter()
9333 switch (filter->ipl4_proto) { in i40e_delete_invalid_filter()
9335 pf->fd_tcp6_filter_cnt--; in i40e_delete_invalid_filter()
9338 pf->fd_udp6_filter_cnt--; in i40e_delete_invalid_filter()
9341 pf->fd_sctp6_filter_cnt--; in i40e_delete_invalid_filter()
9344 pf->fd_ip6_filter_cnt--; in i40e_delete_invalid_filter()
9351 hlist_del(&filter->fdir_node); in i40e_delete_invalid_filter()
9356 * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled in i40e_fdir_check_and_reenable()
9365 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_check_and_reenable()
9368 /* Check if we have enough room to re-enable FDir SB capability. */ in i40e_fdir_check_and_reenable()
9370 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fdir_check_and_reenable()
9371 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || in i40e_fdir_check_and_reenable()
9372 (pf->fd_add_err == 0) || in i40e_fdir_check_and_reenable()
9373 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) in i40e_fdir_check_and_reenable()
9376 /* We should wait for even more space before re-enabling ATR. in i40e_fdir_check_and_reenable()
9380 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) && in i40e_fdir_check_and_reenable()
9381 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0) in i40e_fdir_check_and_reenable()
9385 if (pf->fd_inv > 0) { in i40e_fdir_check_and_reenable()
9387 &pf->fdir_filter_list, fdir_node) in i40e_fdir_check_and_reenable()
9388 if (filter->fd_id == pf->fd_inv) in i40e_fdir_check_and_reenable()
9396 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9407 if (!time_after(jiffies, pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9412 * should not re-enable ATR for some time. in i40e_fdir_flush_and_replay()
9414 min_flush_time = pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
9416 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; in i40e_fdir_flush_and_replay()
9420 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9421 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); in i40e_fdir_flush_and_replay()
9425 pf->fd_flush_timestamp = jiffies; in i40e_fdir_flush_and_replay()
9426 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9428 wr32(&pf->hw, I40E_PFQF_CTL_1, in i40e_fdir_flush_and_replay()
9430 i40e_flush(&pf->hw); in i40e_fdir_flush_and_replay()
9431 pf->fd_flush_cnt++; in i40e_fdir_flush_and_replay()
9432 pf->fd_add_err = 0; in i40e_fdir_flush_and_replay()
9434 /* Check FD flush status every 5-6msec */ in i40e_fdir_flush_and_replay()
9436 reg = rd32(&pf->hw, I40E_PFQF_CTL_1); in i40e_fdir_flush_and_replay()
9439 } while (flush_wait_retry--); in i40e_fdir_flush_and_replay()
9441 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); in i40e_fdir_flush_and_replay()
9444 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); in i40e_fdir_flush_and_replay()
9445 if (!disable_atr && !pf->fd_tcp4_filter_cnt) in i40e_fdir_flush_and_replay()
9446 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
9447 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fdir_flush_and_replay()
9448 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9449 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); in i40e_fdir_flush_and_replay()
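/*
 * Editor's sketch (not driver code): the flush above is the usual
 * "kick the hardware, then poll a self-clearing bit with a bounded
 * retry budget" pattern.  A userspace model with a fake register;
 * the 5 ms interval mirrors the "every 5-6msec" comment above, and
 * the bit position is a placeholder:
 */
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#define FD_FLUSH_BIT	(1u << 28)	/* placeholder bit position */

static unsigned int fake_reg = FD_FLUSH_BIT;

static unsigned int read_reg(void)
{
	unsigned int v = fake_reg;

	fake_reg &= ~FD_FLUSH_BIT;	/* emulate completion after one poll */
	return v;
}

int main(void)
{
	int flush_wait_retry = 50;
	bool done = false;

	while (flush_wait_retry--) {
		if (!(read_reg() & FD_FLUSH_BIT)) {
			done = true;
			break;
		}
		usleep(5000);		/* check status every ~5 ms */
	}
	puts(done ? "flush complete" : "FD table did not flush, needs more time");
	return 0;
}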
9454 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9459 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; in i40e_get_current_atr_cnt()
9463 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9470 if (test_bit(__I40E_DOWN, pf->state)) in i40e_fdir_reinit_subtask()
9473 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_reinit_subtask()
9481 * i40e_vsi_link_event - notify VSI of a link event
9487 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_link_event()
9490 switch (vsi->type) { in i40e_vsi_link_event()
9492 if (!vsi->netdev || !vsi->netdev_registered) in i40e_vsi_link_event()
9496 netif_carrier_on(vsi->netdev); in i40e_vsi_link_event()
9497 netif_tx_wake_all_queues(vsi->netdev); in i40e_vsi_link_event()
9499 netif_carrier_off(vsi->netdev); in i40e_vsi_link_event()
9500 netif_tx_stop_all_queues(vsi->netdev); in i40e_vsi_link_event()
9516 * i40e_veb_link_event - notify elements on the veb of a link event
9525 if (!veb || !veb->pf) in i40e_veb_link_event()
9527 pf = veb->pf; in i40e_veb_link_event()
9531 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9532 i40e_veb_link_event(pf->veb[i], link_up); in i40e_veb_link_event()
9535 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_veb_link_event()
9536 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9537 i40e_vsi_link_event(pf->vsi[i], link_up); in i40e_veb_link_event()
9541 * i40e_link_event - Update netif_carrier status
9546 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_link_event()
9555 pf->hw.phy.get_link_info = true; in i40e_link_event()
9556 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); in i40e_link_event()
9557 status = i40e_get_link_status(&pf->hw, &new_link); in i40e_link_event()
9561 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9566 set_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9567 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", in i40e_link_event()
9572 old_link_speed = pf->hw.phy.link_info_old.link_speed; in i40e_link_event()
9573 new_link_speed = pf->hw.phy.link_info.link_speed; in i40e_link_event()
9577 (test_bit(__I40E_VSI_DOWN, vsi->state) || in i40e_link_event()
9578 new_link == netif_carrier_ok(vsi->netdev))) in i40e_link_event()
9586 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_link_event()
9587 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); in i40e_link_event()
9591 if (pf->vf) in i40e_link_event()
9594 if (pf->flags & I40E_FLAG_PTP) in i40e_link_event()
9600 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) in i40e_link_event()
9607 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n"); in i40e_link_event()
9608 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); in i40e_link_event()
9611 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | in i40e_link_event()
9614 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_link_event()
9616 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_link_event()
9617 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_link_event()
9624 * i40e_watchdog_subtask - periodic checks not using event driven response
9632 if (test_bit(__I40E_DOWN, pf->state) || in i40e_watchdog_subtask()
9633 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_watchdog_subtask()
9637 if (time_before(jiffies, (pf->service_timer_previous + in i40e_watchdog_subtask()
9638 pf->service_timer_period))) in i40e_watchdog_subtask()
9640 pf->service_timer_previous = jiffies; in i40e_watchdog_subtask()
9642 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || in i40e_watchdog_subtask()
9643 test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) in i40e_watchdog_subtask()
9649 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_watchdog_subtask()
9650 if (pf->vsi[i] && pf->vsi[i]->netdev) in i40e_watchdog_subtask()
9651 i40e_update_stats(pf->vsi[i]); in i40e_watchdog_subtask()
9653 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { in i40e_watchdog_subtask()
9656 if (pf->veb[i]) in i40e_watchdog_subtask()
9657 i40e_update_veb_stats(pf->veb[i]); in i40e_watchdog_subtask()
9665 * i40e_reset_subtask - Set up for resetting the device and driver
9672 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { in i40e_reset_subtask()
9674 clear_bit(__I40E_REINIT_REQUESTED, pf->state); in i40e_reset_subtask()
9676 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
9678 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
9680 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
9682 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
9684 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
9686 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
9688 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { in i40e_reset_subtask()
9690 clear_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_reset_subtask()
9696 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { in i40e_reset_subtask()
9704 !test_bit(__I40E_DOWN, pf->state) && in i40e_reset_subtask()
9705 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_reset_subtask()
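/*
 * Editor's sketch (not driver code): the subtask above walks the
 * reset-request bits in a fixed priority order and folds each one it
 * finds into a single reset_flags word, clearing the request as it
 * goes, so one service-task pass services whatever was asked for.
 * A compact userspace model of that test-and-clear accumulation:
 */
#include <stdio.h>

enum { REINIT, PF_RESET, CORE_RESET, GLOBAL_RESET, DOWN, NBITS };

int main(void)
{
	unsigned long state = (1ul << PF_RESET) | (1ul << DOWN);
	unsigned long reset_flags = 0;
	int bit;

	for (bit = 0; bit < NBITS; bit++) {
		if (state & (1ul << bit)) {
			reset_flags |= 1ul << bit;	/* remember the request */
			state &= ~(1ul << bit);		/* ...and clear it */
		}
	}
	printf("reset_flags = 0x%lx, state = 0x%lx\n", reset_flags, state);
	return 0;
}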
9711 * i40e_handle_link_event - Handle link event
9719 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; in i40e_handle_link_event()
9721 /* Do a new status request to re-enable LSE reporting in i40e_handle_link_event()
9730 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) { in i40e_handle_link_event()
9731 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9733 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9739 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && in i40e_handle_link_event()
9740 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && in i40e_handle_link_event()
9741 (!(status->link_info & I40E_AQ_LINK_UP)) && in i40e_handle_link_event()
9742 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { in i40e_handle_link_event()
9743 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9745 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9752 * i40e_clean_adminq_subtask - Clean the AdminQ rings
9758 struct i40e_hw *hw = &pf->hw; in i40e_clean_adminq_subtask()
9766 if (test_bit(__I40E_RESET_FAILED, pf->state)) in i40e_clean_adminq_subtask()
9770 val = rd32(&pf->hw, pf->hw.aq.arq.len); in i40e_clean_adminq_subtask()
9773 if (hw->debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9774 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); in i40e_clean_adminq_subtask()
9778 if (hw->debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9779 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
9781 pf->arq_overflows++; in i40e_clean_adminq_subtask()
9784 if (hw->debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9785 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
9789 wr32(&pf->hw, pf->hw.aq.arq.len, val); in i40e_clean_adminq_subtask()
9791 val = rd32(&pf->hw, pf->hw.aq.asq.len); in i40e_clean_adminq_subtask()
9794 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9795 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); in i40e_clean_adminq_subtask()
9799 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9800 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
9804 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9805 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
9809 wr32(&pf->hw, pf->hw.aq.asq.len, val); in i40e_clean_adminq_subtask()
9821 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); in i40e_clean_adminq_subtask()
9842 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); in i40e_clean_adminq_subtask()
9850 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); in i40e_clean_adminq_subtask()
9854 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); in i40e_clean_adminq_subtask()
9859 i40e_debug(&pf->hw, I40E_DEBUG_NVM, in i40e_clean_adminq_subtask()
9864 dev_info(&pf->pdev->dev, in i40e_clean_adminq_subtask()
9869 } while (i++ < pf->adminq_work_limit); in i40e_clean_adminq_subtask()
9871 if (i < pf->adminq_work_limit) in i40e_clean_adminq_subtask()
9872 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_clean_adminq_subtask()
9874 /* re-enable Admin queue interrupt cause */ in i40e_clean_adminq_subtask()
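/*
 * Editor's sketch (not driver code): the ARQ/ASQ length registers read
 * above latch error bits (VF error, overflow, critical error).  The
 * subtask logs each set bit, clears it in the value it holds, and
 * writes the register back only if something actually changed.  A
 * userspace model of that read-log-clear-writeback pattern, with a
 * made-up bit layout:
 */
#include <stdio.h>

#define AQ_VFE	(1u << 28)	/* placeholder bit positions */
#define AQ_OVFL	(1u << 29)
#define AQ_CRIT	(1u << 30)

int main(void)
{
	unsigned int val = AQ_OVFL | 64;	/* latched error + queue len */
	unsigned int oldval = val;

	if (val & AQ_VFE) {
		puts("ARQ VF Error detected");
		val &= ~AQ_VFE;
	}
	if (val & AQ_OVFL) {
		puts("ARQ Overflow Error detected");
		val &= ~AQ_OVFL;
	}
	if (val & AQ_CRIT) {
		puts("ARQ Critical Error detected");
		val &= ~AQ_CRIT;
	}
	if (oldval != val)	/* write back only when bits were cleared */
		printf("wr32(arq.len, 0x%x)\n", val);
	return 0;
}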
9884 * i40e_verify_eeprom - make sure eeprom is good to use
9891 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
9894 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
9896 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", in i40e_verify_eeprom()
9898 set_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
9902 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_verify_eeprom()
9903 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); in i40e_verify_eeprom()
9904 clear_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
9912 * enable switch loopback or die - no point in a return value in i40e_enable_pf_switch_lb()
9916 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_enable_pf_switch_lb()
9920 ctxt.seid = pf->main_vsi_seid; in i40e_enable_pf_switch_lb()
9921 ctxt.pf_num = pf->hw.pf_id; in i40e_enable_pf_switch_lb()
9923 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
9925 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
9927 i40e_stat_str(&pf->hw, ret), in i40e_enable_pf_switch_lb()
9928 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
9935 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
9937 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
9939 i40e_stat_str(&pf->hw, ret), in i40e_enable_pf_switch_lb()
9940 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
9948 * disable switch loopback or die - no point in a return value in i40e_disable_pf_switch_lb()
9952 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_disable_pf_switch_lb()
9956 ctxt.seid = pf->main_vsi_seid; in i40e_disable_pf_switch_lb()
9957 ctxt.pf_num = pf->hw.pf_id; in i40e_disable_pf_switch_lb()
9959 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
9961 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
9963 i40e_stat_str(&pf->hw, ret), in i40e_disable_pf_switch_lb()
9964 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
9971 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
9973 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
9975 i40e_stat_str(&pf->hw, ret), in i40e_disable_pf_switch_lb()
9976 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
9981 * i40e_config_bridge_mode - Configure the HW bridge mode
9990 struct i40e_pf *pf = veb->pf; in i40e_config_bridge_mode()
9992 if (pf->hw.debug_mask & I40E_DEBUG_LAN) in i40e_config_bridge_mode()
9993 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", in i40e_config_bridge_mode()
9994 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); in i40e_config_bridge_mode()
9995 if (veb->bridge_mode & BRIDGE_MODE_VEPA) in i40e_config_bridge_mode()
10002 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10013 struct i40e_pf *pf = veb->pf; in i40e_reconstitute_veb()
10018 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { in i40e_reconstitute_veb()
10019 if (pf->vsi[v] && in i40e_reconstitute_veb()
10020 pf->vsi[v]->veb_idx == veb->idx && in i40e_reconstitute_veb()
10021 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_reconstitute_veb()
10022 ctl_vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10027 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10028 "missing owner VSI for veb_idx %d\n", veb->idx); in i40e_reconstitute_veb()
10029 ret = -ENOENT; in i40e_reconstitute_veb()
10032 if (ctl_vsi != pf->vsi[pf->lan_vsi]) in i40e_reconstitute_veb()
10033 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_reconstitute_veb()
10036 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10038 veb->idx, ret); in i40e_reconstitute_veb()
10048 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_reconstitute_veb()
10049 veb->bridge_mode = BRIDGE_MODE_VEB; in i40e_reconstitute_veb()
10051 veb->bridge_mode = BRIDGE_MODE_VEPA; in i40e_reconstitute_veb()
10055 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_reconstitute_veb()
10056 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) in i40e_reconstitute_veb()
10059 if (pf->vsi[v]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10060 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_reconstitute_veb()
10062 vsi->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10065 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
10074 /* create any VEBs attached to this VEB - RECURSION */ in i40e_reconstitute_veb()
10076 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
10077 pf->veb[veb_idx]->uplink_seid = veb->seid; in i40e_reconstitute_veb()
10078 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); in i40e_reconstitute_veb()
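/*
 * Editor's sketch (not driver code): VEBs form a tree keyed by uplink
 * index, so the rebuild walks the flat pf->veb[] array and recurses
 * into any VEB whose parent matches the one just rebuilt, exactly as
 * the "RECURSION" comment above notes.  A userspace model over a
 * small fixed array:
 */
#include <stdio.h>

#define MAX_VEB 4

struct veb { int idx; int parent; };	/* parent == -1 means root */

static struct veb vebs[MAX_VEB] = {
	{ 0, -1 }, { 1, 0 }, { 2, 0 }, { 3, 2 },
};

static void reconstitute(int idx)
{
	int v;

	printf("rebuild veb %d\n", idx);
	for (v = 0; v < MAX_VEB; v++)	/* children - RECURSION */
		if (vebs[v].parent == idx)
			reconstitute(vebs[v].idx);
}

int main(void)
{
	reconstitute(0);	/* prints 0, 1, 2, 3 */
	return 0;
}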
10089 * i40e_get_capabilities - get info about the HW
10105 return -ENOMEM; in i40e_get_capabilities()
10108 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, in i40e_get_capabilities()
10114 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { in i40e_get_capabilities()
10117 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { in i40e_get_capabilities()
10118 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10120 i40e_stat_str(&pf->hw, err), in i40e_get_capabilities()
10121 i40e_aq_str(&pf->hw, in i40e_get_capabilities()
10122 pf->hw.aq.asq_last_status)); in i40e_get_capabilities()
10123 return -ENODEV; in i40e_get_capabilities()
10127 if (pf->hw.debug_mask & I40E_DEBUG_USER) { in i40e_get_capabilities()
10129 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10131 pf->hw.pf_id, pf->hw.func_caps.num_vfs, in i40e_get_capabilities()
10132 pf->hw.func_caps.num_msix_vectors, in i40e_get_capabilities()
10133 pf->hw.func_caps.num_msix_vectors_vf, in i40e_get_capabilities()
10134 pf->hw.func_caps.fd_filters_guaranteed, in i40e_get_capabilities()
10135 pf->hw.func_caps.fd_filters_best_effort, in i40e_get_capabilities()
10136 pf->hw.func_caps.num_tx_qp, in i40e_get_capabilities()
10137 pf->hw.func_caps.num_vsis); in i40e_get_capabilities()
10139 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10141 pf->hw.dev_caps.switch_mode, in i40e_get_capabilities()
10142 pf->hw.dev_caps.valid_functions); in i40e_get_capabilities()
10143 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10144 "SR-IOV=%d, num_vfs for all function=%u\n", in i40e_get_capabilities()
10145 pf->hw.dev_caps.sr_iov_1_1, in i40e_get_capabilities()
10146 pf->hw.dev_caps.num_vfs); in i40e_get_capabilities()
10147 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10149 pf->hw.dev_caps.num_vsis, in i40e_get_capabilities()
10150 pf->hw.dev_caps.num_rx_qp, in i40e_get_capabilities()
10151 pf->hw.dev_caps.num_tx_qp); in i40e_get_capabilities()
10155 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ in i40e_get_capabilities()
10156 + pf->hw.func_caps.num_vfs) in i40e_get_capabilities()
10157 if (pf->hw.revision_id == 0 && in i40e_get_capabilities()
10158 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { in i40e_get_capabilities()
10159 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
10161 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); in i40e_get_capabilities()
10162 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; in i40e_get_capabilities()
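/*
 * Editor's sketch (not driver code): the discover-capabilities call
 * above reports the buffer size it actually needs, so on an ENOMEM
 * status the driver frees the too-small buffer and retries with the
 * size the firmware asked for.  A userspace model of that
 * grow-and-retry loop, with a fake query that needs three records:
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int discover(void *buf, size_t len, size_t *needed)
{
	(void)buf;
	*needed = 3 * 32;			/* firmware's real need */
	return len < *needed ? -ENOMEM : 0;	/* too small: try again */
}

int main(void)
{
	size_t buf_len = 32, needed;
	void *buf;
	int err;

	do {
		buf = calloc(1, buf_len);
		if (!buf)
			return -ENOMEM;
		err = discover(buf, buf_len, &needed);
		if (err == -ENOMEM) {
			free(buf);		/* retry with a bigger buffer */
			buf_len = needed;
		}
	} while (err == -ENOMEM);
	printf("capabilities read into %zu bytes\n", buf_len);
	free(buf);
	return 0;
}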
10171 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10181 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { in i40e_fdir_sb_setup()
10190 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); in i40e_fdir_sb_setup()
10193 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_sb_setup()
10202 pf->vsi[pf->lan_vsi]->seid, 0); in i40e_fdir_sb_setup()
10204 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); in i40e_fdir_sb_setup()
10205 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_fdir_sb_setup()
10206 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_fdir_sb_setup()
10215 * i40e_fdir_teardown - release the Flow Director resources
10229 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10239 struct i40e_pf *pf = vsi->back; in i40e_rebuild_cloud_filters()
10244 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, in i40e_rebuild_cloud_filters()
10246 if (cfilter->seid != seid) in i40e_rebuild_cloud_filters()
10249 if (cfilter->dst_port) in i40e_rebuild_cloud_filters()
10256 dev_dbg(&pf->pdev->dev, in i40e_rebuild_cloud_filters()
10258 i40e_stat_str(&pf->hw, ret), in i40e_rebuild_cloud_filters()
10259 i40e_aq_str(&pf->hw, in i40e_rebuild_cloud_filters()
10260 pf->hw.aq.asq_last_status)); in i40e_rebuild_cloud_filters()
10268 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10278 if (list_empty(&vsi->ch_list)) in i40e_rebuild_channels()
10281 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_rebuild_channels()
10282 if (!ch->initialized) in i40e_rebuild_channels()
10285 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); in i40e_rebuild_channels()
10287 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10289 vsi->uplink_seid); in i40e_rebuild_channels()
10293 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); in i40e_rebuild_channels()
10295 dev_info(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10297 ch->seid); in i40e_rebuild_channels()
10301 vsi->next_base_queue = vsi->next_base_queue + in i40e_rebuild_channels()
10302 ch->num_queue_pairs; in i40e_rebuild_channels()
10303 if (ch->max_tx_rate) { in i40e_rebuild_channels()
10304 u64 credits = ch->max_tx_rate; in i40e_rebuild_channels()
10306 if (i40e_set_bw_limit(vsi, ch->seid, in i40e_rebuild_channels()
10307 ch->max_tx_rate)) in i40e_rebuild_channels()
10308 return -EINVAL; in i40e_rebuild_channels()
10311 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10312 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_rebuild_channels()
10313 ch->max_tx_rate, in i40e_rebuild_channels()
10315 ch->seid); in i40e_rebuild_channels()
10317 ret = i40e_rebuild_cloud_filters(vsi, ch->seid); in i40e_rebuild_channels()
10319 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild_channels()
10321 ch->seid); in i40e_rebuild_channels()
10329 * i40e_prep_for_reset - prep for the core to reset
10336 struct i40e_hw *hw = &pf->hw; in i40e_prep_for_reset()
10340 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_prep_for_reset()
10341 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_prep_for_reset()
10343 if (i40e_check_asq_alive(&pf->hw)) in i40e_prep_for_reset()
10346 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); in i40e_prep_for_reset()
10351 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_prep_for_reset()
10352 if (pf->vsi[v]) in i40e_prep_for_reset()
10353 pf->vsi[v]->seid = 0; in i40e_prep_for_reset()
10356 i40e_shutdown_adminq(&pf->hw); in i40e_prep_for_reset()
10359 if (hw->hmc.hmc_obj) { in i40e_prep_for_reset()
10362 dev_warn(&pf->pdev->dev, in i40e_prep_for_reset()
10373 * i40e_send_version - update firmware with driver version
10385 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); in i40e_send_version()
10389 * i40e_get_oem_version - get OEM specific version information
10429 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release; in i40e_get_oem_version()
10430 hw->nvm.eetrack = I40E_OEM_EETRACK_ID; in i40e_get_oem_version()
10434 * i40e_reset - wait for a core reset to finish; reset the PF if no CoreR was seen in i40e_reset()
10439 struct i40e_hw *hw = &pf->hw; in i40e_reset()
10444 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); in i40e_reset()
10445 set_bit(__I40E_RESET_FAILED, pf->state); in i40e_reset()
10446 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_reset()
10448 pf->pfr_count++; in i40e_reset()
10454 * i40e_rebuild - rebuild using a saved config
10456 * @reinit: if the Main VSI needs to be re-initialized. in i40e_rebuild()
10462 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_rebuild()
10463 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_rebuild()
10464 struct i40e_hw *hw = &pf->hw; in i40e_rebuild()
10469 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && in i40e_rebuild()
10471 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); in i40e_rebuild()
10474 if (test_bit(__I40E_DOWN, pf->state) && in i40e_rebuild()
10475 !test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_rebuild()
10478 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); in i40e_rebuild()
10481 ret = i40e_init_adminq(&pf->hw); in i40e_rebuild()
10483 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", in i40e_rebuild()
10484 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10485 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10488 i40e_get_oem_version(&pf->hw); in i40e_rebuild()
10490 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && in i40e_rebuild()
10491 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || in i40e_rebuild()
10492 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { in i40e_rebuild()
10501 /* re-verify the eeprom if we just had an EMP reset */ in i40e_rebuild()
10502 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) in i40e_rebuild()
10509 if (test_bit(__I40E_RECOVERY_MODE, pf->state) || in i40e_rebuild()
10515 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_rebuild()
10528 free_irq(pf->pdev->irq, pf); in i40e_rebuild()
10548 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, in i40e_rebuild()
10549 hw->func_caps.num_rx_qp, 0, 0); in i40e_rebuild()
10551 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10556 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10561 /* Enable FW to write a default DCB config on link-up in i40e_rebuild()
10565 if (pf->flags & I40E_FLAG_TC_MQPRIO) { in i40e_rebuild()
10568 if (I40E_IS_X710TL_DEVICE(hw->device_id) && in i40e_rebuild()
10569 (hw->phy.link_info.link_speed & in i40e_rebuild()
10572 dev_warn(&pf->pdev->dev, in i40e_rebuild()
10573 "DCB is not supported for X710-T*L 2.5/5G speeds\n"); in i40e_rebuild()
10574 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_rebuild()
10579 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", in i40e_rebuild()
10581 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_rebuild()
10597 ret = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_rebuild()
10602 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", in i40e_rebuild()
10603 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10604 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10613 if (vsi->uplink_seid != pf->mac_seid) { in i40e_rebuild()
10614 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); in i40e_rebuild()
10617 if (!pf->veb[v]) in i40e_rebuild()
10620 if (pf->veb[v]->uplink_seid == pf->mac_seid || in i40e_rebuild()
10621 pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10622 ret = i40e_reconstitute_veb(pf->veb[v]); in i40e_rebuild()
10633 if (pf->veb[v]->uplink_seid == pf->mac_seid) { in i40e_rebuild()
10634 dev_info(&pf->pdev->dev, in i40e_rebuild()
10637 vsi->uplink_seid = pf->mac_seid; in i40e_rebuild()
10639 } else if (pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10640 dev_info(&pf->pdev->dev, in i40e_rebuild()
10648 if (vsi->uplink_seid == pf->mac_seid) { in i40e_rebuild()
10649 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); in i40e_rebuild()
10653 dev_info(&pf->pdev->dev, in i40e_rebuild()
10659 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_rebuild()
10660 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; in i40e_rebuild()
10664 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_rebuild()
10670 dev_dbg(&vsi->back->pdev->dev, in i40e_rebuild()
10671 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_rebuild()
10674 vsi->seid); in i40e_rebuild()
10677 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); in i40e_rebuild()
10702 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_rebuild()
10704 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_rebuild()
10706 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", in i40e_rebuild()
10707 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10708 i40e_aq_str(&pf->hw, in i40e_rebuild()
10709 pf->hw.aq.asq_last_status)); in i40e_rebuild()
10712 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_rebuild()
10721 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_rebuild()
10722 pf->main_vsi_seid); in i40e_rebuild()
10732 ret = i40e_set_promiscuous(pf, pf->cur_promisc); in i40e_rebuild()
10734 dev_warn(&pf->pdev->dev, in i40e_rebuild()
10736 pf->cur_promisc ? "on" : "off", in i40e_rebuild()
10737 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10738 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10752 clear_bit(__I40E_RESET_FAILED, pf->state); in i40e_rebuild()
10754 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_rebuild()
10755 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); in i40e_rebuild()
10759 * i40e_reset_and_rebuild - reset and rebuild using a saved config
10761 * @reinit: if the Main VSI needs to be re-initialized. in i40e_reset_and_rebuild()
10769 /* Now we wait for GRST to settle out. in i40e_reset_and_rebuild()
10779 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10801 struct i40e_hw *hw = &pf->hw; in i40e_handle_mdd_event()
10807 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) in i40e_handle_mdd_event()
10820 I40E_GL_MDET_TX_QUEUE_SHIFT) - in i40e_handle_mdd_event()
10821 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
10823 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x … in i40e_handle_mdd_event()
10835 I40E_GL_MDET_RX_QUEUE_SHIFT) - in i40e_handle_mdd_event()
10836 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
10838 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02… in i40e_handle_mdd_event()
10848 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); in i40e_handle_mdd_event()
10853 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n"); in i40e_handle_mdd_event()
10858 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { in i40e_handle_mdd_event()
10859 vf = &(pf->vf[i]); in i40e_handle_mdd_event()
10863 vf->num_mdd_events++; in i40e_handle_mdd_event()
10864 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
10866 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
10867 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
10868 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); in i40e_handle_mdd_event()
10874 vf->num_mdd_events++; in i40e_handle_mdd_event()
10875 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
10877 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
10878 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
10879 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); in i40e_handle_mdd_event()
10883 /* re-enable mdd interrupt cause */ in i40e_handle_mdd_event()
10884 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_handle_mdd_event()
10892 * i40e_service_task - Run the driver's async subtasks
10903 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_service_task()
10904 test_bit(__I40E_SUSPENDED, pf->state)) in i40e_service_task()
10907 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) in i40e_service_task()
10910 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_service_task()
10911 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); in i40e_service_task()
10918 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { in i40e_service_task()
10920 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], in i40e_service_task()
10925 pf->state)) in i40e_service_task()
10927 pf->vsi[pf->lan_vsi]); in i40e_service_task()
10938 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_service_task()
10942 * rather than wait for the timer to tick again. in i40e_service_task()
10944 if (time_after(jiffies, (start_time + pf->service_timer_period)) || in i40e_service_task()
10945 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || in i40e_service_task()
10946 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || in i40e_service_task()
10947 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) in i40e_service_task()
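/*
 * Editor's sketch (not driver code): per the comment above, if this
 * pass ran longer than one timer period, or event bits are still
 * pending, the task schedules itself again immediately rather than
 * waiting for the next timer tick.  A userspace model with a fake
 * jiffies clock:
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned long start_time = 1000, jiffies = 1300;
	unsigned long service_timer_period = 250;
	bool event_pending = false;

	if (jiffies - start_time > service_timer_period || event_pending)
		puts("re-schedule the service task now");
	else
		puts("wait for the next timer tick");
	return 0;
}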
10952 * i40e_service_timer - timer callback
10959 mod_timer(&pf->service_timer, in i40e_service_timer()
10960 round_jiffies(jiffies + pf->service_timer_period)); in i40e_service_timer()
10965 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
10970 struct i40e_pf *pf = vsi->back; in i40e_set_num_rings_in_vsi()
10972 switch (vsi->type) { in i40e_set_num_rings_in_vsi()
10974 vsi->alloc_queue_pairs = pf->num_lan_qps; in i40e_set_num_rings_in_vsi()
10975 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
10976 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
10978 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
10979 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
10981 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_set_num_rings_in_vsi()
10982 vsi->num_q_vectors = pf->num_lan_msix; in i40e_set_num_rings_in_vsi()
10984 vsi->num_q_vectors = 1; in i40e_set_num_rings_in_vsi()
10989 vsi->alloc_queue_pairs = 1; in i40e_set_num_rings_in_vsi()
10990 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
10992 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, in i40e_set_num_rings_in_vsi()
10994 vsi->num_q_vectors = pf->num_fdsb_msix; in i40e_set_num_rings_in_vsi()
10998 vsi->alloc_queue_pairs = pf->num_vmdq_qps; in i40e_set_num_rings_in_vsi()
10999 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11000 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11002 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11003 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11005 vsi->num_q_vectors = pf->num_vmdq_msix; in i40e_set_num_rings_in_vsi()
11009 vsi->alloc_queue_pairs = pf->num_vf_qps; in i40e_set_num_rings_in_vsi()
11010 if (!vsi->num_tx_desc) in i40e_set_num_rings_in_vsi()
11011 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11013 if (!vsi->num_rx_desc) in i40e_set_num_rings_in_vsi()
11014 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, in i40e_set_num_rings_in_vsi()
11020 return -ENODATA; in i40e_set_num_rings_in_vsi()
11024 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
11025 vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS; in i40e_set_num_rings_in_vsi()
11032 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11046 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * in i40e_vsi_alloc_arrays()
11048 vsi->tx_rings = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11049 if (!vsi->tx_rings) in i40e_vsi_alloc_arrays()
11050 return -ENOMEM; in i40e_vsi_alloc_arrays()
11051 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11053 vsi->xdp_rings = next_rings; in i40e_vsi_alloc_arrays()
11054 next_rings += vsi->alloc_queue_pairs; in i40e_vsi_alloc_arrays()
11056 vsi->rx_rings = next_rings; in i40e_vsi_alloc_arrays()
11060 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; in i40e_vsi_alloc_arrays()
11061 vsi->q_vectors = kzalloc(size, GFP_KERNEL); in i40e_vsi_alloc_arrays()
11062 if (!vsi->q_vectors) { in i40e_vsi_alloc_arrays()
11063 ret = -ENOMEM; in i40e_vsi_alloc_arrays()
11070 kfree(vsi->tx_rings); in i40e_vsi_alloc_arrays()
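/*
 * Editor's sketch (not driver code): one allocation holds the Tx,
 * (optional) XDP and Rx ring-pointer arrays back to back, and the
 * three vsi->*_rings pointers are just offsets into it, so the single
 * kfree(vsi->tx_rings) above releases everything.  A userspace model:
 */
#include <stdio.h>
#include <stdlib.h>

struct ring;	/* opaque here; only the pointer arrays matter */

int main(void)
{
	int nqp = 4, have_xdp = 1;
	int arrays = 2 + have_xdp;		/* tx, rx, maybe xdp */
	struct ring **tx, **xdp = NULL, **rx, **next;

	tx = calloc(arrays * nqp, sizeof(*tx));	/* one block for all three */
	if (!tx)
		return -1;
	next = tx + nqp;
	if (have_xdp) {
		xdp = next;
		next += nqp;
	}
	rx = next;
	printf("tx=%p xdp=%p rx=%p\n", (void *)tx, (void *)xdp, (void *)rx);
	free(tx);	/* frees the xdp and rx arrays too */
	return 0;
}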
11075 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11084 int ret = -ENODEV; in i40e_vsi_mem_alloc()
11090 mutex_lock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
11098 i = pf->next_vsi; in i40e_vsi_mem_alloc()
11099 while (i < pf->num_alloc_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11101 if (i >= pf->num_alloc_vsi) { in i40e_vsi_mem_alloc()
11103 while (i < pf->next_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
11107 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { in i40e_vsi_mem_alloc()
11110 ret = -ENODEV; in i40e_vsi_mem_alloc()
11113 pf->next_vsi = ++i; in i40e_vsi_mem_alloc()
11117 ret = -ENOMEM; in i40e_vsi_mem_alloc()
11120 vsi->type = type; in i40e_vsi_mem_alloc()
11121 vsi->back = pf; in i40e_vsi_mem_alloc()
11122 set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_vsi_mem_alloc()
11123 vsi->flags = 0; in i40e_vsi_mem_alloc()
11124 vsi->idx = vsi_idx; in i40e_vsi_mem_alloc()
11125 vsi->int_rate_limit = 0; in i40e_vsi_mem_alloc()
11126 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? in i40e_vsi_mem_alloc()
11127 pf->rss_table_size : 64; in i40e_vsi_mem_alloc()
11128 vsi->netdev_registered = false; in i40e_vsi_mem_alloc()
11129 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; in i40e_vsi_mem_alloc()
11130 hash_init(vsi->mac_filter_hash); in i40e_vsi_mem_alloc()
11131 vsi->irqs_ready = false; in i40e_vsi_mem_alloc()
11134 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); in i40e_vsi_mem_alloc()
11135 if (!vsi->af_xdp_zc_qps) in i40e_vsi_mem_alloc()
11151 spin_lock_init(&vsi->mac_filter_hash_lock); in i40e_vsi_mem_alloc()
11152 pf->vsi[vsi_idx] = vsi; in i40e_vsi_mem_alloc()
11157 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_mem_alloc()
11158 pf->next_vsi = i - 1; in i40e_vsi_mem_alloc()
11161 mutex_unlock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
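/*
 * Editor's sketch (not driver code): the free-slot search above starts
 * at pf->next_vsi (a cheap rotating hint) and wraps to the start of
 * the array before giving up, so freshly freed low slots are still
 * found.  A userspace model of the two-phase scan:
 */
#include <stdio.h>

#define NUM_VSI 8

int main(void)
{
	void *vsi[NUM_VSI] = { (void *)1, (void *)1, 0, (void *)1 };
	int next_vsi = 3, i;

	i = next_vsi;
	while (i < NUM_VSI && vsi[i])	/* phase 1: hint to end */
		i++;
	if (i >= NUM_VSI) {
		i = 0;			/* phase 2: wrap to start */
		while (i < next_vsi && vsi[i])
			i++;
	}
	if (i < NUM_VSI && !vsi[i])
		printf("using slot %d\n", i);	/* slot 4 here */
	else
		printf("no free slot\n");
	return 0;
}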
11166 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11177 kfree(vsi->q_vectors); in i40e_vsi_free_arrays()
11178 vsi->q_vectors = NULL; in i40e_vsi_free_arrays()
11180 kfree(vsi->tx_rings); in i40e_vsi_free_arrays()
11181 vsi->tx_rings = NULL; in i40e_vsi_free_arrays()
11182 vsi->rx_rings = NULL; in i40e_vsi_free_arrays()
11183 vsi->xdp_rings = NULL; in i40e_vsi_free_arrays()
11187 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
11196 kfree(vsi->rss_hkey_user); in i40e_clear_rss_config_user()
11197 vsi->rss_hkey_user = NULL; in i40e_clear_rss_config_user()
11199 kfree(vsi->rss_lut_user); in i40e_clear_rss_config_user()
11200 vsi->rss_lut_user = NULL; in i40e_clear_rss_config_user()
11204 * i40e_vsi_clear - Deallocate the VSI provided
11205 * @vsi: the VSI being un-configured
11214 if (!vsi->back) in i40e_vsi_clear()
11216 pf = vsi->back; in i40e_vsi_clear()
11218 mutex_lock(&pf->switch_mutex); in i40e_vsi_clear()
11219 if (!pf->vsi[vsi->idx]) { in i40e_vsi_clear()
11220 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", in i40e_vsi_clear()
11221 vsi->idx, vsi->idx, vsi->type); in i40e_vsi_clear()
11225 if (pf->vsi[vsi->idx] != vsi) { in i40e_vsi_clear()
11226 dev_err(&pf->pdev->dev, in i40e_vsi_clear()
11227 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", in i40e_vsi_clear()
11228 pf->vsi[vsi->idx]->idx, in i40e_vsi_clear()
11229 pf->vsi[vsi->idx]->type, in i40e_vsi_clear()
11230 vsi->idx, vsi->type); in i40e_vsi_clear()
11235 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_clear()
11236 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); in i40e_vsi_clear()
11238 bitmap_free(vsi->af_xdp_zc_qps); in i40e_vsi_clear()
11242 pf->vsi[vsi->idx] = NULL; in i40e_vsi_clear()
11243 if (vsi->idx < pf->next_vsi) in i40e_vsi_clear()
11244 pf->next_vsi = vsi->idx; in i40e_vsi_clear()
11247 mutex_unlock(&pf->switch_mutex); in i40e_vsi_clear()
11255 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11262 if (vsi->tx_rings && vsi->tx_rings[0]) { in i40e_vsi_clear_rings()
11263 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_vsi_clear_rings()
11264 kfree_rcu(vsi->tx_rings[i], rcu); in i40e_vsi_clear_rings()
11265 WRITE_ONCE(vsi->tx_rings[i], NULL); in i40e_vsi_clear_rings()
11266 WRITE_ONCE(vsi->rx_rings[i], NULL); in i40e_vsi_clear_rings()
11267 if (vsi->xdp_rings) in i40e_vsi_clear_rings()
11268 WRITE_ONCE(vsi->xdp_rings[i], NULL); in i40e_vsi_clear_rings()
11274 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11280 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings()
11284 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_alloc_rings()
11290 ring->queue_index = i; in i40e_alloc_rings()
11291 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11292 ring->ring_active = false; in i40e_alloc_rings()
11293 ring->vsi = vsi; in i40e_alloc_rings()
11294 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11295 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11296 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11297 ring->size = 0; in i40e_alloc_rings()
11298 ring->dcb_tc = 0; in i40e_alloc_rings()
11299 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) in i40e_alloc_rings()
11300 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; in i40e_alloc_rings()
11301 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11302 WRITE_ONCE(vsi->tx_rings[i], ring++); in i40e_alloc_rings()
11307 ring->queue_index = vsi->alloc_queue_pairs + i; in i40e_alloc_rings()
11308 ring->reg_idx = vsi->base_queue + ring->queue_index; in i40e_alloc_rings()
11309 ring->ring_active = false; in i40e_alloc_rings()
11310 ring->vsi = vsi; in i40e_alloc_rings()
11311 ring->netdev = NULL; in i40e_alloc_rings()
11312 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11313 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11314 ring->size = 0; in i40e_alloc_rings()
11315 ring->dcb_tc = 0; in i40e_alloc_rings()
11316 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) in i40e_alloc_rings()
11317 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; in i40e_alloc_rings()
11319 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11320 WRITE_ONCE(vsi->xdp_rings[i], ring++); in i40e_alloc_rings()
11323 ring->queue_index = i; in i40e_alloc_rings()
11324 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11325 ring->ring_active = false; in i40e_alloc_rings()
11326 ring->vsi = vsi; in i40e_alloc_rings()
11327 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11328 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11329 ring->count = vsi->num_rx_desc; in i40e_alloc_rings()
11330 ring->size = 0; in i40e_alloc_rings()
11331 ring->dcb_tc = 0; in i40e_alloc_rings()
11332 ring->itr_setting = pf->rx_itr_default; in i40e_alloc_rings()
11333 WRITE_ONCE(vsi->rx_rings[i], ring); in i40e_alloc_rings()
11340 return -ENOMEM; in i40e_alloc_rings()
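/*
 * Editor's sketch (not driver code): for each queue pair, one
 * allocation holds the Tx ring, the optional XDP Tx ring and the Rx
 * ring in a row; the loop above publishes each with ring++ so the Rx
 * ring is always the last struct in the block, and freeing the Tx
 * pointer releases all of them (the driver does so via kfree_rcu).
 * A userspace model for one queue pair:
 */
#include <stdio.h>
#include <stdlib.h>

struct ring { int queue_index; int count; };

int main(void)
{
	int have_xdp = 1;
	struct ring *ring, *tx, *xdp = NULL, *rx;

	ring = calloc(have_xdp ? 3 : 2, sizeof(*ring));
	if (!ring)
		return -1;
	tx = ring++;			/* first struct: Tx */
	if (have_xdp)
		xdp = ring++;		/* second: XDP Tx */
	rx = ring;			/* last: Rx */
	printf("tx=%p xdp=%p rx=%p\n", (void *)tx, (void *)xdp, (void *)rx);
	free(tx);			/* one free covers the whole block */
	return 0;
}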
11344 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11346 * @vectors: the number of MSI-X vectors to request
11352 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, in i40e_reserve_msix_vectors()
11355 dev_info(&pf->pdev->dev, in i40e_reserve_msix_vectors()
11356 "MSI-X vector reservation failed: %d\n", vectors); in i40e_reserve_msix_vectors()
11364 * i40e_init_msix - Set up the MSI-X capability in i40e_init_msix()
11373 struct i40e_hw *hw = &pf->hw; in i40e_init_msix()
11380 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_init_msix()
11381 return -ENODEV; in i40e_init_msix()
11384 * - Add 1 for "other" cause for Admin Queue events, etc. in i40e_init_msix()
11385 * - The number of LAN queue pairs in i40e_init_msix()
11386 * - Queues being used for RSS. in i40e_init_msix()
11390 * - assumes symmetric Tx/Rx pairing in i40e_init_msix()
11391 * - The number of VMDq pairs in i40e_init_msix()
11392 * - The CPU count within the NUMA node if iWARP is enabled in i40e_init_msix()
11398 vectors_left = hw->func_caps.num_msix_vectors; in i40e_init_msix()
11404 vectors_left--; in i40e_init_msix()
11415 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); in i40e_init_msix()
11416 vectors_left -= pf->num_lan_msix; in i40e_init_msix()
11419 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
11421 pf->num_fdsb_msix = 1; in i40e_init_msix()
11423 vectors_left--; in i40e_init_msix()
11425 pf->num_fdsb_msix = 0; in i40e_init_msix()
11430 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11431 iwarp_requested = pf->num_iwarp_msix; in i40e_init_msix()
11434 pf->num_iwarp_msix = 0; in i40e_init_msix()
11435 else if (vectors_left < pf->num_iwarp_msix) in i40e_init_msix()
11436 pf->num_iwarp_msix = 1; in i40e_init_msix()
11437 v_budget += pf->num_iwarp_msix; in i40e_init_msix()
11438 vectors_left -= pf->num_iwarp_msix; in i40e_init_msix()
11442 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { in i40e_init_msix()
11444 pf->num_vmdq_msix = 0; in i40e_init_msix()
11445 pf->num_vmdq_qps = 0; in i40e_init_msix()
11448 pf->num_vmdq_vsis * pf->num_vmdq_qps; in i40e_init_msix()
11459 pf->num_vmdq_qps = 1; in i40e_init_msix()
11460 vmdq_vecs_wanted = pf->num_vmdq_vsis; in i40e_init_msix()
11465 pf->num_vmdq_msix = pf->num_vmdq_qps; in i40e_init_msix()
11468 vectors_left -= vmdq_vecs; in i40e_init_msix()
11481 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); in i40e_init_msix()
11482 pf->num_lan_msix += extra_vectors; in i40e_init_msix()
11483 vectors_left -= extra_vectors; in i40e_init_msix()
11486 …remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n"); in i40e_init_msix()
11488 v_budget += pf->num_lan_msix; in i40e_init_msix()
11489 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), in i40e_init_msix()
11491 if (!pf->msix_entries) in i40e_init_msix()
11492 return -ENOMEM; in i40e_init_msix()
11495 pf->msix_entries[i].entry = i; in i40e_init_msix()
11499 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; in i40e_init_msix()
11500 kfree(pf->msix_entries); in i40e_init_msix()
11501 pf->msix_entries = NULL; in i40e_init_msix()
11502 pci_disable_msix(pf->pdev); in i40e_init_msix()
11503 return -ENODEV; in i40e_init_msix()
11507 pf->num_vmdq_vsis = 0; in i40e_init_msix()
11508 pf->num_vmdq_qps = 0; in i40e_init_msix()
11509 pf->num_lan_qps = 1; in i40e_init_msix()
11510 pf->num_lan_msix = 1; in i40e_init_msix()
11520 dev_info(&pf->pdev->dev, in i40e_init_msix()
11521 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", in i40e_init_msix()
11524 vec = v_actual - 1; in i40e_init_msix()
11527 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ in i40e_init_msix()
11528 pf->num_vmdq_vsis = 1; in i40e_init_msix()
11529 pf->num_vmdq_qps = 1; in i40e_init_msix()
11534 pf->num_lan_msix = 1; in i40e_init_msix()
11537 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11538 pf->num_lan_msix = 1; in i40e_init_msix()
11539 pf->num_iwarp_msix = 1; in i40e_init_msix()
11541 pf->num_lan_msix = 2; in i40e_init_msix()
11545 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11546 pf->num_iwarp_msix = min_t(int, (vec / 3), in i40e_init_msix()
11548 pf->num_vmdq_vsis = min_t(int, (vec / 3), in i40e_init_msix()
11551 pf->num_vmdq_vsis = min_t(int, (vec / 2), in i40e_init_msix()
11554 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
11555 pf->num_fdsb_msix = 1; in i40e_init_msix()
11556 vec--; in i40e_init_msix()
11558 pf->num_lan_msix = min_t(int, in i40e_init_msix()
11559 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), in i40e_init_msix()
11560 pf->num_lan_msix); in i40e_init_msix()
11561 pf->num_lan_qps = pf->num_lan_msix; in i40e_init_msix()
11566 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_init_msix()
11567 (pf->num_fdsb_msix == 0)) { in i40e_init_msix()
11568 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11569 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_init_msix()
11570 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_msix()
11572 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_init_msix()
11573 (pf->num_vmdq_msix == 0)) { in i40e_init_msix()
11574 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11575 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; in i40e_init_msix()
11578 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_init_msix()
11579 (pf->num_iwarp_msix == 0)) { in i40e_init_msix()
11580 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11581 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_init_msix()
11583 i40e_debug(&pf->hw, I40E_DEBUG_INIT, in i40e_init_msix()
11584 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", in i40e_init_msix()
11585 pf->num_lan_msix, in i40e_init_msix()
11586 pf->num_vmdq_msix * pf->num_vmdq_vsis, in i40e_init_msix()
11587 pf->num_fdsb_msix, in i40e_init_msix()
11588 pf->num_iwarp_msix); in i40e_init_msix()
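/* A condensed, userspace sketch of the redistribution arithmetic above:
 * when the OS grants fewer MSI-X vectors than requested, vector 0 is
 * kept for the misc causes and the remainder is split between iWARP,
 * VMDq, sideband flow director and LAN. Names and limits below are
 * illustrative, not the driver's exact constants. */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

static void redistribute(int v_actual, int iwarp_on, int fdsb_on,
			 int iwarp_limit, int vmdq_limit, int lan_want)
{
	int vec = v_actual - 1;			/* reserve vector 0 for misc */
	int iwarp = 0, vmdq, lan, fdsb = 0;

	if (iwarp_on) {
		iwarp = min_int(vec / 3, iwarp_limit);
		vmdq = min_int(vec / 3, vmdq_limit);
	} else {
		vmdq = min_int(vec / 2, vmdq_limit);
	}
	if (fdsb_on) {
		fdsb = 1;
		vec--;
	}
	lan = min_int(vec - (iwarp + vmdq), lan_want);
	printf("misc 1, LAN %d, VMDq %d, FDSB %d, iWARP %d\n",
	       lan, vmdq, fdsb, iwarp);
}

int main(void)
{
	redistribute(16, 1, 1, 8, 8, 12);	/* e.g. granted 16 vectors */
	return 0;
}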
11594 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11598 * We allocate one q_vector. If allocation fails, we return -ENOMEM.
11607 return -ENOMEM; in i40e_vsi_alloc_q_vector()
11609 q_vector->vsi = vsi; in i40e_vsi_alloc_q_vector()
11610 q_vector->v_idx = v_idx; in i40e_vsi_alloc_q_vector()
11611 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); in i40e_vsi_alloc_q_vector()
11613 if (vsi->netdev) in i40e_vsi_alloc_q_vector()
11614 netif_napi_add(vsi->netdev, &q_vector->napi, in i40e_vsi_alloc_q_vector()
11618 vsi->q_vectors[v_idx] = q_vector; in i40e_vsi_alloc_q_vector()
11624 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11628 * return -ENOMEM.
11632 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors()
11636 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_alloc_q_vectors()
11637 num_q_vectors = vsi->num_q_vectors; in i40e_vsi_alloc_q_vectors()
11638 else if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_alloc_q_vectors()
11641 return -EINVAL; in i40e_vsi_alloc_q_vectors()
11652 while (v_idx--) in i40e_vsi_alloc_q_vectors()
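/* The error path above is the usual "allocate forward, unwind backward"
 * idiom: v_idx holds the index that failed, so while (v_idx--) frees
 * exactly the vectors that were already allocated and skips the failed
 * slot. A generic, self-contained sketch (names are illustrative): */
#include <stdlib.h>

static int alloc_all(void **slots, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(64);
		if (!slots[i])
			goto err_out;
	}
	return 0;

err_out:
	while (i--) {			/* frees i-1 .. 0 */
		free(slots[i]);
		slots[i] = NULL;
	}
	return -1;			/* -ENOMEM in the driver */
}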
11659 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11667 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_init_interrupt_scheme()
11670 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | in i40e_init_interrupt_scheme()
11679 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_interrupt_scheme()
11686 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_init_interrupt_scheme()
11687 (pf->flags & I40E_FLAG_MSI_ENABLED)) { in i40e_init_interrupt_scheme()
11688 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); in i40e_init_interrupt_scheme()
11689 vectors = pci_enable_msi(pf->pdev); in i40e_init_interrupt_scheme()
11691 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", in i40e_init_interrupt_scheme()
11693 pf->flags &= ~I40E_FLAG_MSI_ENABLED; in i40e_init_interrupt_scheme()
11698 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) in i40e_init_interrupt_scheme()
11699 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); in i40e_init_interrupt_scheme()
11703 pf->irq_pile = kzalloc(size, GFP_KERNEL); in i40e_init_interrupt_scheme()
11704 if (!pf->irq_pile) in i40e_init_interrupt_scheme()
11705 return -ENOMEM; in i40e_init_interrupt_scheme()
11707 pf->irq_pile->num_entries = vectors; in i40e_init_interrupt_scheme()
11708 pf->irq_pile->search_hint = 0; in i40e_init_interrupt_scheme()
11711 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); in i40e_init_interrupt_scheme()
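/* Hedged sketch of the scheme selection above: try MSI-X first, fall
 * back to MSI, and finally to a shared legacy IRQ, recording the
 * outcome so later IRQ setup knows which mode is active. */
enum irq_mode { IRQ_LEGACY, IRQ_MSI, IRQ_MSIX };

static enum irq_mode pick_irq_mode(int msix_ok, int msi_ok)
{
	if (msix_ok)
		return IRQ_MSIX;	/* i40e_init_msix() succeeded */
	if (msi_ok)
		return IRQ_MSI;		/* pci_enable_msi() succeeded */
	return IRQ_LEGACY;		/* shared INTx line */
}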
11717 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11721 * device. This should be called during resume to re-allocate the q_vectors
11728 /* We cleared the MSI and MSI-X flags when disabling the old interrupt in i40e_restore_interrupt_scheme()
11729 * scheme. We need to re-enable them here in order to attempt to in i40e_restore_interrupt_scheme()
11730 * re-acquire the MSI or MSI-X vectors in i40e_restore_interrupt_scheme()
11732 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_restore_interrupt_scheme()
11738 /* Now that we've re-acquired IRQs, we need to remap the vectors and in i40e_restore_interrupt_scheme()
11741 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_restore_interrupt_scheme()
11742 if (pf->vsi[i]) { in i40e_restore_interrupt_scheme()
11743 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
11746 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
11754 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_restore_interrupt_scheme()
11760 while (i--) { in i40e_restore_interrupt_scheme()
11761 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
11762 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
11769 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11774 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11782 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_setup_misc_vector_for_recovery_mode()
11786 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
11787 "MSI-X misc vector request failed, error %d\n", in i40e_setup_misc_vector_for_recovery_mode()
11792 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; in i40e_setup_misc_vector_for_recovery_mode()
11794 err = request_irq(pf->pdev->irq, i40e_intr, flags, in i40e_setup_misc_vector_for_recovery_mode()
11795 pf->int_name, pf); in i40e_setup_misc_vector_for_recovery_mode()
11798 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
11811 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11815 * non-queue interrupts, e.g. AdminQ and errors. This is not used
11820 struct i40e_hw *hw = &pf->hw; in i40e_setup_misc_vector()
11824 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { in i40e_setup_misc_vector()
11825 err = request_irq(pf->msix_entries[0].vector, in i40e_setup_misc_vector()
11826 i40e_intr, 0, pf->int_name, pf); in i40e_setup_misc_vector()
11828 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_setup_misc_vector()
11829 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector()
11831 pf->int_name, err); in i40e_setup_misc_vector()
11832 return -EFAULT; in i40e_setup_misc_vector()
11850 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11861 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq()
11862 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_aq()
11866 ret = i40e_aq_get_rss_key(hw, vsi->id, in i40e_get_rss_aq()
11869 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
11871 i40e_stat_str(&pf->hw, ret), in i40e_get_rss_aq()
11872 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
11873 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
11879 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_get_rss_aq()
11881 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_get_rss_aq()
11883 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
11885 i40e_stat_str(&pf->hw, ret), in i40e_get_rss_aq()
11886 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
11887 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
11896 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
11907 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg()
11908 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_reg()
11909 u16 vf_id = vsi->vf_id; in i40e_config_rss_reg()
11916 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
11919 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
11923 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); in i40e_config_rss_reg()
11930 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
11932 return -EINVAL; in i40e_config_rss_reg()
11935 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
11937 return -EINVAL; in i40e_config_rss_reg()
11941 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_config_rss_reg()
11950 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
11961 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg()
11962 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_reg()
11975 return -EINVAL; in i40e_get_rss_reg()
11984 * i40e_config_rss - Configure RSS keys and lut
11994 struct i40e_pf *pf = vsi->back; in i40e_config_rss()
11996 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_config_rss()
12003 * i40e_get_rss - Get RSS keys and lut
12013 struct i40e_pf *pf = vsi->back; in i40e_get_rss()
12015 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_get_rss()
12022 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
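/* The i40e_fill_rss_lut() body is elided in this listing; a default
 * fill typically spreads the enabled queues round-robin across the
 * table so that every hash bucket maps to a queue below rss_size.
 * Minimal sketch under that assumption: */
#include <stdint.h>

static void fill_rss_lut_sketch(uint8_t *lut, uint16_t lut_size,
				uint16_t rss_size)
{
	uint16_t i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;	/* queue index for hash bucket i */
}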
12038 * i40e_pf_config_rss - Prepare for RSS if used
12043 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_pf_config_rss()
12046 struct i40e_hw *hw = &pf->hw; in i40e_pf_config_rss()
12061 reg_val = (pf->rss_table_size == 512) ? in i40e_pf_config_rss()
12067 if (!vsi->rss_size) { in i40e_pf_config_rss()
12070 * could end up with zero TCs. Check for that to avoid in i40e_pf_config_rss()
12071 * divide-by-zero. It probably won't pass traffic, but it also in i40e_pf_config_rss()
12074 qcount = vsi->num_queue_pairs / in i40e_pf_config_rss()
12075 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); in i40e_pf_config_rss()
12076 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
12078 if (!vsi->rss_size) in i40e_pf_config_rss()
12079 return -EINVAL; in i40e_pf_config_rss()
12081 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_pf_config_rss()
12083 return -ENOMEM; in i40e_pf_config_rss()
12086 if (vsi->rss_lut_user) in i40e_pf_config_rss()
12087 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_pf_config_rss()
12089 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
12094 if (vsi->rss_hkey_user) in i40e_pf_config_rss()
12095 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_pf_config_rss()
12098 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_pf_config_rss()
12105 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12115 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_reconfig_rss_queues()
12118 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_reconfig_rss_queues()
12122 new_rss_size = min_t(int, queue_count, pf->rss_size_max); in i40e_reconfig_rss_queues()
12124 if (queue_count != vsi->num_queue_pairs) { in i40e_reconfig_rss_queues()
12127 vsi->req_queue_pairs = queue_count; in i40e_reconfig_rss_queues()
12130 pf->alloc_rss_size = new_rss_size; in i40e_reconfig_rss_queues()
12137 if (queue_count < vsi->rss_size) { in i40e_reconfig_rss_queues()
12139 dev_dbg(&pf->pdev->dev, in i40e_reconfig_rss_queues()
12143 /* Reset vsi->rss_size, as number of enabled queues changed */ in i40e_reconfig_rss_queues()
12144 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; in i40e_reconfig_rss_queues()
12145 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
12149 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", in i40e_reconfig_rss_queues()
12150 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
12151 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
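/* Worked example of the resize above (illustrative numbers): with
 * num_queue_pairs = 16 and tc_config.numtc = 4, qcount = 16 / 4 = 4,
 * so with pf->alloc_rss_size = 8 the VSI ends up with
 * rss_size = min(8, 4) = 4 RSS queues per TC. */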
12155 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12164 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, in i40e_get_partition_bw_setting()
12169 pf->min_bw = min_bw; in i40e_get_partition_bw_setting()
12171 pf->max_bw = max_bw; in i40e_get_partition_bw_setting()
12178 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12189 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); in i40e_set_partition_bw_setting()
12190 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12191 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12194 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); in i40e_set_partition_bw_setting()
12200 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12210 if (pf->hw.partition_id != 1) { in i40e_commit_partition_bw_setting()
12211 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12213 pf->hw.partition_id); in i40e_commit_partition_bw_setting()
12219 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); in i40e_commit_partition_bw_setting()
12220 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12222 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12224 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12225 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12229 /* Read word 0x10 of NVM - SW compatibility word 1 */ in i40e_commit_partition_bw_setting()
12230 ret = i40e_aq_read_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12237 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12238 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12240 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", in i40e_commit_partition_bw_setting()
12241 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12242 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12246 /* Wait a bit for NVM release to complete */ in i40e_commit_partition_bw_setting()
12250 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); in i40e_commit_partition_bw_setting()
12251 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12253 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12255 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12256 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12261 * the NVM - thus storing the bandwidth values permanently. in i40e_commit_partition_bw_setting()
12263 ret = i40e_aq_update_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12270 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12271 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12273 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12275 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
12276 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
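/* Condensed sketch of the control flow above: every NVM access is
 * bracketed by an acquire/release pair - acquire for READ, read the SW
 * compatibility word, release; then acquire for WRITE, update, release.
 * The stubs below merely stand in for i40e_acquire_nvm(),
 * i40e_aq_read_nvm(), i40e_aq_update_nvm() and i40e_release_nvm(). */
#include <stdio.h>

static int nvm_acquire(const char *mode) { printf("acquire (%s)\n", mode); return 0; }
static void nvm_release(void)            { printf("release\n"); }

static int commit_bw_sketch(void)
{
	int ret;

	ret = nvm_acquire("read");
	if (ret)
		return ret;
	printf("read SW compatibility word 0x10\n");
	nvm_release();

	ret = nvm_acquire("write");
	if (ret)
		return ret;
	printf("update the word so the values persist across resets\n");
	nvm_release();
	return 0;
}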
12283 * i40e_is_total_port_shutdown_enabled - read NVM and return value
12302 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12307 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12314 read_status = i40e_read_nvm_module_data(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12322 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); in i40e_is_total_port_shutdown_enabled()
12328 dev_warn(&pf->pdev->dev, in i40e_is_total_port_shutdown_enabled()
12329 "total-port-shutdown feature is off due to read nvm error: %s\n", in i40e_is_total_port_shutdown_enabled()
12330 i40e_stat_str(&pf->hw, read_status)); in i40e_is_total_port_shutdown_enabled()
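/* Sketch of the per-port extraction above: each port owns a fixed-width
 * field inside the link-behavior word read from the NVM, so shifting by
 * port * field_width and masking isolates this port's bits. The width
 * parameter below is illustrative, not the hardware constant. */
#include <stdint.h>

static uint16_t port_field(uint16_t word, unsigned int port,
			   unsigned int width)
{
	return (word >> (port * width)) & ((1u << width) - 1);
}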
12335 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12349 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | in i40e_sw_init()
12354 pf->rx_itr_default = I40E_ITR_RX_DEF; in i40e_sw_init()
12355 pf->tx_itr_default = I40E_ITR_TX_DEF; in i40e_sw_init()
12360 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); in i40e_sw_init()
12361 pf->alloc_rss_size = 1; in i40e_sw_init()
12362 pf->rss_table_size = pf->hw.func_caps.rss_table_size; in i40e_sw_init()
12363 pf->rss_size_max = min_t(int, pf->rss_size_max, in i40e_sw_init()
12364 pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12366 /* find the next higher power-of-2 of num cpus */ in i40e_sw_init()
12368 pf->rss_size_max = min_t(int, pf->rss_size_max, pow); in i40e_sw_init()
12370 if (pf->hw.func_caps.rss) { in i40e_sw_init()
12371 pf->flags |= I40E_FLAG_RSS_ENABLED; in i40e_sw_init()
12372 pf->alloc_rss_size = min_t(int, pf->rss_size_max, in i40e_sw_init()
12377 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { in i40e_sw_init()
12378 pf->flags |= I40E_FLAG_MFP_ENABLED; in i40e_sw_init()
12379 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); in i40e_sw_init()
12381 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12384 dev_info(&pf->pdev->dev, in i40e_sw_init()
12386 pf->min_bw, pf->max_bw); in i40e_sw_init()
12393 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || in i40e_sw_init()
12394 (pf->hw.func_caps.fd_filters_best_effort > 0)) { in i40e_sw_init()
12395 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; in i40e_sw_init()
12396 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; in i40e_sw_init()
12397 if (pf->flags & I40E_FLAG_MFP_ENABLED && in i40e_sw_init()
12398 pf->hw.num_partitions > 1) in i40e_sw_init()
12399 dev_info(&pf->pdev->dev, in i40e_sw_init()
12402 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_sw_init()
12403 pf->fdir_pf_filter_count = in i40e_sw_init()
12404 pf->hw.func_caps.fd_filters_guaranteed; in i40e_sw_init()
12405 pf->hw.fdir_shared_filter_count = in i40e_sw_init()
12406 pf->hw.func_caps.fd_filters_best_effort; in i40e_sw_init()
12409 if (pf->hw.mac.type == I40E_MAC_X722) { in i40e_sw_init()
12410 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | in i40e_sw_init()
12423 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != in i40e_sw_init()
12425 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12427 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; in i40e_sw_init()
12429 } else if ((pf->hw.aq.api_maj_ver > 1) || in i40e_sw_init()
12430 ((pf->hw.aq.api_maj_ver == 1) && in i40e_sw_init()
12431 (pf->hw.aq.api_min_ver > 4))) { in i40e_sw_init()
12433 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; in i40e_sw_init()
12437 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) in i40e_sw_init()
12438 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; in i40e_sw_init()
12440 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12441 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || in i40e_sw_init()
12442 (pf->hw.aq.fw_maj_ver < 4))) { in i40e_sw_init()
12443 pf->hw_features |= I40E_HW_RESTART_AUTONEG; in i40e_sw_init()
12445 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; in i40e_sw_init()
12449 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12450 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || in i40e_sw_init()
12451 (pf->hw.aq.fw_maj_ver < 4))) in i40e_sw_init()
12452 pf->hw_features |= I40E_HW_STOP_FW_LLDP; in i40e_sw_init()
12455 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
12456 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || in i40e_sw_init()
12457 (pf->hw.aq.fw_maj_ver >= 5))) in i40e_sw_init()
12458 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; in i40e_sw_init()
12461 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12462 pf->hw.aq.fw_maj_ver >= 6) in i40e_sw_init()
12463 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; in i40e_sw_init()
12465 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { in i40e_sw_init()
12466 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; in i40e_sw_init()
12467 pf->flags |= I40E_FLAG_VMDQ_ENABLED; in i40e_sw_init()
12468 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); in i40e_sw_init()
12471 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { in i40e_sw_init()
12472 pf->flags |= I40E_FLAG_IWARP_ENABLED; in i40e_sw_init()
12474 pf->num_iwarp_msix = (int)num_online_cpus() + 1; in i40e_sw_init()
12481 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12482 pf->hw.func_caps.npar_enable && in i40e_sw_init()
12483 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) in i40e_sw_init()
12484 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; in i40e_sw_init()
12487 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { in i40e_sw_init()
12488 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; in i40e_sw_init()
12489 pf->flags |= I40E_FLAG_SRIOV_ENABLED; in i40e_sw_init()
12490 pf->num_req_vfs = min_t(int, in i40e_sw_init()
12491 pf->hw.func_caps.num_vfs, in i40e_sw_init()
12495 pf->eeprom_version = 0xDEAD; in i40e_sw_init()
12496 pf->lan_veb = I40E_NO_VEB; in i40e_sw_init()
12497 pf->lan_vsi = I40E_NO_VSI; in i40e_sw_init()
12500 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; in i40e_sw_init()
12504 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12505 pf->qp_pile = kzalloc(size, GFP_KERNEL); in i40e_sw_init()
12506 if (!pf->qp_pile) { in i40e_sw_init()
12507 err = -ENOMEM; in i40e_sw_init()
12510 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; in i40e_sw_init()
12511 pf->qp_pile->search_hint = 0; in i40e_sw_init()
12513 pf->tx_timeout_recovery_level = 1; in i40e_sw_init()
12515 if (pf->hw.mac.type != I40E_MAC_X722 && in i40e_sw_init()
12520 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | in i40e_sw_init()
12522 dev_info(&pf->pdev->dev, in i40e_sw_init()
12523 "total-port-shutdown was enabled, link-down-on-close is forced on\n"); in i40e_sw_init()
12525 mutex_init(&pf->switch_mutex); in i40e_sw_init()
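/* Sketch of the qp_pile sizing above: the tracker is a small header
 * followed by one u16 slot per queue pair, allocated in a single
 * kzalloc() of sizeof(header) + num_entries * sizeof(u16). Illustrative
 * userspace model using a C99 flexible array member: */
#include <stdint.h>
#include <stdlib.h>

struct lump_tracking {
	uint16_t num_entries;
	uint16_t list[];		/* one slot per trackable resource */
};

static struct lump_tracking *tracker_alloc(uint16_t num_entries)
{
	struct lump_tracking *t;

	t = calloc(1, sizeof(*t) + num_entries * sizeof(t->list[0]));
	if (t)
		t->num_entries = num_entries;
	return t;
}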
12532 * i40e_set_ntuple - set the ntuple feature flag and take action
12542 /* Check if Flow Director n-tuple support was enabled or disabled. If in i40e_set_ntuple()
12547 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_set_ntuple()
12549 /* enable FD_SB only if there is an MSI-X vector and no cloud in i40e_set_ntuple()
12552 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { in i40e_set_ntuple()
12553 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12554 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12558 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_set_ntuple()
12562 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12563 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); in i40e_set_ntuple()
12564 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12567 pf->fd_add_err = 0; in i40e_set_ntuple()
12568 pf->fd_atr_cnt = 0; in i40e_set_ntuple()
12569 /* if ATR was auto disabled it can be re-enabled. */ in i40e_set_ntuple()
12570 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_set_ntuple()
12571 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_set_ntuple()
12572 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_set_ntuple()
12573 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); in i40e_set_ntuple()
12579 * i40e_clear_rss_lut - clear the rx hash lookup table
12584 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut()
12585 struct i40e_hw *hw = &pf->hw; in i40e_clear_rss_lut()
12586 u16 vf_id = vsi->vf_id; in i40e_clear_rss_lut()
12589 if (vsi->type == I40E_VSI_MAIN) { in i40e_clear_rss_lut()
12592 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_clear_rss_lut()
12596 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_clear_rss_lut()
12601 * i40e_set_features - set the netdev feature flags
12610 struct i40e_vsi *vsi = np->vsi; in i40e_set_features()
12611 struct i40e_pf *pf = vsi->back; in i40e_set_features()
12614 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) in i40e_set_features()
12617 netdev->features & NETIF_F_RXHASH) in i40e_set_features()
12625 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) { in i40e_set_features()
12626 dev_err(&pf->pdev->dev, in i40e_set_features()
12628 return -EINVAL; in i40e_set_features()
12631 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) in i40e_set_features()
12647 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_set_port()
12651 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN : in i40e_udp_tunnel_set_port()
12654 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index, in i40e_udp_tunnel_set_port()
12659 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_udp_tunnel_set_port()
12660 return -EIO; in i40e_udp_tunnel_set_port()
12672 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_unset_port()
12675 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL); in i40e_udp_tunnel_unset_port()
12679 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_udp_tunnel_unset_port()
12680 return -EIO; in i40e_udp_tunnel_unset_port()
12690 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id()
12691 struct i40e_hw *hw = &pf->hw; in i40e_get_phys_port_id()
12693 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) in i40e_get_phys_port_id()
12694 return -EOPNOTSUPP; in i40e_get_phys_port_id()
12696 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); in i40e_get_phys_port_id()
12697 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); in i40e_get_phys_port_id()
12703 * i40e_ndo_fdb_add - add an entry to the hardware database
12719 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add()
12722 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) in i40e_ndo_fdb_add()
12723 return -EOPNOTSUPP; in i40e_ndo_fdb_add()
12726 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); in i40e_ndo_fdb_add()
12727 return -EINVAL; in i40e_ndo_fdb_add()
12733 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { in i40e_ndo_fdb_add()
12735 return -EINVAL; in i40e_ndo_fdb_add()
12743 err = -EINVAL; in i40e_ndo_fdb_add()
12746 if (err == -EEXIST && !(flags & NLM_F_EXCL)) in i40e_ndo_fdb_add()
12753 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12774 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_setlink()
12775 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink()
12781 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_setlink()
12782 return -EOPNOTSUPP; in i40e_ndo_bridge_setlink()
12786 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_setlink()
12787 veb = pf->veb[i]; in i40e_ndo_bridge_setlink()
12801 return -EINVAL; in i40e_ndo_bridge_setlink()
12805 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
12806 vsi->tc_config.enabled_tc); in i40e_ndo_bridge_setlink()
12808 veb->bridge_mode = mode; in i40e_ndo_bridge_setlink()
12812 return -ENOENT; in i40e_ndo_bridge_setlink()
12815 } else if (mode != veb->bridge_mode) { in i40e_ndo_bridge_setlink()
12817 veb->bridge_mode = mode; in i40e_ndo_bridge_setlink()
12820 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
12822 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
12832 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12849 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_getlink()
12850 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink()
12855 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_getlink()
12856 return -EOPNOTSUPP; in i40e_ndo_bridge_getlink()
12860 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_getlink()
12861 veb = pf->veb[i]; in i40e_ndo_bridge_getlink()
12867 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, in i40e_ndo_bridge_getlink()
12872 * i40e_features_check - Validate encapsulated packet conforms to limits
12887 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_features_check()
12893 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) in i40e_features_check()
12897 len = skb_network_header(skb) - skb->data; in i40e_features_check()
12902 len = skb_transport_header(skb) - skb_network_header(skb); in i40e_features_check()
12906 if (skb->encapsulation) { in i40e_features_check()
12908 len = skb_inner_network_header(skb) - skb_transport_header(skb); in i40e_features_check()
12913 len = skb_inner_transport_header(skb) - in i40e_features_check()
12930 * i40e_xdp_setup - add/remove an XDP program
12938 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; in i40e_xdp_setup()
12939 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup()
12945 if (frame_size > vsi->rx_buf_len) { in i40e_xdp_setup()
12947 return -EINVAL; in i40e_xdp_setup()
12950 /* When turning XDP on->off/off->on we reset and rebuild the rings. */ in i40e_xdp_setup()
12956 old_prog = xchg(&vsi->xdp_prog, prog); in i40e_xdp_setup()
12960 /* Wait until ndo_xsk_wakeup completes. */ in i40e_xdp_setup()
12965 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
12966 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in i40e_xdp_setup()
12975 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
12976 if (vsi->xdp_rings[i]->xsk_pool) in i40e_xdp_setup()
12977 (void)i40e_xsk_wakeup(vsi->netdev, i, in i40e_xdp_setup()
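/* Sketch of the program swap above: xchg() atomically publishes the new
 * XDP program and returns the old one, each RX ring then picks up the
 * new pointer via WRITE_ONCE(), and only afterwards is the old program
 * released, so no datapath can still be using it. Illustrative model
 * with C11 atomics standing in for the kernel primitives: */
#include <stdatomic.h>
#include <stddef.h>

struct prog { int id; };

static _Atomic(struct prog *) vsi_prog;

static struct prog *swap_prog(struct prog *newp)
{
	/* plays the role of old_prog = xchg(&vsi->xdp_prog, prog) */
	return atomic_exchange(&vsi_prog, newp);
}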
12984 * i40e_enter_busy_conf - Enters busy config state
12991 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf()
12994 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_enter_busy_conf()
12995 timeout--; in i40e_enter_busy_conf()
12997 return -EBUSY; in i40e_enter_busy_conf()
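/* Sketch of the busy gate above: test_and_set_bit() atomically claims
 * the __I40E_CONFIG_BUSY flag; a loser retries with a short sleep until
 * the holder clears the bit or the retry budget runs out. Illustrative
 * userspace model: */
#include <stdatomic.h>
#include <unistd.h>

static atomic_flag config_busy = ATOMIC_FLAG_INIT;

static int enter_busy(int retries)
{
	while (atomic_flag_test_and_set(&config_busy)) {
		if (retries-- <= 0)
			return -1;	/* -EBUSY in the driver */
		usleep(1000);		/* back off between attempts */
	}
	return 0;
}

static void exit_busy(void)
{
	atomic_flag_clear(&config_busy);
}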
13005 * i40e_exit_busy_conf - Exits busy config state
13010 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf()
13012 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_exit_busy_conf()
13016 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13022 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, in i40e_queue_pair_reset_stats()
13023 sizeof(vsi->rx_rings[queue_pair]->rx_stats)); in i40e_queue_pair_reset_stats()
13024 memset(&vsi->tx_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13025 sizeof(vsi->tx_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13027 memset(&vsi->xdp_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13028 sizeof(vsi->xdp_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13033 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13039 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13041 /* Make sure that in-progress ndo_xdp_xmit calls are in i40e_queue_pair_clean_rings()
13045 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13047 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13051 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13059 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_toggle_napi()
13060 struct i40e_q_vector *q_vector = rxr->q_vector; in i40e_queue_pair_toggle_napi()
13062 if (!vsi->netdev) in i40e_queue_pair_toggle_napi()
13066 if (q_vector->rx.ring || q_vector->tx.ring) { in i40e_queue_pair_toggle_napi()
13068 napi_enable(&q_vector->napi); in i40e_queue_pair_toggle_napi()
13070 napi_disable(&q_vector->napi); in i40e_queue_pair_toggle_napi()
13075 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13085 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings()
13088 pf_q = vsi->base_queue + queue_pair; in i40e_queue_pair_toggle_rings()
13089 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
13092 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13094 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13101 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13103 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13116 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
13117 pf_q + vsi->alloc_queue_pairs, in i40e_queue_pair_toggle_rings()
13120 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13122 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13129 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13135 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_enable_irq()
13136 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq()
13137 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_enable_irq()
13140 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_queue_pair_enable_irq()
13141 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); in i40e_queue_pair_enable_irq()
13149 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13155 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_disable_irq()
13156 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq()
13157 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_disable_irq()
13165 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_queue_pair_disable_irq()
13166 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; in i40e_queue_pair_disable_irq()
13168 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); in i40e_queue_pair_disable_irq()
13170 synchronize_irq(pf->msix_entries[intpf].vector); in i40e_queue_pair_disable_irq()
13172 /* Legacy and MSI mode - this stops all interrupt handling */ in i40e_queue_pair_disable_irq()
13176 synchronize_irq(pf->pdev->irq); in i40e_queue_pair_disable_irq()
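/* The ordering above is the point: first mask the vector at the device
 * (write 0 to its DYN_CTL register) so no new interrupt can fire, then
 * synchronize_irq() to wait out any handler already running. Condensed
 * sketch of the two paths with stubbed helpers: */
#include <stdio.h>

static void mask_at_device(int reg) { printf("DYN_CTL[%d] = 0\n", reg); }
static void sync_handler(int irq)   { printf("synchronize_irq(%d)\n", irq); }

static void disable_irq_sketch(int msix, int base_vector, int v_idx,
			       int legacy_irq)
{
	if (msix) {
		int intpf = base_vector + v_idx;

		/* DYN_CTLN indexing is off by one: vector 0 is the misc vector */
		mask_at_device(intpf - 1);
		sync_handler(intpf);
	} else {
		/* legacy/MSI: masking stops all interrupt handling for the PF */
		mask_at_device(0);
		sync_handler(legacy_irq);
	}
}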
13181 * i40e_queue_pair_disable - Disables a queue pair
13205 * i40e_queue_pair_enable - Enables a queue pair
13215 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_enable()
13220 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_enable()
13225 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_enable()
13239 * i40e_xdp - implements ndo_bpf for i40e
13247 struct i40e_vsi *vsi = np->vsi; in i40e_xdp()
13249 if (vsi->type != I40E_VSI_MAIN) in i40e_xdp()
13250 return -EINVAL; in i40e_xdp()
13252 switch (xdp->command) { in i40e_xdp()
13254 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack); in i40e_xdp()
13256 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool, in i40e_xdp()
13257 xdp->xsk.queue_id); in i40e_xdp()
13259 return -EINVAL; in i40e_xdp()
13303 * i40e_config_netdev - Setup the netdev flags
13310 struct i40e_pf *pf = vsi->back; in i40e_config_netdev()
13311 struct i40e_hw *hw = &pf->hw; in i40e_config_netdev()
13321 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); in i40e_config_netdev()
13323 return -ENOMEM; in i40e_config_netdev()
13325 vsi->netdev = netdev; in i40e_config_netdev()
13327 np->vsi = vsi; in i40e_config_netdev()
13350 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) in i40e_config_netdev()
13351 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in i40e_config_netdev()
13353 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; in i40e_config_netdev()
13355 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; in i40e_config_netdev()
13357 netdev->hw_enc_features |= hw_enc_features; in i40e_config_netdev()
13360 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; in i40e_config_netdev()
13363 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; in i40e_config_netdev()
13369 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_config_netdev()
13372 netdev->hw_features |= hw_features; in i40e_config_netdev()
13374 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; in i40e_config_netdev()
13375 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; in i40e_config_netdev()
13377 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_netdev()
13378 SET_NETDEV_DEV(netdev, &pf->pdev->dev); in i40e_config_netdev()
13379 ether_addr_copy(mac_addr, hw->mac.perm_addr); in i40e_config_netdev()
13381 * some older NVM configurations load a default MAC-VLAN in i40e_config_netdev()
13391 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13393 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13398 * original name by IFNAMSIZ - 4 in i40e_config_netdev()
13400 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", in i40e_config_netdev()
13401 IFNAMSIZ - 4, in i40e_config_netdev()
13402 pf->vsi[pf->lan_vsi]->netdev->name); in i40e_config_netdev()
13405 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13407 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13424 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13426 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13428 ether_addr_copy(netdev->dev_addr, mac_addr); in i40e_config_netdev()
13429 ether_addr_copy(netdev->perm_addr, mac_addr); in i40e_config_netdev()
13431 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ in i40e_config_netdev()
13432 netdev->neigh_priv_len = sizeof(u32) * 4; in i40e_config_netdev()
13434 netdev->priv_flags |= IFF_UNICAST_FLT; in i40e_config_netdev()
13435 netdev->priv_flags |= IFF_SUPP_NOFCS; in i40e_config_netdev()
13437 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); in i40e_config_netdev()
13439 netdev->netdev_ops = &i40e_netdev_ops; in i40e_config_netdev()
13440 netdev->watchdog_timeo = 5 * HZ; in i40e_config_netdev()
13443 /* MTU range: 68 - 9706 */ in i40e_config_netdev()
13444 netdev->min_mtu = ETH_MIN_MTU; in i40e_config_netdev()
13445 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; in i40e_config_netdev()
13451 * i40e_vsi_delete - Delete a VSI from the switch
13459 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) in i40e_vsi_delete()
13462 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); in i40e_vsi_delete()
13466 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13474 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb()
13477 if (vsi->veb_idx >= I40E_MAX_VEB) in i40e_is_vsi_uplink_mode_veb()
13480 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
13482 dev_info(&pf->pdev->dev, in i40e_is_vsi_uplink_mode_veb()
13484 return -ENOENT; in i40e_is_vsi_uplink_mode_veb()
13488 if (veb->bridge_mode & BRIDGE_MODE_VEPA) { in i40e_is_vsi_uplink_mode_veb()
13500 * i40e_add_vsi - Add a VSI to the switch
13508 int ret = -ENODEV; in i40e_add_vsi()
13509 struct i40e_pf *pf = vsi->back; in i40e_add_vsi()
13510 struct i40e_hw *hw = &pf->hw; in i40e_add_vsi()
13520 switch (vsi->type) { in i40e_add_vsi()
13527 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13528 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13530 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_add_vsi()
13533 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13535 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13536 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13537 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13538 return -ENOENT; in i40e_add_vsi()
13540 vsi->info = ctxt.info; in i40e_add_vsi()
13541 vsi->info.valid_sections = 0; in i40e_add_vsi()
13543 vsi->seid = ctxt.seid; in i40e_add_vsi()
13544 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
13549 * negative logic - if it's set, we need to fiddle with in i40e_add_vsi()
13552 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { in i40e_add_vsi()
13554 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13555 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13563 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13565 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13566 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13567 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13568 ret = -ENOENT; in i40e_add_vsi()
13574 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && in i40e_add_vsi()
13575 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ in i40e_add_vsi()
13577 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13578 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13583 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13585 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13586 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13587 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13588 ret = -ENOENT; in i40e_add_vsi()
13593 vsi->info.valid_sections = 0; in i40e_add_vsi()
13596 * reconfigure it to enable all TCs that are in i40e_add_vsi()
13606 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13607 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", in i40e_add_vsi()
13609 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13610 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13611 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13617 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
13619 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
13622 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && in i40e_add_vsi()
13633 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
13635 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
13654 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
13655 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; in i40e_add_vsi()
13656 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
13670 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_add_vsi()
13680 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
13696 return -ENODEV; in i40e_add_vsi()
13699 if (vsi->type != I40E_VSI_MAIN) { in i40e_add_vsi()
13702 dev_info(&vsi->back->pdev->dev, in i40e_add_vsi()
13704 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13705 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13706 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13707 ret = -ENOENT; in i40e_add_vsi()
13710 vsi->info = ctxt.info; in i40e_add_vsi()
13711 vsi->info.valid_sections = 0; in i40e_add_vsi()
13712 vsi->seid = ctxt.seid; in i40e_add_vsi()
13713 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
13716 vsi->active_filters = 0; in i40e_add_vsi()
13717 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_add_vsi()
13718 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
13720 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vsi()
13721 f->state = I40E_FILTER_NEW; in i40e_add_vsi()
13724 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
13727 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_vsi()
13728 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_add_vsi()
13734 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13736 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13737 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13747 * i40e_vsi_release - Delete a VSI and free its resources
13761 pf = vsi->back; in i40e_vsi_release()
13763 /* release of a VEB-owner or last VSI is not allowed */ in i40e_vsi_release()
13764 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_vsi_release()
13765 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", in i40e_vsi_release()
13766 vsi->seid, vsi->uplink_seid); in i40e_vsi_release()
13767 return -ENODEV; in i40e_vsi_release()
13769 if (vsi == pf->vsi[pf->lan_vsi] && in i40e_vsi_release()
13770 !test_bit(__I40E_DOWN, pf->state)) { in i40e_vsi_release()
13771 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); in i40e_vsi_release()
13772 return -ENODEV; in i40e_vsi_release()
13775 uplink_seid = vsi->uplink_seid; in i40e_vsi_release()
13776 if (vsi->type != I40E_VSI_SRIOV) { in i40e_vsi_release()
13777 if (vsi->netdev_registered) { in i40e_vsi_release()
13778 vsi->netdev_registered = false; in i40e_vsi_release()
13779 if (vsi->netdev) { in i40e_vsi_release()
13781 unregister_netdev(vsi->netdev); in i40e_vsi_release()
13789 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
13792 if (vsi->netdev) { in i40e_vsi_release()
13793 __dev_uc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
13794 __dev_mc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
13798 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) in i40e_vsi_release()
13801 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
13807 if (vsi->netdev) { in i40e_vsi_release()
13808 free_netdev(vsi->netdev); in i40e_vsi_release()
13809 vsi->netdev = NULL; in i40e_vsi_release()
13819 * the orphan VEBs yet. We'll wait for an explicit remove request in i40e_vsi_release()
13822 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_release()
13823 if (pf->vsi[i] && in i40e_vsi_release()
13824 pf->vsi[i]->uplink_seid == uplink_seid && in i40e_vsi_release()
13825 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
13830 if (!pf->veb[i]) in i40e_vsi_release()
13832 if (pf->veb[i]->uplink_seid == uplink_seid) in i40e_vsi_release()
13834 if (pf->veb[i]->seid == uplink_seid) in i40e_vsi_release()
13835 veb = pf->veb[i]; in i40e_vsi_release()
13837 if (n == 0 && veb && veb->uplink_seid != 0) in i40e_vsi_release()
13844 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13855 int ret = -ENOENT; in i40e_vsi_setup_vectors()
13856 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors()
13858 if (vsi->q_vectors[0]) { in i40e_vsi_setup_vectors()
13859 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", in i40e_vsi_setup_vectors()
13860 vsi->seid); in i40e_vsi_setup_vectors()
13861 return -EEXIST; in i40e_vsi_setup_vectors()
13864 if (vsi->base_vector) { in i40e_vsi_setup_vectors()
13865 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", in i40e_vsi_setup_vectors()
13866 vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
13867 return -EEXIST; in i40e_vsi_setup_vectors()
13872 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
13874 vsi->num_q_vectors, vsi->seid, ret); in i40e_vsi_setup_vectors()
13875 vsi->num_q_vectors = 0; in i40e_vsi_setup_vectors()
13882 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_vsi_setup_vectors()
13884 if (vsi->num_q_vectors) in i40e_vsi_setup_vectors()
13885 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
13886 vsi->num_q_vectors, vsi->idx); in i40e_vsi_setup_vectors()
13887 if (vsi->base_vector < 0) { in i40e_vsi_setup_vectors()
13888 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
13890 vsi->num_q_vectors, vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
13892 ret = -ENOENT; in i40e_vsi_setup_vectors()
13901 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
13904 * This re-allocates a VSI's queue resources.
13919 pf = vsi->back; in i40e_vsi_reinit_setup()
13921 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
13930 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_reinit_setup()
13933 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
13935 dev_info(&pf->pdev->dev, in i40e_vsi_reinit_setup()
13937 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_reinit_setup()
13940 vsi->base_queue = ret; in i40e_vsi_reinit_setup()
13945 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_vsi_reinit_setup()
13946 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
13947 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_vsi_reinit_setup()
13948 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_vsi_reinit_setup()
13949 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_reinit_setup()
13950 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
13963 if (vsi->netdev_registered) { in i40e_vsi_reinit_setup()
13964 vsi->netdev_registered = false; in i40e_vsi_reinit_setup()
13965 unregister_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
13966 free_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
13967 vsi->netdev = NULL; in i40e_vsi_reinit_setup()
13969 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
13976 * i40e_vsi_setup - Set up a VSI by a given type
13998 * - the PF's port seid in i40e_vsi_setup()
14001 * - seid of an existing VEB in i40e_vsi_setup()
14002 * - seid of a VSI that owns an existing VEB in i40e_vsi_setup()
14003 * - seid of a VSI that doesn't own a VEB in i40e_vsi_setup()
14005 * - seid of the PF VSI, which is what creates the first VEB in i40e_vsi_setup()
14011 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { in i40e_vsi_setup()
14012 veb = pf->veb[i]; in i40e_vsi_setup()
14017 if (!veb && uplink_seid != pf->mac_seid) { in i40e_vsi_setup()
14019 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_setup()
14020 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { in i40e_vsi_setup()
14021 vsi = pf->vsi[i]; in i40e_vsi_setup()
14026 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", in i40e_vsi_setup()
14031 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
14032 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14033 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14034 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_vsi_setup()
14035 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14036 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14038 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { in i40e_vsi_setup()
14039 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup()
14047 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_vsi_setup()
14048 veb->bridge_mode = BRIDGE_MODE_VEPA; in i40e_vsi_setup()
14049 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_vsi_setup()
14054 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_vsi_setup()
14055 veb = pf->veb[i]; in i40e_vsi_setup()
14058 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); in i40e_vsi_setup()
14062 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_vsi_setup()
14063 uplink_seid = veb->seid; in i40e_vsi_setup()
14070 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
14073 vsi->type = type; in i40e_vsi_setup()
14074 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); in i40e_vsi_setup()
14077 pf->lan_vsi = v_idx; in i40e_vsi_setup()
14079 vsi->vf_id = param1; in i40e_vsi_setup()
14081 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_setup()
14084 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
14086 dev_info(&pf->pdev->dev, in i40e_vsi_setup()
14088 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_setup()
14091 vsi->base_queue = ret; in i40e_vsi_setup()
14094 vsi->uplink_seid = uplink_seid; in i40e_vsi_setup()
14099 switch (vsi->type) { in i40e_vsi_setup()
14106 ret = register_netdev(vsi->netdev); in i40e_vsi_setup()
14109 vsi->netdev_registered = true; in i40e_vsi_setup()
14110 netif_carrier_off(vsi->netdev); in i40e_vsi_setup()
14136 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && in i40e_vsi_setup()
14137 (vsi->type == I40E_VSI_VMDQ2)) { in i40e_vsi_setup()
14145 if (vsi->netdev_registered) { in i40e_vsi_setup()
14146 vsi->netdev_registered = false; in i40e_vsi_setup()
14147 unregister_netdev(vsi->netdev); in i40e_vsi_setup()
14148 free_netdev(vsi->netdev); in i40e_vsi_setup()
14149 vsi->netdev = NULL; in i40e_vsi_setup()
14152 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
14160 * i40e_veb_get_bw_info - Query VEB BW information
14169 struct i40e_pf *pf = veb->pf; in i40e_veb_get_bw_info()
14170 struct i40e_hw *hw = &pf->hw; in i40e_veb_get_bw_info()
14175 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, in i40e_veb_get_bw_info()
14178 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14180 i40e_stat_str(&pf->hw, ret), in i40e_veb_get_bw_info()
14181 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14185 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, in i40e_veb_get_bw_info()
14188 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14190 i40e_stat_str(&pf->hw, ret), in i40e_veb_get_bw_info()
14191 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14195 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); in i40e_veb_get_bw_info()
14196 veb->bw_max_quanta = ets_data.tc_bw_max; in i40e_veb_get_bw_info()
14197 veb->is_abs_credits = bw_data.absolute_credits_enable; in i40e_veb_get_bw_info()
14198 veb->enabled_tc = ets_data.tc_valid_bits; in i40e_veb_get_bw_info()
14202 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; in i40e_veb_get_bw_info()
14203 veb->bw_tc_limit_credits[i] = in i40e_veb_get_bw_info()
14205 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); in i40e_veb_get_bw_info()
14213 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14221 int ret = -ENOENT; in i40e_veb_mem_alloc()
14226 mutex_lock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14235 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) in i40e_veb_mem_alloc()
14238 ret = -ENOMEM; in i40e_veb_mem_alloc()
14244 ret = -ENOMEM; in i40e_veb_mem_alloc()
14247 veb->pf = pf; in i40e_veb_mem_alloc()
14248 veb->idx = i; in i40e_veb_mem_alloc()
14249 veb->enabled_tc = 1; in i40e_veb_mem_alloc()
14251 pf->veb[i] = veb; in i40e_veb_mem_alloc()
14254 mutex_unlock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14259 * i40e_switch_branch_release - Delete a branch of the switch tree
14267 struct i40e_pf *pf = branch->pf; in i40e_switch_branch_release()
14268 u16 branch_seid = branch->seid; in i40e_switch_branch_release()
14269 u16 veb_idx = branch->idx; in i40e_switch_branch_release()
14272 /* release any VEBs on this VEB - RECURSION */ in i40e_switch_branch_release()
14274 if (!pf->veb[i]) in i40e_switch_branch_release()
14276 if (pf->veb[i]->uplink_seid == branch->seid) in i40e_switch_branch_release()
14277 i40e_switch_branch_release(pf->veb[i]); in i40e_switch_branch_release()
14285 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_switch_branch_release()
14286 if (!pf->vsi[i]) in i40e_switch_branch_release()
14288 if (pf->vsi[i]->uplink_seid == branch_seid && in i40e_switch_branch_release()
14289 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
14290 i40e_vsi_release(pf->vsi[i]); in i40e_switch_branch_release()
14299 if (pf->veb[veb_idx]) in i40e_switch_branch_release()
14300 i40e_veb_release(pf->veb[veb_idx]); in i40e_switch_branch_release()
14304 * i40e_veb_clear - remove veb struct
14312 if (veb->pf) { in i40e_veb_clear()
14313 struct i40e_pf *pf = veb->pf; in i40e_veb_clear()
14315 mutex_lock(&pf->switch_mutex); in i40e_veb_clear()
14316 if (pf->veb[veb->idx] == veb) in i40e_veb_clear()
14317 pf->veb[veb->idx] = NULL; in i40e_veb_clear()
14318 mutex_unlock(&pf->switch_mutex); in i40e_veb_clear()
14325 * i40e_veb_release - Delete a VEB and free its resources
14334 pf = veb->pf; in i40e_veb_release()
14337 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_veb_release()
14338 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { in i40e_veb_release()
14340 vsi = pf->vsi[i]; in i40e_veb_release()
14344 dev_info(&pf->pdev->dev, in i40e_veb_release()
14346 veb->seid, n); in i40e_veb_release()
14351 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; in i40e_veb_release()
14352 if (veb->uplink_seid) { in i40e_veb_release()
14353 vsi->uplink_seid = veb->uplink_seid; in i40e_veb_release()
14354 if (veb->uplink_seid == pf->mac_seid) in i40e_veb_release()
14355 vsi->veb_idx = I40E_NO_VEB; in i40e_veb_release()
14357 vsi->veb_idx = veb->veb_idx; in i40e_veb_release()
14360 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_veb_release()
14361 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; in i40e_veb_release()
14364 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_veb_release()
14369 * i40e_add_veb - create the VEB in the switch
14375 struct i40e_pf *pf = veb->pf; in i40e_add_veb()
14376 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); in i40e_add_veb()
14379 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, in i40e_add_veb()
14380 veb->enabled_tc, false, in i40e_add_veb()
14381 &veb->seid, enable_stats, NULL); in i40e_add_veb()
14385 dev_info(&pf->pdev->dev, in i40e_add_veb()
14387 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
14388 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14389 return -EPERM; in i40e_add_veb()
14393 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, in i40e_add_veb()
14394 &veb->stats_idx, NULL, NULL, NULL); in i40e_add_veb()
14396 dev_info(&pf->pdev->dev, in i40e_add_veb()
14398 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
14399 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14400 return -EPERM; in i40e_add_veb()
14404 dev_info(&pf->pdev->dev, in i40e_add_veb()
14406 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
14407 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14408 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_add_veb()
14409 return -ENOENT; in i40e_add_veb()
14412 vsi->uplink_seid = veb->seid; in i40e_add_veb()
14413 vsi->veb_idx = veb->idx; in i40e_add_veb()
14414 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_add_veb()
14420 * i40e_veb_setup - Set up a VEB
14425 * @enabled_tc: Enabled TC bit-map
14446 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14453 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) in i40e_veb_setup()
14454 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) in i40e_veb_setup()
14456 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { in i40e_veb_setup()
14457 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", in i40e_veb_setup()
14462 if (uplink_seid && uplink_seid != pf->mac_seid) { in i40e_veb_setup()
14464 if (pf->veb[veb_idx] && in i40e_veb_setup()
14465 pf->veb[veb_idx]->seid == uplink_seid) { in i40e_veb_setup()
14466 uplink_veb = pf->veb[veb_idx]; in i40e_veb_setup()
14471 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14481 veb = pf->veb[veb_idx]; in i40e_veb_setup()
14482 veb->flags = flags; in i40e_veb_setup()
14483 veb->uplink_seid = uplink_seid; in i40e_veb_setup()
14484 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); in i40e_veb_setup()
14485 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); in i40e_veb_setup()
14488 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); in i40e_veb_setup()
14491 if (vsi_idx == pf->lan_vsi) in i40e_veb_setup()
14492 pf->lan_veb = veb->idx; in i40e_veb_setup()
14503 * i40e_setup_pf_switch_element - set PF vars based on switch type
14515 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); in i40e_setup_pf_switch_element()
14516 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); in i40e_setup_pf_switch_element()
14517 u8 element_type = ele->element_type; in i40e_setup_pf_switch_element()
14518 u16 seid = le16_to_cpu(ele->seid); in i40e_setup_pf_switch_element()
14521 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14527 pf->mac_seid = seid; in i40e_setup_pf_switch_element()
14531 if (uplink_seid != pf->mac_seid) in i40e_setup_pf_switch_element()
14533 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14538 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { in i40e_setup_pf_switch_element()
14539 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14543 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14547 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14550 if (pf->lan_veb >= I40E_MAX_VEB) in i40e_setup_pf_switch_element()
14553 pf->veb[pf->lan_veb]->seid = seid; in i40e_setup_pf_switch_element()
14554 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; in i40e_setup_pf_switch_element()
14555 pf->veb[pf->lan_veb]->pf = pf; in i40e_setup_pf_switch_element()
14556 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; in i40e_setup_pf_switch_element()
14564 pf->mac_seid = uplink_seid; in i40e_setup_pf_switch_element()
14565 pf->pf_seid = downlink_seid; in i40e_setup_pf_switch_element()
14566 pf->main_vsi_seid = seid; in i40e_setup_pf_switch_element()
14568 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14570 pf->pf_seid, pf->main_vsi_seid); in i40e_setup_pf_switch_element()
14581 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", in i40e_setup_pf_switch_element()
14588 * i40e_fetch_switch_configuration - Get switch config from firmware
14605 return -ENOMEM; in i40e_fetch_switch_configuration()
14611 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, in i40e_fetch_switch_configuration()
14615 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
14617 i40e_stat_str(&pf->hw, ret), in i40e_fetch_switch_configuration()
14618 i40e_aq_str(&pf->hw, in i40e_fetch_switch_configuration()
14619 pf->hw.aq.asq_last_status)); in i40e_fetch_switch_configuration()
14621 return -ENOENT; in i40e_fetch_switch_configuration()
14624 num_reported = le16_to_cpu(sw_config->header.num_reported); in i40e_fetch_switch_configuration()
14625 num_total = le16_to_cpu(sw_config->header.num_total); in i40e_fetch_switch_configuration()
14628 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
14634 &sw_config->element[i]; in i40e_fetch_switch_configuration()
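/*
 * Editor's illustrative sketch (not driver code): the switch configuration
 * arrives from firmware as a little-endian blob, which is why every u16
 * above passes through le16_to_cpu() before use. A portable userspace
 * analogue of that walk; the field offsets here are illustrative, not the
 * real AQ element layout:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));	/* host-byte-order independent */
}

static void walk_elements(const uint8_t *buf, size_t elem_size, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		const uint8_t *ele = buf + i * elem_size;

		printf("seid %u uplink %u\n", le16(ele), le16(ele + 2));
	}
}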
14646 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14648 * @reinit: if the Main VSI needs to be re-initialized. in i40e_setup_pf_switch()
14661 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
14663 i40e_stat_str(&pf->hw, ret), in i40e_setup_pf_switch()
14664 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
14675 if ((pf->hw.pf_id == 0) && in i40e_setup_pf_switch()
14676 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { in i40e_setup_pf_switch()
14678 pf->last_sw_conf_flags = flags; in i40e_setup_pf_switch()
14681 if (pf->hw.pf_id == 0) { in i40e_setup_pf_switch()
14685 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, in i40e_setup_pf_switch()
14687 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_setup_pf_switch()
14688 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
14690 i40e_stat_str(&pf->hw, ret), in i40e_setup_pf_switch()
14691 i40e_aq_str(&pf->hw, in i40e_setup_pf_switch()
14692 pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
14695 pf->last_sw_conf_valid_flags = valid_flags; in i40e_setup_pf_switch()
14699 if (pf->lan_vsi == I40E_NO_VSI || reinit) { in i40e_setup_pf_switch()
14706 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_setup_pf_switch()
14707 uplink_seid = pf->veb[pf->lan_veb]->seid; in i40e_setup_pf_switch()
14709 uplink_seid = pf->mac_seid; in i40e_setup_pf_switch()
14710 if (pf->lan_vsi == I40E_NO_VSI) in i40e_setup_pf_switch()
14713 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
14715 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); in i40e_setup_pf_switch()
14718 return -EAGAIN; in i40e_setup_pf_switch()
14722 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_setup_pf_switch()
14724 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
14725 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_setup_pf_switch()
14726 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_setup_pf_switch()
14728 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
14735 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", in i40e_setup_pf_switch()
14743 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_setup_pf_switch()
14749 /* Initialize user-specific link properties */ in i40e_setup_pf_switch()
14750 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & in i40e_setup_pf_switch()
14759 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); in i40e_setup_pf_switch()
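/*
 * Editor's note: i40e_aq_set_switch_config() above takes both a value mask
 * (flags) and a valid mask (valid_flags); the firmware applies only the
 * bits named in valid_flags, so unrelated switch settings survive the
 * call. A minimal model of that read-modify-write:
 */
#include <stdint.h>

static uint16_t apply_switch_flags(uint16_t cur, uint16_t flags,
				   uint16_t valid)
{
	/* keep untouched bits, replace only the ones declared valid */
	return (uint16_t)((cur & ~valid) | (flags & valid));
}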
14768 * i40e_determine_queue_usage - Work out queue distribution
14776 pf->num_lan_qps = 0; in i40e_determine_queue_usage()
14782 queues_left = pf->hw.func_caps.num_tx_qp; in i40e_determine_queue_usage()
14785 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { in i40e_determine_queue_usage()
14788 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
14791 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
14799 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
14800 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
14805 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
14806 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
14808 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
14814 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
14816 /* Not enough queues for all TCs */ in i40e_determine_queue_usage()
14817 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && in i40e_determine_queue_usage()
14819 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | in i40e_determine_queue_usage()
14821 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); in i40e_determine_queue_usage()
14825 q_max = max_t(int, pf->rss_size_max, num_online_cpus()); in i40e_determine_queue_usage()
14826 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); in i40e_determine_queue_usage()
14827 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); in i40e_determine_queue_usage()
14828 pf->num_lan_qps = q_max; in i40e_determine_queue_usage()
14830 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
14833 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_determine_queue_usage()
14835 queues_left -= 1; /* save 1 queue for FD */ in i40e_determine_queue_usage()
14837 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_determine_queue_usage()
14838 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
14839 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); in i40e_determine_queue_usage()
14843 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_determine_queue_usage()
14844 pf->num_vf_qps && pf->num_req_vfs && queues_left) { in i40e_determine_queue_usage()
14845 pf->num_req_vfs = min_t(int, pf->num_req_vfs, in i40e_determine_queue_usage()
14846 (queues_left / pf->num_vf_qps)); in i40e_determine_queue_usage()
14847 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); in i40e_determine_queue_usage()
14850 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_determine_queue_usage()
14851 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { in i40e_determine_queue_usage()
14852 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, in i40e_determine_queue_usage()
14853 (queues_left / pf->num_vmdq_qps)); in i40e_determine_queue_usage()
14854 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); in i40e_determine_queue_usage()
14857 pf->queues_left = queues_left; in i40e_determine_queue_usage()
14858 dev_dbg(&pf->pdev->dev, in i40e_determine_queue_usage()
14860 pf->hw.func_caps.num_tx_qp, in i40e_determine_queue_usage()
14861 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), in i40e_determine_queue_usage()
14862 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, in i40e_determine_queue_usage()
14863 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, in i40e_determine_queue_usage()
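/*
 * Editor's worked example: with rss_size_max = 512, 16 online CPUs,
 * num_tx_qp = 384 and num_msix_vectors = 129, the clamp above gives
 * max(512, 16) = 512, then min(512, 384) = 384, then min(384, 129) = 129
 * LAN queue pairs. The same chain in plain C:
 */
static int clamp_lan_qps(int rss_size_max, int ncpus, int num_tx_qp,
			 int num_msix)
{
	int q = rss_size_max > ncpus ? rss_size_max : ncpus;	/* max_t */

	if (q > num_tx_qp)					/* min_t */
		q = num_tx_qp;
	if (q > num_msix)					/* min_t */
		q = num_msix;
	return q;
}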
14868 * i40e_setup_pf_filter_control - Setup PF static filter control
14880 struct i40e_filter_control_settings *settings = &pf->filter_settings; in i40e_setup_pf_filter_control()
14882 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; in i40e_setup_pf_filter_control()
14885 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) in i40e_setup_pf_filter_control()
14886 settings->enable_fdir = true; in i40e_setup_pf_filter_control()
14889 settings->enable_ethtype = true; in i40e_setup_pf_filter_control()
14890 settings->enable_macvlan = true; in i40e_setup_pf_filter_control()
14892 if (i40e_set_filter_control(&pf->hw, settings)) in i40e_setup_pf_filter_control()
14893 return -ENOENT; in i40e_setup_pf_filter_control()
14899 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
14902 struct i40e_hw *hw = &pf->hw; in i40e_print_features()
14910 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); in i40e_print_features()
14912 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); in i40e_print_features()
14915 pf->hw.func_caps.num_vsis, in i40e_print_features()
14916 pf->vsi[pf->lan_vsi]->num_queue_pairs); in i40e_print_features()
14917 if (pf->flags & I40E_FLAG_RSS_ENABLED) in i40e_print_features()
14919 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) in i40e_print_features()
14921 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_print_features()
14925 if (pf->flags & I40E_FLAG_DCB_CAPABLE) in i40e_print_features()
14929 if (pf->flags & I40E_FLAG_PTP) in i40e_print_features()
14931 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_print_features()
14936 dev_info(&pf->pdev->dev, "%s\n", buf); in i40e_print_features()
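/*
 * Editor's illustrative sketch: the REMAIN()/scnprintf() pairing above is
 * the standard kernel idiom for appending into a fixed buffer without
 * overflow. In userspace, snprintf() returns the would-be length rather
 * than the bytes actually written, so the running count must be clamped
 * by hand:
 */
#include <stdio.h>

#define BUF_LEN 128
#define BUF_REMAIN(x) (BUF_LEN - (x))

static void build_features(char *buf)
{
	int i = snprintf(buf, BUF_LEN, "Features:");

	if (i < BUF_LEN)
		i += snprintf(&buf[i], BUF_REMAIN(i), " RSS");
	if (i < BUF_LEN)
		i += snprintf(&buf[i], BUF_REMAIN(i), " DCB");
	/* kernel scnprintf() makes this clamping implicit */
}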
14942 * i40e_get_platform_mac_addr - get platform-specific MAC address
14953 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) in i40e_get_platform_mac_addr()
14954 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); in i40e_get_platform_mac_addr()
14958 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
14981 * i40e_check_recovery_mode - check if we are running transition firmware
14991 u32 val = rd32(&pf->hw, I40E_GL_FWSTS); in i40e_check_recovery_mode()
14994 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); in i40e_check_recovery_mode()
14995 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); in i40e_check_recovery_mode()
14996 set_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_check_recovery_mode()
15000 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_check_recovery_mode()
15001 dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n"); in i40e_check_recovery_mode()
15007 * i40e_pf_loop_reset - perform reset in a loop.
15017 * state is to issue a series of pf-resets and check a return value.
15029 /* wait max 10 seconds for PF reset to succeed */ in i40e_pf_loop_reset()
15032 struct i40e_hw *hw = &pf->hw; in i40e_pf_loop_reset()
15042 pf->pfr_count++; in i40e_pf_loop_reset()
15044 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); in i40e_pf_loop_reset()
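/*
 * Editor's illustrative sketch (not driver code): i40e_pf_loop_reset()
 * retries the PF reset until it sticks or roughly 10 seconds elapse. The
 * shape of a deadline-bounded retry loop in portable C, with a
 * hypothetical try_reset() standing in for the reset call:
 */
#include <stdbool.h>
#include <time.h>

static bool try_reset(void) { return false; }	/* stub: always failing */

static int reset_with_deadline(void)
{
	time_t deadline = time(NULL) + 10;	/* cf. jiffies + 10 * HZ */

	while (time(NULL) < deadline) {
		if (try_reset())
			return 0;
		/* the driver sleeps briefly between attempts */
		nanosleep(&(struct timespec){ .tv_nsec = 10000000 }, NULL);
	}
	return -1;				/* caller logs and bails */
}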
15050 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15062 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & in i40e_check_fw_empr()
15069 * i40e_handle_resets - handle EMP resets and PF resets
15085 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); in i40e_handle_resets()
15091 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15106 pci_save_state(pf->pdev); in i40e_init_recovery_mode()
15109 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_init_recovery_mode()
15110 pf->service_timer_period = HZ; in i40e_init_recovery_mode()
15112 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_init_recovery_mode()
15113 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_init_recovery_mode()
15124 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_init_recovery_mode()
15125 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_init_recovery_mode()
15127 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_init_recovery_mode()
15130 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
15132 if (!pf->vsi) { in i40e_init_recovery_mode()
15133 err = -ENOMEM; in i40e_init_recovery_mode()
15145 pf->lan_vsi = v_idx; in i40e_init_recovery_mode()
15146 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
15148 err = -EFAULT; in i40e_init_recovery_mode()
15151 vsi->alloc_queue_pairs = 1; in i40e_init_recovery_mode()
15155 err = register_netdev(vsi->netdev); in i40e_init_recovery_mode()
15158 vsi->netdev_registered = true; in i40e_init_recovery_mode()
15169 mod_timer(&pf->service_timer, in i40e_init_recovery_mode()
15170 round_jiffies(jiffies + pf->service_timer_period)); in i40e_init_recovery_mode()
15176 del_timer_sync(&pf->service_timer); in i40e_init_recovery_mode()
15178 iounmap(hw->hw_addr); in i40e_init_recovery_mode()
15179 pci_disable_pcie_error_reporting(pf->pdev); in i40e_init_recovery_mode()
15180 pci_release_mem_regions(pf->pdev); in i40e_init_recovery_mode()
15181 pci_disable_device(pf->pdev); in i40e_init_recovery_mode()
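/*
 * Editor's note: the unwind steps above run in strict reverse order of
 * setup (timer, then netdev/VSIs, then the mapped BAR, then PCI core
 * state), which is the usual kernel error-path idiom. Minimal model with
 * hypothetical setup_a()/setup_b()/undo_a() steps:
 */
static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; }
static void undo_a(void) { }

static int setup_all(void)
{
	if (setup_a())
		goto err_a;
	if (setup_b())
		goto err_b;
	return 0;

err_b:
	undo_a();	/* undo only what already succeeded */
err_a:
	return -1;
}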
15188 * i40e_set_subsystem_device_id - set subsystem device id
15196 struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev; in i40e_set_subsystem_device_id()
15198 hw->subsystem_device_id = pdev->subsystem_device ? in i40e_set_subsystem_device_id()
15199 pdev->subsystem_device : in i40e_set_subsystem_device_id()
15204 * i40e_probe - Device initialization routine
15235 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in i40e_probe()
15237 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in i40e_probe()
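/*
 * Editor's note: the two calls above are the standard 64-bit-then-32-bit
 * DMA mask fallback -- dma_set_mask_and_coherent() is retried with
 * DMA_BIT_MASK(32) only if the 64-bit mask is rejected, and probe aborts
 * only when both fail, since the device then has no usable DMA range.
 */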
15239 dev_err(&pdev->dev, in i40e_probe()
15248 dev_info(&pdev->dev, in i40e_probe()
15263 err = -ENOMEM; in i40e_probe()
15266 pf->next_vsi = 0; in i40e_probe()
15267 pf->pdev = pdev; in i40e_probe()
15268 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
15270 hw = &pf->hw; in i40e_probe()
15271 hw->back = pf; in i40e_probe()
15273 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), in i40e_probe()
15280 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { in i40e_probe()
15281 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", in i40e_probe()
15282 pf->ioremap_len); in i40e_probe()
15283 err = -ENOMEM; in i40e_probe()
15286 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); in i40e_probe()
15287 if (!hw->hw_addr) { in i40e_probe()
15288 err = -EIO; in i40e_probe()
15289 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", in i40e_probe()
15291 pf->ioremap_len, err); in i40e_probe()
15294 hw->vendor_id = pdev->vendor; in i40e_probe()
15295 hw->device_id = pdev->device; in i40e_probe()
15296 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); in i40e_probe()
15297 hw->subsystem_vendor_id = pdev->subsystem_vendor; in i40e_probe()
15299 hw->bus.device = PCI_SLOT(pdev->devfn); in i40e_probe()
15300 hw->bus.func = PCI_FUNC(pdev->devfn); in i40e_probe()
15301 hw->bus.bus_id = pdev->bus->number; in i40e_probe()
15302 pf->instance = pfs_found; in i40e_probe()
15307 hw->switch_tag = 0xffff; in i40e_probe()
15308 hw->first_tag = ETH_P_8021AD; in i40e_probe()
15309 hw->second_tag = ETH_P_8021Q; in i40e_probe()
15311 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_probe()
15312 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_probe()
15313 INIT_LIST_HEAD(&pf->ddp_old_prof); in i40e_probe()
15318 mutex_init(&hw->aq.asq_mutex); in i40e_probe()
15319 mutex_init(&hw->aq.arq_mutex); in i40e_probe()
15321 pf->msg_enable = netif_msg_init(debug, in i40e_probe()
15325 if (debug < -1) in i40e_probe()
15326 pf->hw.debug_mask = debug; in i40e_probe()
15329 if (hw->revision_id == 0 && in i40e_probe()
15334 pf->corer_count++; in i40e_probe()
15344 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", in i40e_probe()
15356 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN; in i40e_probe()
15357 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN; in i40e_probe()
15359 hw->aq.num_arq_entries = I40E_AQ_LEN; in i40e_probe()
15360 hw->aq.num_asq_entries = I40E_AQ_LEN; in i40e_probe()
15362 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; in i40e_probe()
15363 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; in i40e_probe()
15364 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; in i40e_probe()
15366 snprintf(pf->int_name, sizeof(pf->int_name) - 1, in i40e_probe()
15367 "%s-%s:misc", in i40e_probe()
15368 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); in i40e_probe()
15372 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", in i40e_probe()
15378 pf->hw.fc.requested_mode = I40E_FC_NONE; in i40e_probe()
15383 dev_info(&pdev->dev, in i40e_probe()
15385 hw->aq.api_maj_ver, in i40e_probe()
15386 hw->aq.api_min_ver, in i40e_probe()
15390 dev_info(&pdev->dev, in i40e_probe()
15398 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", in i40e_probe()
15399 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, in i40e_probe()
15400 hw->aq.api_maj_ver, hw->aq.api_min_ver, in i40e_probe()
15401 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, in i40e_probe()
15402 hw->subsystem_vendor_id, hw->subsystem_device_id); in i40e_probe()
15404 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && in i40e_probe()
15405 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) in i40e_probe()
15406 dev_info(&pdev->dev, in i40e_probe()
15408 hw->aq.api_maj_ver, in i40e_probe()
15409 hw->aq.api_min_ver, in i40e_probe()
15412 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) in i40e_probe()
15413 dev_info(&pdev->dev, in i40e_probe()
15415 hw->aq.api_maj_ver, in i40e_probe()
15416 hw->aq.api_min_ver, in i40e_probe()
15423 if (hw->revision_id < 1) in i40e_probe()
15424 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); in i40e_probe()
15434 dev_info(&pdev->dev, "sw_init failed: %d\n", err); in i40e_probe()
15438 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_probe()
15441 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, in i40e_probe()
15442 hw->func_caps.num_rx_qp, 0, 0); in i40e_probe()
15444 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); in i40e_probe()
15450 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); in i40e_probe()
15451 err = -ENOENT; in i40e_probe()
15459 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { in i40e_probe()
15460 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); in i40e_probe()
15467 if (!is_valid_ether_addr(hw->mac.addr)) { in i40e_probe()
15468 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); in i40e_probe()
15469 err = -EIO; in i40e_probe()
15472 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); in i40e_probe()
15473 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); in i40e_probe()
15474 i40e_get_port_mac_addr(hw, hw->mac.port_addr); in i40e_probe()
15475 if (is_valid_ether_addr(hw->mac.port_addr)) in i40e_probe()
15476 pf->hw_features |= I40E_HW_PORT_ID_VALID; in i40e_probe()
15483 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); in i40e_probe()
15486 (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) : in i40e_probe()
15487 (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP); in i40e_probe()
15488 dev_info(&pdev->dev, in i40e_probe()
15489 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? in i40e_probe()
15493 /* Enable FW to write default DCB config on link-up */ in i40e_probe()
15498 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); in i40e_probe()
15499 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); in i40e_probe()
15505 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_probe()
15506 pf->service_timer_period = HZ; in i40e_probe()
15508 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_probe()
15509 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_probe()
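/*
 * Editor's illustrative sketch: the service path pairs the timer_setup()
 * timer above with the INIT_WORK() item -- the timer callback runs in
 * atomic context, so it re-arms itself and defers the real work to the
 * workqueue. Assuming the fields set up above, the callback is roughly:
 */
static void example_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);	/* queue_work() under the hood */
}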
15513 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1) in i40e_probe()
15514 pf->wol_en = false; in i40e_probe()
15516 pf->wol_en = true; in i40e_probe()
15517 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_probe()
15526 * When MSI-X is enabled, a VSI may not use more TC queue in i40e_probe()
15527 * pairs than there are MSI-X vectors (pf->num_lan_msix). Thus in i40e_probe()
15528 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1. in i40e_probe()
15531 pf->num_lan_msix = 1; in i40e_probe()
15533 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; in i40e_probe()
15534 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; in i40e_probe()
15535 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in i40e_probe()
15536 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; in i40e_probe()
15537 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; in i40e_probe()
15538 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | in i40e_probe()
15546 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_probe()
15547 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_probe()
15549 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_probe()
15550 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in i40e_probe()
15551 dev_warn(&pf->pdev->dev, in i40e_probe()
15553 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in i40e_probe()
15554 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in i40e_probe()
15558 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
15560 if (!pf->vsi) { in i40e_probe()
15561 err = -ENOMEM; in i40e_probe()
15567 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
15568 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
15569 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
15571 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_probe()
15576 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); in i40e_probe()
15579 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); in i40e_probe()
15582 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_probe()
15583 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { in i40e_probe()
15584 i40e_vsi_open(pf->vsi[i]); in i40e_probe()
15592 err = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_probe()
15597 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", in i40e_probe()
15598 i40e_stat_str(&pf->hw, err), in i40e_probe()
15599 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
15612 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_probe()
15614 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_probe()
15616 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", in i40e_probe()
15617 i40e_stat_str(&pf->hw, err), in i40e_probe()
15618 i40e_aq_str(&pf->hw, in i40e_probe()
15619 pf->hw.aq.asq_last_status)); in i40e_probe()
15625 clear_bit(__I40E_DOWN, pf->state); in i40e_probe()
15632 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_probe()
15635 dev_info(&pdev->dev, in i40e_probe()
15645 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
15646 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
15647 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
15655 dev_info(&pdev->dev, in i40e_probe()
15659 dev_info(&pdev->dev, in i40e_probe()
15666 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
15667 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_probe()
15668 pf->num_iwarp_msix, in i40e_probe()
15670 if (pf->iwarp_base_vector < 0) { in i40e_probe()
15671 dev_info(&pdev->dev, in i40e_probe()
15673 pf->num_iwarp_msix, pf->iwarp_base_vector); in i40e_probe()
15674 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_probe()
15684 mod_timer(&pf->service_timer, in i40e_probe()
15685 round_jiffies(jiffies + pf->service_timer_period)); in i40e_probe()
15688 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
15691 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", in i40e_probe()
15701 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { in i40e_probe()
15708 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, in i40e_probe()
15713 switch (hw->bus.speed) { in i40e_probe()
15723 switch (hw->bus.width) { in i40e_probe()
15736 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", in i40e_probe()
15739 if (hw->bus.width < i40e_bus_width_pcie_x8 || in i40e_probe()
15740 hw->bus.speed < i40e_bus_speed_8000) { in i40e_probe()
15741 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); in i40e_probe()
15742 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); in i40e_probe()
15749 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", in i40e_probe()
15750 i40e_stat_str(&pf->hw, err), in i40e_probe()
15751 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
15752 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; in i40e_probe()
15755 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); in i40e_probe()
15760 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", in i40e_probe()
15761 i40e_stat_str(&pf->hw, err), in i40e_probe()
15762 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
15766 val = (rd32(&pf->hw, I40E_PRTGL_SAH) & in i40e_probe()
15769 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", in i40e_probe()
15778 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_probe()
15779 pf->main_vsi_seid); in i40e_probe()
15781 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || in i40e_probe()
15782 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) in i40e_probe()
15783 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; in i40e_probe()
15784 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) in i40e_probe()
15785 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; in i40e_probe()
15793 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
15795 kfree(pf->vsi); in i40e_probe()
15798 del_timer_sync(&pf->service_timer); in i40e_probe()
15803 kfree(pf->qp_pile); in i40e_probe()
15807 iounmap(hw->hw_addr); in i40e_probe()
15820 * i40e_remove - Device removal routine
15825 * Hot-Plug event, or because the driver is going to be removed from
15831 struct i40e_hw *hw = &pf->hw; in i40e_remove()
15843 while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_remove()
15846 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { in i40e_remove()
15847 set_bit(__I40E_VF_RESETS_DISABLED, pf->state); in i40e_remove()
15849 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; in i40e_remove()
15852 set_bit(__I40E_SUSPENDED, pf->state); in i40e_remove()
15853 set_bit(__I40E_DOWN, pf->state); in i40e_remove()
15854 if (pf->service_timer.function) in i40e_remove()
15855 del_timer_sync(&pf->service_timer); in i40e_remove()
15856 if (pf->service_task.func) in i40e_remove()
15857 cancel_work_sync(&pf->service_task); in i40e_remove()
15859 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_remove()
15860 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove()
15866 unregister_netdev(vsi->netdev); in i40e_remove()
15867 free_netdev(vsi->netdev); in i40e_remove()
15875 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_remove()
15883 if (!pf->veb[i]) in i40e_remove()
15886 if (pf->veb[i]->uplink_seid == pf->mac_seid || in i40e_remove()
15887 pf->veb[i]->uplink_seid == 0) in i40e_remove()
15888 i40e_switch_branch_release(pf->veb[i]); in i40e_remove()
15894 if (pf->vsi[pf->lan_vsi]) in i40e_remove()
15895 i40e_vsi_release(pf->vsi[pf->lan_vsi]); in i40e_remove()
15900 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_remove()
15903 dev_warn(&pdev->dev, "Failed to delete client device: %d\n", in i40e_remove()
15908 if (hw->hmc.hmc_obj) { in i40e_remove()
15911 dev_warn(&pdev->dev, in i40e_remove()
15918 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_remove()
15919 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_remove()
15920 free_irq(pf->pdev->irq, pf); in i40e_remove()
15926 mutex_destroy(&hw->aq.arq_mutex); in i40e_remove()
15927 mutex_destroy(&hw->aq.asq_mutex); in i40e_remove()
15932 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_remove()
15933 if (pf->vsi[i]) { in i40e_remove()
15934 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_remove()
15935 i40e_vsi_clear_rings(pf->vsi[i]); in i40e_remove()
15936 i40e_vsi_clear(pf->vsi[i]); in i40e_remove()
15937 pf->vsi[i] = NULL; in i40e_remove()
15943 kfree(pf->veb[i]); in i40e_remove()
15944 pf->veb[i] = NULL; in i40e_remove()
15947 kfree(pf->qp_pile); in i40e_remove()
15948 kfree(pf->vsi); in i40e_remove()
15950 iounmap(hw->hw_addr); in i40e_remove()
15959 * i40e_pci_error_detected - warning that something funky happened in PCI land
15972 dev_info(&pdev->dev, "%s: error %d\n", __func__, error); in i40e_pci_error_detected()
15975 dev_info(&pdev->dev, in i40e_pci_error_detected()
15976 "Cannot recover - error happened during device probe\n"); in i40e_pci_error_detected()
15981 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_detected()
15989 * i40e_pci_error_slot_reset - a PCI slot reset just happened
16003 dev_dbg(&pdev->dev, "%s\n", __func__); in i40e_pci_error_slot_reset()
16005 dev_info(&pdev->dev, in i40e_pci_error_slot_reset()
16006 "Cannot re-enable PCI device after reset.\n"); in i40e_pci_error_slot_reset()
16014 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_pci_error_slot_reset()
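/*
 * Editor's note: after pci_enable_device() succeeds, the handler reads a
 * reset-trigger register simply to confirm the device answers on the bus
 * again; a surprise-removed PCI device reads back as all-ones. A hedged
 * model of that liveness check (the all-ones test is illustrative, not
 * the driver's exact criterion):
 */
#include <stdbool.h>
#include <stdint.h>

static bool device_answers(uint32_t reg)
{
	return reg != 0xffffffffu;	/* detached PCI devices read as ~0 */
}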
16025 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16036 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16047 * i40e_pci_error_resume - restart operations after PCI error recovery
16057 dev_dbg(&pdev->dev, "%s\n", __func__); in i40e_pci_error_resume()
16058 if (test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_resume()
16065 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16071 struct i40e_hw *hw = &pf->hw; in i40e_enable_mc_magic_wake()
16077 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { in i40e_enable_mc_magic_wake()
16079 pf->vsi[pf->lan_vsi]->netdev->dev_addr); in i40e_enable_mc_magic_wake()
16081 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16083 ether_addr_copy(mac_addr, hw->mac.addr); in i40e_enable_mc_magic_wake()
16092 if (hw->func_caps.flex10_enable && hw->partition_id != 1) in i40e_enable_mc_magic_wake()
16097 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16107 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16112 * i40e_shutdown - PCI callback for shutting down
16118 struct i40e_hw *hw = &pf->hw; in i40e_shutdown()
16120 set_bit(__I40E_SUSPENDED, pf->state); in i40e_shutdown()
16121 set_bit(__I40E_DOWN, pf->state); in i40e_shutdown()
16123 del_timer_sync(&pf->service_timer); in i40e_shutdown()
16124 cancel_work_sync(&pf->service_task); in i40e_shutdown()
16131 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_shutdown()
16133 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_shutdown()
16139 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_shutdown()
16141 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_shutdown()
16144 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_shutdown()
16145 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_shutdown()
16146 free_irq(pf->pdev->irq, pf); in i40e_shutdown()
16157 pci_wake_from_d3(pdev, pf->wol_en); in i40e_shutdown()
16163 * i40e_suspend - PM callback for moving to D3
16169 struct i40e_hw *hw = &pf->hw; in i40e_suspend()
16172 if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) in i40e_suspend()
16175 set_bit(__I40E_DOWN, pf->state); in i40e_suspend()
16178 del_timer_sync(&pf->service_timer); in i40e_suspend()
16179 cancel_work_sync(&pf->service_task); in i40e_suspend()
16184 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_suspend()
16186 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_suspend()
16197 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_suspend()
16198 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_suspend()
16213 * i40e_resume - PM callback for waking up from D3
16222 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_resume()
16239 clear_bit(__I40E_DOWN, pf->state); in i40e_resume()
16245 clear_bit(__I40E_SUSPENDED, pf->state); in i40e_resume()
16248 mod_timer(&pf->service_timer, in i40e_resume()
16249 round_jiffies(jiffies + pf->service_timer_period)); in i40e_resume()
16278 * i40e_init_module - Driver registration routine
16298 return -ENOMEM; in i40e_init_module()
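/*
 * Editor's illustrative sketch (not the file's exact body): module init
 * typically allocates the driver workqueue used by the service task and
 * then registers the PCI driver; exit reverses the order. Assuming the
 * i40e_driver pci_driver and i40e_driver_name defined elsewhere in this
 * file, the shape is roughly:
 *
 *	i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
 *	if (!i40e_wq) {
 *		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
 *		return -ENOMEM;
 *	}
 *	...
 *	return pci_register_driver(&i40e_driver);
 */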
16307 * i40e_exit_module - Driver exit cleanup routine