Lines matching refs: hdev
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 static void hclge_update_fec_stats(struct hclge_dev *hdev);
490 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) in hclge_mac_update_stats_defective() argument
494 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_defective()
502 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); in hclge_mac_update_stats_defective()
504 dev_err(&hdev->pdev->dev, in hclge_mac_update_stats_defective()
526 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) in hclge_mac_update_stats_complete() argument
530 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; in hclge_mac_update_stats_complete()
531 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_complete()
550 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); in hclge_mac_update_stats_complete()
556 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); in hclge_mac_update_stats_complete()
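The min_t() clamp at line 556 is the load-bearing detail here: the number of u64 counters copied out is bounded by both the size of the driver's mac_stats struct and the register count the firmware reported, so a newer firmware exporting extra registers cannot overflow an older driver's buffer. A minimal standalone sketch of that clamp, with a hypothetical stats layout:

#include <stdint.h>
#include <string.h>

/* Hypothetical subset of the driver's MAC stats struct. */
struct mac_stats {
	uint64_t tx_pause_num;
	uint64_t rx_pause_num;
	uint64_t tx_total_pkt_num;
	uint64_t rx_total_pkt_num;
};

static void copy_mac_stats(struct mac_stats *dst, const uint64_t *regs,
			   uint32_t reg_num)
{
	/* data_size = min_t(u32, sizeof(*dst) / sizeof(u64), reg_num) */
	uint32_t data_size = sizeof(*dst) / sizeof(uint64_t);

	if (reg_num < data_size)
		data_size = reg_num;
	memcpy(dst, regs, data_size * sizeof(uint64_t));
}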
573 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) in hclge_mac_query_reg_num() argument
583 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { in hclge_mac_query_reg_num()
589 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_mac_query_reg_num()
591 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
599 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
607 int hclge_mac_update_stats(struct hclge_dev *hdev) in hclge_mac_update_stats() argument
610 if (hdev->ae_dev->dev_specs.mac_stats_num) in hclge_mac_update_stats()
611 return hclge_mac_update_stats_complete(hdev); in hclge_mac_update_stats()
613 return hclge_mac_update_stats_defective(hdev); in hclge_mac_update_stats()
616 static int hclge_comm_get_count(struct hclge_dev *hdev, in hclge_comm_get_count() argument
624 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_count()
630 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, in hclge_comm_get_stats() argument
638 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_stats()
641 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); in hclge_comm_get_stats()
648 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, in hclge_comm_get_strings() argument
659 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_strings()
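hclge_comm_get_count(), hclge_comm_get_stats() and hclge_comm_get_strings() all gate each descriptor on the same comparison against dev_specs.mac_stats_num, so only counters the firmware actually implements are exposed. A self-contained sketch of that filter; the descriptor layout is illustrative:

#include <stddef.h>
#include <stdint.h>

struct stats_desc {
	const char *name;	/* ethtool string */
	uint32_t stats_num;	/* registers needed for this counter */
};

static size_t count_supported(const struct stats_desc *strs, size_t n,
			      uint32_t mac_stats_num)
{
	size_t i, count = 0;

	for (i = 0; i < n; i++)
		if (strs[i].stats_num <= mac_stats_num)
			count++;
	return count;
}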
669 static void hclge_update_stats_for_all(struct hclge_dev *hdev) in hclge_update_stats_for_all() argument
674 handle = &hdev->vport[0].nic; in hclge_update_stats_for_all()
676 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats_for_all()
678 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
684 hclge_update_fec_stats(hdev); in hclge_update_stats_for_all()
686 status = hclge_mac_update_stats(hdev); in hclge_update_stats_for_all()
688 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
696 struct hclge_dev *hdev = vport->back; in hclge_update_stats() local
699 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) in hclge_update_stats()
702 status = hclge_mac_update_stats(hdev); in hclge_update_stats()
704 dev_err(&hdev->pdev->dev, in hclge_update_stats()
708 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats()
710 dev_err(&hdev->pdev->dev, in hclge_update_stats()
714 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); in hclge_update_stats()
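hclge_update_stats() brackets the refresh with test_and_set_bit()/clear_bit() on HCLGE_STATE_STATISTICS_UPDATING, so a concurrent caller bails out instead of issuing duplicate firmware queries. The same guard, modeled with a C11 atomic flag as a stand-in for the kernel bitops:

#include <stdatomic.h>

static atomic_flag updating = ATOMIC_FLAG_INIT;

static void update_stats(void)
{
	/* test_and_set_bit(): returns the previous value */
	if (atomic_flag_test_and_set(&updating))
		return;		/* another updater already running */

	/* ... query firmware, refresh MAC and TQP counters ... */

	atomic_flag_clear(&updating);	/* clear_bit() */
}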
726 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count() local
737 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || in hclge_get_sset_count()
738 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || in hclge_get_sset_count()
739 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || in hclge_get_sset_count()
740 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { in hclge_get_sset_count()
752 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && in hclge_get_sset_count()
753 hdev->hw.mac.phydev->drv->set_loopback) || in hclge_get_sset_count()
754 hnae3_dev_phy_imp_supported(hdev)) { in hclge_get_sset_count()
759 count = hclge_comm_get_count(hdev, g_mac_stats_string, in hclge_get_sset_count()
771 struct hclge_dev *hdev = vport->back; in hclge_get_strings() local
777 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, in hclge_get_strings()
813 struct hclge_dev *hdev = vport->back; in hclge_get_stats() local
816 p = hclge_comm_get_stats(hdev, g_mac_stats_string, in hclge_get_stats()
825 struct hclge_dev *hdev = vport->back; in hclge_get_mac_stat() local
829 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; in hclge_get_mac_stat()
830 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; in hclge_get_mac_stat()
833 static int hclge_parse_func_status(struct hclge_dev *hdev, in hclge_parse_func_status() argument
843 hdev->flag |= HCLGE_FLAG_MAIN; in hclge_parse_func_status()
845 hdev->flag &= ~HCLGE_FLAG_MAIN; in hclge_parse_func_status()
847 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; in hclge_parse_func_status()
851 static int hclge_query_function_status(struct hclge_dev *hdev) in hclge_query_function_status() argument
864 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_function_status()
866 dev_err(&hdev->pdev->dev, in hclge_query_function_status()
877 return hclge_parse_func_status(hdev, req); in hclge_query_function_status()
880 static int hclge_query_pf_resource(struct hclge_dev *hdev) in hclge_query_pf_resource() argument
887 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_pf_resource()
889 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
895 hdev->num_tqps = le16_to_cpu(req->tqp_num) + in hclge_query_pf_resource()
897 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
900 hdev->tx_buf_size = in hclge_query_pf_resource()
903 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; in hclge_query_pf_resource()
905 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
908 hdev->dv_buf_size = in hclge_query_pf_resource()
911 hdev->dv_buf_size = HCLGE_DEFAULT_DV; in hclge_query_pf_resource()
913 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
915 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); in hclge_query_pf_resource()
916 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { in hclge_query_pf_resource()
917 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
919 hdev->num_nic_msi); in hclge_query_pf_resource()
923 if (hnae3_dev_roce_supported(hdev)) { in hclge_query_pf_resource()
924 hdev->num_roce_msi = in hclge_query_pf_resource()
930 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; in hclge_query_pf_resource()
932 hdev->num_msi = hdev->num_nic_msi; in hclge_query_pf_resource()
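tx_buf_size and dv_buf_size follow the same shape in hclge_query_pf_resource(): take the firmware-provided value when available, otherwise a default, then round up to HCLGE_BUF_SIZE_UNIT. The condition choosing the default is not visible in this listing, so treating a zero firmware value as "use the default" below is an assumption, and the unit/default constants are placeholders:

#include <stdint.h>

#define BUF_SIZE_UNIT	256U	/* placeholder for HCLGE_BUF_SIZE_UNIT */
#define DEFAULT_TX_BUF	0x4000U	/* placeholder for HCLGE_DEFAULT_TX_BUF */

/* Same effect as the kernel's roundup(x, y) for y > 0. */
static uint32_t round_up_unit(uint32_t v, uint32_t unit)
{
	return (v + unit - 1) / unit * unit;
}

static uint32_t pick_tx_buf_size(uint32_t fw_size)
{
	uint32_t size = fw_size ? fw_size : DEFAULT_TX_BUF;

	return round_up_unit(size, BUF_SIZE_UNIT);
}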
1004 struct hclge_dev *hdev = vport->back; in hclge_check_port_speed() local
1005 u32 speed_ability = hdev->hw.mac.speed_ability; in hclge_check_port_speed()
1170 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, in hclge_parse_fiber_link_mode() argument
1173 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_fiber_link_mode()
1182 if (hnae3_dev_fec_supported(hdev)) in hclge_parse_fiber_link_mode()
1185 if (hnae3_dev_pause_supported(hdev)) in hclge_parse_fiber_link_mode()
1192 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, in hclge_parse_backplane_link_mode() argument
1195 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_backplane_link_mode()
1198 if (hnae3_dev_fec_supported(hdev)) in hclge_parse_backplane_link_mode()
1201 if (hnae3_dev_pause_supported(hdev)) in hclge_parse_backplane_link_mode()
1208 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, in hclge_parse_copper_link_mode() argument
1211 unsigned long *supported = hdev->hw.mac.supported; in hclge_parse_copper_link_mode()
1233 if (hnae3_dev_pause_supported(hdev)) { in hclge_parse_copper_link_mode()
1242 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) in hclge_parse_link_mode() argument
1244 u8 media_type = hdev->hw.mac.media_type; in hclge_parse_link_mode()
1247 hclge_parse_fiber_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1249 hclge_parse_copper_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1251 hclge_parse_backplane_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1381 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) in hclge_get_cfg() argument
1402 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); in hclge_get_cfg()
1404 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); in hclge_get_cfg()
1413 static void hclge_set_default_dev_specs(struct hclge_dev *hdev) in hclge_set_default_dev_specs() argument
1417 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_set_default_dev_specs()
1429 static void hclge_parse_dev_specs(struct hclge_dev *hdev, in hclge_parse_dev_specs() argument
1432 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_parse_dev_specs()
1452 static void hclge_check_dev_specs(struct hclge_dev *hdev) in hclge_check_dev_specs() argument
1454 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; in hclge_check_dev_specs()
1474 static int hclge_query_mac_stats_num(struct hclge_dev *hdev) in hclge_query_mac_stats_num() argument
1479 ret = hclge_mac_query_reg_num(hdev, &reg_num); in hclge_query_mac_stats_num()
1483 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; in hclge_query_mac_stats_num()
1487 static int hclge_query_dev_specs(struct hclge_dev *hdev) in hclge_query_dev_specs() argument
1493 ret = hclge_query_mac_stats_num(hdev); in hclge_query_dev_specs()
1500 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_query_dev_specs()
1501 hclge_set_default_dev_specs(hdev); in hclge_query_dev_specs()
1512 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); in hclge_query_dev_specs()
1516 hclge_parse_dev_specs(hdev, desc); in hclge_query_dev_specs()
1517 hclge_check_dev_specs(hdev); in hclge_query_dev_specs()
1522 static int hclge_get_cap(struct hclge_dev *hdev) in hclge_get_cap() argument
1526 ret = hclge_query_function_status(hdev); in hclge_get_cap()
1528 dev_err(&hdev->pdev->dev, in hclge_get_cap()
1534 return hclge_query_pf_resource(hdev); in hclge_get_cap()
1537 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) in hclge_init_kdump_kernel_config() argument
1545 dev_info(&hdev->pdev->dev, in hclge_init_kdump_kernel_config()
1549 hdev->num_tqps = hdev->num_req_vfs + 1; in hclge_init_kdump_kernel_config()
1550 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; in hclge_init_kdump_kernel_config()
1551 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; in hclge_init_kdump_kernel_config()
1554 static void hclge_init_tc_config(struct hclge_dev *hdev) in hclge_init_tc_config() argument
1558 if (hdev->tc_max > HNAE3_MAX_TC || in hclge_init_tc_config()
1559 hdev->tc_max < 1) { in hclge_init_tc_config()
1560 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", in hclge_init_tc_config()
1561 hdev->tc_max); in hclge_init_tc_config()
1562 hdev->tc_max = 1; in hclge_init_tc_config()
1566 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_init_tc_config()
1567 hdev->tc_max = 1; in hclge_init_tc_config()
1568 hdev->pfc_max = 0; in hclge_init_tc_config()
1570 hdev->pfc_max = hdev->tc_max; in hclge_init_tc_config()
1573 hdev->tm_info.num_tc = 1; in hclge_init_tc_config()
1576 for (i = 0; i < hdev->tm_info.num_tc; i++) in hclge_init_tc_config()
1577 hnae3_set_bit(hdev->hw_tc_map, i, 1); in hclge_init_tc_config()
1579 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; in hclge_init_tc_config()
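hclge_init_tc_config() clamps tc_max into a valid range (warning and falling back to 1, and forcing a single TC with no PFC when DCB is unsupported), then sets one bit per enabled TC in hw_tc_map. A standalone sketch of the bitmap construction; MAX_TC stands in for HNAE3_MAX_TC:

#include <stdint.h>

#define MAX_TC	8	/* stand-in for HNAE3_MAX_TC */

static uint8_t build_hw_tc_map(unsigned int num_tc)
{
	uint8_t map = 0;
	unsigned int i;

	if (num_tc < 1 || num_tc > MAX_TC)
		num_tc = 1;
	for (i = 0; i < num_tc; i++)
		map |= 1U << i;	/* hnae3_set_bit(hw_tc_map, i, 1) */
	return map;
}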
1582 static int hclge_configure(struct hclge_dev *hdev) in hclge_configure() argument
1584 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_configure()
1588 ret = hclge_get_cfg(hdev, &cfg); in hclge_configure()
1592 hdev->base_tqp_pid = 0; in hclge_configure()
1593 hdev->vf_rss_size_max = cfg.vf_rss_size_max; in hclge_configure()
1594 hdev->pf_rss_size_max = cfg.pf_rss_size_max; in hclge_configure()
1595 hdev->rx_buf_len = cfg.rx_buf_len; in hclge_configure()
1596 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); in hclge_configure()
1597 hdev->hw.mac.media_type = cfg.media_type; in hclge_configure()
1598 hdev->hw.mac.phy_addr = cfg.phy_addr; in hclge_configure()
1599 hdev->num_tx_desc = cfg.tqp_desc_num; in hclge_configure()
1600 hdev->num_rx_desc = cfg.tqp_desc_num; in hclge_configure()
1601 hdev->tm_info.num_pg = 1; in hclge_configure()
1602 hdev->tc_max = cfg.tc_num; in hclge_configure()
1603 hdev->tm_info.hw_pfc_map = 0; in hclge_configure()
1605 hdev->wanted_umv_size = cfg.umv_space; in hclge_configure()
1607 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; in hclge_configure()
1608 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; in hclge_configure()
1609 hdev->gro_en = true; in hclge_configure()
1613 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_configure()
1614 hdev->fd_en = true; in hclge_configure()
1615 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_configure()
1618 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); in hclge_configure()
1620 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", in hclge_configure()
1625 hclge_parse_link_mode(hdev, cfg.speed_ability); in hclge_configure()
1627 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); in hclge_configure()
1629 hclge_init_tc_config(hdev); in hclge_configure()
1630 hclge_init_kdump_kernel_config(hdev); in hclge_configure()
1635 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, in hclge_config_tso() argument
1647 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_tso()
1650 static int hclge_config_gro(struct hclge_dev *hdev) in hclge_config_gro() argument
1656 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) in hclge_config_gro()
1662 req->gro_en = hdev->gro_en ? 1 : 0; in hclge_config_gro()
1664 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_gro()
1666 dev_err(&hdev->pdev->dev, in hclge_config_gro()
1672 static int hclge_alloc_tqps(struct hclge_dev *hdev) in hclge_alloc_tqps() argument
1674 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_alloc_tqps()
1678 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclge_alloc_tqps()
1680 if (!hdev->htqp) in hclge_alloc_tqps()
1683 tqp = hdev->htqp; in hclge_alloc_tqps()
1685 for (i = 0; i < hdev->num_tqps; i++) { in hclge_alloc_tqps()
1686 tqp->dev = &hdev->pdev->dev; in hclge_alloc_tqps()
1690 tqp->q.buf_size = hdev->rx_buf_len; in hclge_alloc_tqps()
1691 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclge_alloc_tqps()
1692 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclge_alloc_tqps()
1698 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1702 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1713 tqp->q.mem_base = hdev->hw.hw.mem_base + in hclge_alloc_tqps()
1714 HCLGE_TQP_MEM_OFFSET(hdev, i); in hclge_alloc_tqps()
1722 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, in hclge_map_tqps_to_func() argument
1739 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_map_tqps_to_func()
1741 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); in hclge_map_tqps_to_func()
1749 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp() local
1752 for (i = 0, alloced = 0; i < hdev->num_tqps && in hclge_assign_tqp()
1754 if (!hdev->htqp[i].alloced) { in hclge_assign_tqp()
1755 hdev->htqp[i].q.handle = &vport->nic; in hclge_assign_tqp()
1756 hdev->htqp[i].q.tqp_index = alloced; in hclge_assign_tqp()
1757 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; in hclge_assign_tqp()
1758 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; in hclge_assign_tqp()
1759 kinfo->tqp[alloced] = &hdev->htqp[i].q; in hclge_assign_tqp()
1760 hdev->htqp[i].alloced = true; in hclge_assign_tqp()
1765 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, in hclge_assign_tqp()
1766 vport->alloc_tqps / hdev->tm_info.num_tc); in hclge_assign_tqp()
1770 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); in hclge_assign_tqp()
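The rss_size chosen in hclge_assign_tqp() is clamped twice: by pf_rss_size_max and by the per-TC share of the NIC MSI budget, where one vector (apparently the misc vector, which the listing later shows claiming vector 0) is held back. A sketch of the double clamp:

#include <stdint.h>

static uint16_t calc_rss_size(uint16_t pf_rss_size_max, uint16_t alloc_tqps,
			      uint16_t num_tc, uint16_t num_nic_msi)
{
	uint16_t rss_size = alloc_tqps / num_tc;

	if (rss_size > pf_rss_size_max)
		rss_size = pf_rss_size_max;
	/* keep a 1:1 irq-to-queue mapping within the MSI budget */
	if (rss_size > (num_nic_msi - 1) / num_tc)
		rss_size = (num_nic_msi - 1) / num_tc;
	return rss_size;
}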
1781 struct hclge_dev *hdev = vport->back; in hclge_knic_setup() local
1787 kinfo->rx_buf_len = hdev->rx_buf_len; in hclge_knic_setup()
1788 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; in hclge_knic_setup()
1790 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, in hclge_knic_setup()
1797 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); in hclge_knic_setup()
1802 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, in hclge_map_tqp_to_vport() argument
1817 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, in hclge_map_tqp_to_vport()
1826 static int hclge_map_tqp(struct hclge_dev *hdev) in hclge_map_tqp() argument
1828 struct hclge_vport *vport = hdev->vport; in hclge_map_tqp()
1831 num_vport = hdev->num_req_vfs + 1; in hclge_map_tqp()
1835 ret = hclge_map_tqp_to_vport(hdev, vport); in hclge_map_tqp()
1848 struct hclge_dev *hdev = vport->back; in hclge_vport_setup() local
1851 nic->pdev = hdev->pdev; in hclge_vport_setup()
1853 nic->numa_node_mask = hdev->numa_node_mask; in hclge_vport_setup()
1854 nic->kinfo.io_base = hdev->hw.hw.io_base; in hclge_vport_setup()
1857 hdev->num_tx_desc, hdev->num_rx_desc); in hclge_vport_setup()
1859 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); in hclge_vport_setup()
1864 static int hclge_alloc_vport(struct hclge_dev *hdev) in hclge_alloc_vport() argument
1866 struct pci_dev *pdev = hdev->pdev; in hclge_alloc_vport()
1874 num_vport = hdev->num_req_vfs + 1; in hclge_alloc_vport()
1876 if (hdev->num_tqps < num_vport) { in hclge_alloc_vport()
1877 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", in hclge_alloc_vport()
1878 hdev->num_tqps, num_vport); in hclge_alloc_vport()
1883 tqp_per_vport = hdev->num_tqps / num_vport; in hclge_alloc_vport()
1884 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; in hclge_alloc_vport()
1891 hdev->vport = vport; in hclge_alloc_vport()
1892 hdev->num_alloc_vport = num_vport; in hclge_alloc_vport()
1895 hdev->num_alloc_vfs = hdev->num_req_vfs; in hclge_alloc_vport()
1898 vport->back = hdev; in hclge_alloc_vport()
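The split at lines 1883-1884 gives every vport num_tqps / num_vport queue pairs and hands the remainder to the main (PF) vport. Worked standalone:

#include <stdio.h>

static void split_tqps(unsigned int num_tqps, unsigned int num_vport)
{
	unsigned int per_vport = num_tqps / num_vport;
	unsigned int main_vport = per_vport + num_tqps % num_vport;

	printf("main vport: %u tqps, each of %u other vports: %u tqps\n",
	       main_vport, num_vport - 1, per_vport);
}

/* split_tqps(16, 5) prints: main vport: 4 tqps, each of 4 other vports: 3 tqps */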
1928 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, in hclge_cmd_alloc_tx_buff() argument
1950 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_alloc_tx_buff()
1952 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", in hclge_cmd_alloc_tx_buff()
1958 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, in hclge_tx_buffer_alloc() argument
1961 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); in hclge_tx_buffer_alloc()
1964 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); in hclge_tx_buffer_alloc()
1969 static u32 hclge_get_tc_num(struct hclge_dev *hdev) in hclge_get_tc_num() argument
1975 if (hdev->hw_tc_map & BIT(i)) in hclge_get_tc_num()
1981 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_pfc_priv_num() argument
1990 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_pfc_priv_num()
1999 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_no_pfc_priv_num() argument
2008 if (hdev->hw_tc_map & BIT(i) && in hclge_get_no_pfc_priv_num()
2009 !(hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_no_pfc_priv_num()
2041 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, in hclge_is_rx_buf_ok() argument
2046 u32 tc_num = hclge_get_tc_num(hdev); in hclge_is_rx_buf_ok()
2051 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
2053 if (hnae3_dev_dcb_supported(hdev)) in hclge_is_rx_buf_ok()
2055 hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2058 + hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2070 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
2071 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2081 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
2082 hi_thrd = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2107 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, in hclge_tx_buffer_calc() argument
2112 total_size = hdev->pkt_buf_size; in hclge_tx_buffer_calc()
2118 if (hdev->hw_tc_map & BIT(i)) { in hclge_tx_buffer_calc()
2119 if (total_size < hdev->tx_buf_size) in hclge_tx_buffer_calc()
2122 priv->tx_buf_size = hdev->tx_buf_size; in hclge_tx_buffer_calc()
2133 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, in hclge_rx_buf_calc_all() argument
2136 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buf_calc_all()
2137 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_rx_buf_calc_all()
2148 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rx_buf_calc_all()
2153 if (hdev->tm_info.hw_pfc_map & BIT(i)) { in hclge_rx_buf_calc_all()
2163 priv->buf_size = priv->wl.high + hdev->dv_buf_size; in hclge_rx_buf_calc_all()
2166 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_rx_buf_calc_all()
2169 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_nopfc_buf_till_fit() argument
2172 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2173 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2181 if (hdev->hw_tc_map & mask && in hclge_drop_nopfc_buf_till_fit()
2182 !(hdev->tm_info.hw_pfc_map & mask)) { in hclge_drop_nopfc_buf_till_fit()
2191 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_nopfc_buf_till_fit()
2196 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_nopfc_buf_till_fit()
2199 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_pfc_buf_till_fit() argument
2202 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_pfc_buf_till_fit()
2203 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); in hclge_drop_pfc_buf_till_fit()
2211 if (hdev->hw_tc_map & mask && in hclge_drop_pfc_buf_till_fit()
2212 hdev->tm_info.hw_pfc_map & mask) { in hclge_drop_pfc_buf_till_fit()
2221 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_pfc_buf_till_fit()
2226 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_pfc_buf_till_fit()
2229 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, in hclge_only_alloc_priv_buff() argument
2236 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_only_alloc_priv_buff()
2237 u32 tc_num = hclge_get_tc_num(hdev); in hclge_only_alloc_priv_buff()
2238 u32 half_mps = hdev->mps >> 1; in hclge_only_alloc_priv_buff()
2248 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + in hclge_only_alloc_priv_buff()
2263 if (!(hdev->hw_tc_map & BIT(i))) in hclge_only_alloc_priv_buff()
2268 priv->wl.high = rx_priv - hdev->dv_buf_size; in hclge_only_alloc_priv_buff()
2282 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, in hclge_rx_buffer_calc() argument
2286 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_rx_buffer_calc()
2287 u32 rx_all = hdev->pkt_buf_size; in hclge_rx_buffer_calc()
2290 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) in hclge_rx_buffer_calc()
2296 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2299 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) in hclge_rx_buffer_calc()
2303 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) in hclge_rx_buffer_calc()
2306 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2309 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
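hclge_rx_buffer_calc() tries allocation strategies from most to least generous (private buffers only, all TCs at the max waterline, all TCs at the min waterline, then dropping buffers for non-PFC and finally PFC TCs) until one satisfies hclge_is_rx_buf_ok(). The driver writes this as a chain of early returns; restating it as a strategy table makes the ladder explicit (the table form is a rephrasing, not the driver's own structure):

#include <stdbool.h>
#include <stddef.h>

typedef bool (*rx_calc_fn)(void *ctx);	/* one allocation strategy */

static int calc_rx_buffers(rx_calc_fn *strategies, size_t n, void *ctx)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (strategies[i](ctx))
			return 0;	/* first strategy that fits wins */
	return -1;			/* the driver returns -ENOMEM */
}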
2315 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, in hclge_rx_priv_buf_alloc() argument
2340 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_rx_priv_buf_alloc()
2342 dev_err(&hdev->pdev->dev, in hclge_rx_priv_buf_alloc()
2348 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, in hclge_rx_priv_wl_config() argument
2384 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_rx_priv_wl_config()
2386 dev_err(&hdev->pdev->dev, in hclge_rx_priv_wl_config()
2392 static int hclge_common_thrd_config(struct hclge_dev *hdev, in hclge_common_thrd_config() argument
2428 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_common_thrd_config()
2430 dev_err(&hdev->pdev->dev, in hclge_common_thrd_config()
2435 static int hclge_common_wl_config(struct hclge_dev *hdev, in hclge_common_wl_config() argument
2452 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_common_wl_config()
2454 dev_err(&hdev->pdev->dev, in hclge_common_wl_config()
2460 int hclge_buffer_alloc(struct hclge_dev *hdev) in hclge_buffer_alloc() argument
2469 ret = hclge_tx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2471 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2476 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2478 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2483 ret = hclge_rx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2485 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2491 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2493 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", in hclge_buffer_alloc()
2498 if (hnae3_dev_dcb_supported(hdev)) { in hclge_buffer_alloc()
2499 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2501 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2507 ret = hclge_common_thrd_config(hdev, pkt_buf); in hclge_buffer_alloc()
2509 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2516 ret = hclge_common_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2518 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2530 struct hclge_dev *hdev = vport->back; in hclge_init_roce_base_info() local
2534 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) in hclge_init_roce_base_info()
2537 roce->rinfo.base_vector = hdev->num_nic_msi; in hclge_init_roce_base_info()
2540 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; in hclge_init_roce_base_info()
2541 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; in hclge_init_roce_base_info()
2550 static int hclge_init_msi(struct hclge_dev *hdev) in hclge_init_msi() argument
2552 struct pci_dev *pdev = hdev->pdev; in hclge_init_msi()
2557 hdev->num_msi, in hclge_init_msi()
2565 if (vectors < hdev->num_msi) in hclge_init_msi()
2566 dev_warn(&hdev->pdev->dev, in hclge_init_msi()
2568 hdev->num_msi, vectors); in hclge_init_msi()
2570 hdev->num_msi = vectors; in hclge_init_msi()
2571 hdev->num_msi_left = vectors; in hclge_init_msi()
2573 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2575 if (!hdev->vector_status) { in hclge_init_msi()
2580 for (i = 0; i < hdev->num_msi; i++) in hclge_init_msi()
2581 hdev->vector_status[i] = HCLGE_INVALID_VPORT; in hclge_init_msi()
2583 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2585 if (!hdev->vector_irq) { in hclge_init_msi()
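hclge_init_msi() asks for num_msi vectors but tolerates a smaller grant: it warns, then shrinks its budget (num_msi, num_msi_left) to what was actually allocated before sizing the vector_status/vector_irq arrays. A sketch of that accounting with a stand-in allocator:

#include <stdio.h>

/* Stand-in for the PCI core allocation the driver performs; pretend
 * the platform caps us at 8 vectors. */
static int alloc_vectors(int requested)
{
	return requested > 8 ? 8 : requested;
}

static int init_msi(int num_msi)
{
	int vectors = alloc_vectors(num_msi);

	if (vectors < 0)
		return vectors;
	if (vectors < num_msi)
		printf("requested %d MSI/MSI-X vectors, got %d\n",
		       num_msi, vectors);
	return vectors;	/* becomes the new num_msi / num_msi_left */
}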
2627 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, in hclge_cfg_mac_speed_dup_hw() argument
2644 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); in hclge_cfg_mac_speed_dup_hw()
2654 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_speed_dup_hw()
2656 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_speed_dup_hw()
2664 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) in hclge_cfg_mac_speed_dup() argument
2666 struct hclge_mac *mac = &hdev->hw.mac; in hclge_cfg_mac_speed_dup()
2674 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); in hclge_cfg_mac_speed_dup()
2678 hdev->hw.mac.speed = speed; in hclge_cfg_mac_speed_dup()
2679 hdev->hw.mac.duplex = duplex; in hclge_cfg_mac_speed_dup()
2681 hdev->hw.mac.lane_num = lane_num; in hclge_cfg_mac_speed_dup()
2690 struct hclge_dev *hdev = vport->back; in hclge_cfg_mac_speed_dup_h() local
2692 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); in hclge_cfg_mac_speed_dup_h()
2695 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) in hclge_set_autoneg_en() argument
2709 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_autoneg_en()
2711 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", in hclge_set_autoneg_en()
2720 struct hclge_dev *hdev = vport->back; in hclge_set_autoneg() local
2722 if (!hdev->hw.mac.support_autoneg) { in hclge_set_autoneg()
2724 dev_err(&hdev->pdev->dev, in hclge_set_autoneg()
2732 return hclge_set_autoneg_en(hdev, enable); in hclge_set_autoneg()
2738 struct hclge_dev *hdev = vport->back; in hclge_get_autoneg() local
2739 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_autoneg()
2744 return hdev->hw.mac.autoneg; in hclge_get_autoneg()
2750 struct hclge_dev *hdev = vport->back; in hclge_restart_autoneg() local
2753 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); in hclge_restart_autoneg()
2755 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_restart_autoneg()
2758 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_restart_autoneg()
2764 struct hclge_dev *hdev = vport->back; in hclge_halt_autoneg() local
2766 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) in hclge_halt_autoneg()
2767 return hclge_set_autoneg_en(hdev, !halt); in hclge_halt_autoneg()
2772 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, in hclge_parse_fec_stats_lanes() argument
2789 hdev->fec_stats.per_lanes[i] += in hclge_parse_fec_stats_lanes()
2795 static void hclge_parse_fec_stats(struct hclge_dev *hdev, in hclge_parse_fec_stats() argument
2802 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; in hclge_parse_fec_stats()
2803 hdev->fec_stats.rs_corr_blocks += in hclge_parse_fec_stats()
2805 hdev->fec_stats.rs_uncorr_blocks += in hclge_parse_fec_stats()
2807 hdev->fec_stats.rs_error_blocks += in hclge_parse_fec_stats()
2809 hdev->fec_stats.base_r_corr_blocks += in hclge_parse_fec_stats()
2811 hdev->fec_stats.base_r_uncorr_blocks += in hclge_parse_fec_stats()
2814 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); in hclge_parse_fec_stats()
2817 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) in hclge_update_fec_stats_hw() argument
2830 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2834 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2839 static void hclge_update_fec_stats(struct hclge_dev *hdev) in hclge_update_fec_stats() argument
2841 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_fec_stats()
2845 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) in hclge_update_fec_stats()
2848 ret = hclge_update_fec_stats_hw(hdev); in hclge_update_fec_stats()
2850 dev_err(&hdev->pdev->dev, in hclge_update_fec_stats()
2853 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); in hclge_update_fec_stats()
2856 static void hclge_get_fec_stats_total(struct hclge_dev *hdev, in hclge_get_fec_stats_total() argument
2859 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; in hclge_get_fec_stats_total()
2861 hdev->fec_stats.rs_uncorr_blocks; in hclge_get_fec_stats_total()
2864 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, in hclge_get_fec_stats_lanes() argument
2869 if (hdev->fec_stats.base_r_lane_num == 0 || in hclge_get_fec_stats_lanes()
2870 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { in hclge_get_fec_stats_lanes()
2871 dev_err(&hdev->pdev->dev, in hclge_get_fec_stats_lanes()
2873 hdev->fec_stats.base_r_lane_num); in hclge_get_fec_stats_lanes()
2877 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { in hclge_get_fec_stats_lanes()
2879 hdev->fec_stats.base_r_corr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2881 hdev->fec_stats.base_r_uncorr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2885 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, in hclge_comm_get_fec_stats() argument
2888 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_comm_get_fec_stats()
2893 hclge_get_fec_stats_total(hdev, fec_stats); in hclge_comm_get_fec_stats()
2896 hclge_get_fec_stats_lanes(hdev, fec_stats); in hclge_comm_get_fec_stats()
2899 dev_err(&hdev->pdev->dev, in hclge_comm_get_fec_stats()
2910 struct hclge_dev *hdev = vport->back; in hclge_get_fec_stats() local
2911 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_get_fec_stats()
2918 hclge_update_fec_stats(hdev); in hclge_get_fec_stats()
2920 hclge_comm_get_fec_stats(hdev, fec_stats); in hclge_get_fec_stats()
2923 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) in hclge_set_fec_hw() argument
2944 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fec_hw()
2946 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); in hclge_set_fec_hw()
2954 struct hclge_dev *hdev = vport->back; in hclge_set_fec() local
2955 struct hclge_mac *mac = &hdev->hw.mac; in hclge_set_fec()
2959 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); in hclge_set_fec()
2963 ret = hclge_set_fec_hw(hdev, fec_mode); in hclge_set_fec()
2975 struct hclge_dev *hdev = vport->back; in hclge_get_fec() local
2976 struct hclge_mac *mac = &hdev->hw.mac; in hclge_get_fec()
2984 static int hclge_mac_init(struct hclge_dev *hdev) in hclge_mac_init() argument
2986 struct hclge_mac *mac = &hdev->hw.mac; in hclge_mac_init()
2989 hdev->support_sfp_query = true; in hclge_mac_init()
2990 hdev->hw.mac.duplex = HCLGE_MAC_FULL; in hclge_mac_init()
2991 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, in hclge_mac_init()
2992 hdev->hw.mac.duplex, hdev->hw.mac.lane_num); in hclge_mac_init()
2996 if (hdev->hw.mac.support_autoneg) { in hclge_mac_init()
2997 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); in hclge_mac_init()
3005 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); in hclge_mac_init()
3010 ret = hclge_set_mac_mtu(hdev, hdev->mps); in hclge_mac_init()
3012 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); in hclge_mac_init()
3016 ret = hclge_set_default_loopback(hdev); in hclge_mac_init()
3020 ret = hclge_buffer_alloc(hdev); in hclge_mac_init()
3022 dev_err(&hdev->pdev->dev, in hclge_mac_init()
3028 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) in hclge_mbx_task_schedule() argument
3030 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_mbx_task_schedule()
3031 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { in hclge_mbx_task_schedule()
3032 hdev->last_mbx_scheduled = jiffies; in hclge_mbx_task_schedule()
3033 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_mbx_task_schedule()
3037 static void hclge_reset_task_schedule(struct hclge_dev *hdev) in hclge_reset_task_schedule() argument
3039 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_reset_task_schedule()
3040 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && in hclge_reset_task_schedule()
3041 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { in hclge_reset_task_schedule()
3042 hdev->last_rst_scheduled = jiffies; in hclge_reset_task_schedule()
3043 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_reset_task_schedule()
3047 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) in hclge_errhand_task_schedule() argument
3049 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_errhand_task_schedule()
3050 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_task_schedule()
3051 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_errhand_task_schedule()
3054 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) in hclge_task_schedule() argument
3056 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_task_schedule()
3057 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_task_schedule()
3058 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); in hclge_task_schedule()
3061 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) in hclge_get_mac_link_status() argument
3068 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_mac_link_status()
3070 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", in hclge_get_mac_link_status()
3082 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) in hclge_get_mac_phy_link() argument
3084 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mac_phy_link()
3088 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) in hclge_get_mac_phy_link()
3094 return hclge_get_mac_link_status(hdev, link_status); in hclge_get_mac_phy_link()
3097 static void hclge_push_link_status(struct hclge_dev *hdev) in hclge_push_link_status() argument
3103 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { in hclge_push_link_status()
3104 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_push_link_status()
3112 dev_err(&hdev->pdev->dev, in hclge_push_link_status()
3119 static void hclge_update_link_status(struct hclge_dev *hdev) in hclge_update_link_status() argument
3121 struct hnae3_handle *rhandle = &hdev->vport[0].roce; in hclge_update_link_status()
3122 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_update_link_status()
3123 struct hnae3_client *rclient = hdev->roce_client; in hclge_update_link_status()
3124 struct hnae3_client *client = hdev->nic_client; in hclge_update_link_status()
3131 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) in hclge_update_link_status()
3134 ret = hclge_get_mac_phy_link(hdev, &state); in hclge_update_link_status()
3136 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3140 if (state != hdev->hw.mac.link) { in hclge_update_link_status()
3141 hdev->hw.mac.link = state; in hclge_update_link_status()
3143 hclge_config_mac_tnl_int(hdev, state); in hclge_update_link_status()
3147 hclge_push_link_status(hdev); in hclge_update_link_status()
3150 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3195 static void hclge_update_pause_advertising(struct hclge_dev *hdev) in hclge_update_pause_advertising() argument
3197 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_pause_advertising()
3200 switch (hdev->fc_mode_last_time) { in hclge_update_pause_advertising()
3222 static void hclge_update_advertising(struct hclge_dev *hdev) in hclge_update_advertising() argument
3224 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_advertising()
3229 hclge_update_pause_advertising(hdev); in hclge_update_advertising()
3232 static void hclge_update_port_capability(struct hclge_dev *hdev, in hclge_update_port_capability() argument
3235 if (hnae3_dev_fec_supported(hdev)) in hclge_update_port_capability()
3253 hclge_update_advertising(hdev); in hclge_update_port_capability()
3257 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) in hclge_get_sfp_speed() argument
3265 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_speed()
3267 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_speed()
3271 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); in hclge_get_sfp_speed()
3280 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) in hclge_get_sfp_info() argument
3291 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_info()
3293 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_info()
3297 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); in hclge_get_sfp_info()
3338 struct hclge_dev *hdev = vport->back; in hclge_get_phy_link_ksettings() local
3347 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_get_phy_link_ksettings()
3349 dev_err(&hdev->pdev->dev, in hclge_get_phy_link_ksettings()
3388 struct hclge_dev *hdev = vport->back; in hclge_set_phy_link_ksettings() local
3416 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_set_phy_link_ksettings()
3418 dev_err(&hdev->pdev->dev, in hclge_set_phy_link_ksettings()
3423 hdev->hw.mac.autoneg = cmd->base.autoneg; in hclge_set_phy_link_ksettings()
3424 hdev->hw.mac.speed = cmd->base.speed; in hclge_set_phy_link_ksettings()
3425 hdev->hw.mac.duplex = cmd->base.duplex; in hclge_set_phy_link_ksettings()
3426 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); in hclge_set_phy_link_ksettings()
3431 static int hclge_update_tp_port_info(struct hclge_dev *hdev) in hclge_update_tp_port_info() argument
3436 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_update_tp_port_info()
3439 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_update_tp_port_info()
3443 hdev->hw.mac.autoneg = cmd.base.autoneg; in hclge_update_tp_port_info()
3444 hdev->hw.mac.speed = cmd.base.speed; in hclge_update_tp_port_info()
3445 hdev->hw.mac.duplex = cmd.base.duplex; in hclge_update_tp_port_info()
3446 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); in hclge_update_tp_port_info()
3451 static int hclge_tp_port_init(struct hclge_dev *hdev) in hclge_tp_port_init() argument
3455 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_tp_port_init()
3458 cmd.base.autoneg = hdev->hw.mac.autoneg; in hclge_tp_port_init()
3459 cmd.base.speed = hdev->hw.mac.speed; in hclge_tp_port_init()
3460 cmd.base.duplex = hdev->hw.mac.duplex; in hclge_tp_port_init()
3461 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); in hclge_tp_port_init()
3463 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_tp_port_init()
3466 static int hclge_update_port_info(struct hclge_dev *hdev) in hclge_update_port_info() argument
3468 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_port_info()
3474 return hclge_update_tp_port_info(hdev); in hclge_update_port_info()
3477 if (!hdev->support_sfp_query) in hclge_update_port_info()
3480 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3482 ret = hclge_get_sfp_info(hdev, mac); in hclge_update_port_info()
3485 ret = hclge_get_sfp_speed(hdev, &speed); in hclge_update_port_info()
3489 hdev->support_sfp_query = false; in hclge_update_port_info()
3495 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3497 hclge_update_port_capability(hdev, mac); in hclge_update_port_info()
3499 (void)hclge_tm_port_shaper_cfg(hdev); in hclge_update_port_info()
3502 return hclge_cfg_mac_speed_dup(hdev, mac->speed, in hclge_update_port_info()
3509 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); in hclge_update_port_info()
3516 struct hclge_dev *hdev = vport->back; in hclge_get_status() local
3518 hclge_update_link_status(hdev); in hclge_get_status()
3520 return hdev->hw.mac.link; in hclge_get_status()
3523 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) in hclge_get_vf_vport() argument
3525 if (!pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3526 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3531 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3532 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3534 vf, pci_num_vf(hdev->pdev)); in hclge_get_vf_vport()
3540 return &hdev->vport[vf]; in hclge_get_vf_vport()
3547 struct hclge_dev *hdev = vport->back; in hclge_get_vf_config() local
3549 vport = hclge_get_vf_vport(hdev, vf); in hclge_get_vf_config()
3571 struct hclge_dev *hdev = vport->back; in hclge_set_vf_link_state() local
3575 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_link_state()
3591 dev_err(&hdev->pdev->dev, in hclge_set_vf_link_state()
3598 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) in hclge_check_event_cause() argument
3603 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); in hclge_check_event_cause()
3604 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_check_event_cause()
3605 hw_err_src_reg = hclge_read_dev(&hdev->hw, in hclge_check_event_cause()
3617 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); in hclge_check_event_cause()
3618 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3619 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3621 hdev->rst_stats.imp_rst_cnt++; in hclge_check_event_cause()
3626 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); in hclge_check_event_cause()
3627 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3628 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); in hclge_check_event_cause()
3630 hdev->rst_stats.global_rst_cnt++; in hclge_check_event_cause()
3653 dev_info(&hdev->pdev->dev, in hclge_check_event_cause()
3660 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, in hclge_clear_event_cause() argument
3666 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); in hclge_clear_event_cause()
3669 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); in hclge_clear_event_cause()
3676 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) in hclge_clear_all_event_cause() argument
3678 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, in hclge_clear_all_event_cause()
3682 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); in hclge_clear_all_event_cause()
3692 struct hclge_dev *hdev = data; in hclge_misc_irq_handle() local
3697 hclge_enable_vector(&hdev->misc_vector, false); in hclge_misc_irq_handle()
3698 event_cause = hclge_check_event_cause(hdev, &clearval); in hclge_misc_irq_handle()
3703 hclge_errhand_task_schedule(hdev); in hclge_misc_irq_handle()
3706 hclge_reset_task_schedule(hdev); in hclge_misc_irq_handle()
3709 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3710 hclge_ptp_clean_tx_hwts(hdev); in hclge_misc_irq_handle()
3711 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3723 hclge_mbx_task_schedule(hdev); in hclge_misc_irq_handle()
3726 dev_warn(&hdev->pdev->dev, in hclge_misc_irq_handle()
3731 hclge_clear_event_cause(hdev, event_cause, clearval); in hclge_misc_irq_handle()
3737 hclge_enable_vector(&hdev->misc_vector, true); in hclge_misc_irq_handle()
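hclge_misc_irq_handle() has the classic ISR shape: mask the misc vector, decode the event cause registers, defer the real work (error handling, reset, mailbox) to the service task, clear the cause, unmask. An outline with hypothetical stand-ins for the driver's helpers:

#include <stdbool.h>

enum misc_event { EVT_NONE, EVT_ERR, EVT_RST, EVT_MBX };

static void vector_enable(bool on) { (void)on; /* mask/unmask hw vector */ }
static enum misc_event decode_cause(unsigned int *clearval)
{
	*clearval = 0;		/* bits to write back to the cause reg */
	return EVT_NONE;
}
static void schedule_service_task(enum misc_event ev) { (void)ev; }
static void clear_cause(enum misc_event ev, unsigned int clearval)
{
	(void)ev; (void)clearval;
}

static void misc_irq(void)
{
	unsigned int clearval;
	enum misc_event ev;

	vector_enable(false);		/* mask while handling */
	ev = decode_cause(&clearval);
	if (ev != EVT_NONE)
		schedule_service_task(ev);	/* keep the ISR short */
	clear_cause(ev, clearval);
	vector_enable(true);		/* unmask */
}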
3742 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) in hclge_free_vector() argument
3744 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { in hclge_free_vector()
3745 dev_warn(&hdev->pdev->dev, in hclge_free_vector()
3750 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; in hclge_free_vector()
3751 hdev->num_msi_left += 1; in hclge_free_vector()
3752 hdev->num_msi_used -= 1; in hclge_free_vector()
3755 static void hclge_get_misc_vector(struct hclge_dev *hdev) in hclge_get_misc_vector() argument
3757 struct hclge_misc_vector *vector = &hdev->misc_vector; in hclge_get_misc_vector()
3759 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); in hclge_get_misc_vector()
3761 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; in hclge_get_misc_vector()
3762 hdev->vector_status[0] = 0; in hclge_get_misc_vector()
3764 hdev->num_msi_left -= 1; in hclge_get_misc_vector()
3765 hdev->num_msi_used += 1; in hclge_get_misc_vector()
3768 static int hclge_misc_irq_init(struct hclge_dev *hdev) in hclge_misc_irq_init() argument
3772 hclge_get_misc_vector(hdev); in hclge_misc_irq_init()
3775 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", in hclge_misc_irq_init()
3776 HCLGE_NAME, pci_name(hdev->pdev)); in hclge_misc_irq_init()
3777 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, in hclge_misc_irq_init()
3778 0, hdev->misc_vector.name, hdev); in hclge_misc_irq_init()
3780 hclge_free_vector(hdev, 0); in hclge_misc_irq_init()
3781 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", in hclge_misc_irq_init()
3782 hdev->misc_vector.vector_irq); in hclge_misc_irq_init()
3788 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) in hclge_misc_irq_uninit() argument
3790 free_irq(hdev->misc_vector.vector_irq, hdev); in hclge_misc_irq_uninit()
3791 hclge_free_vector(hdev, 0); in hclge_misc_irq_uninit()
3794 int hclge_notify_client(struct hclge_dev *hdev, in hclge_notify_client() argument
3797 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_notify_client()
3798 struct hnae3_client *client = hdev->nic_client; in hclge_notify_client()
3801 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) in hclge_notify_client()
3809 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", in hclge_notify_client()
3815 static int hclge_notify_roce_client(struct hclge_dev *hdev, in hclge_notify_roce_client() argument
3818 struct hnae3_handle *handle = &hdev->vport[0].roce; in hclge_notify_roce_client()
3819 struct hnae3_client *client = hdev->roce_client; in hclge_notify_roce_client()
3822 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclge_notify_roce_client()
3830 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", in hclge_notify_roce_client()
3836 static int hclge_reset_wait(struct hclge_dev *hdev) in hclge_reset_wait() argument
3844 switch (hdev->reset_type) { in hclge_reset_wait()
3858 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3860 hdev->reset_type); in hclge_reset_wait()
3864 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3867 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3872 dev_warn(&hdev->pdev->dev, in hclge_reset_wait()
3873 "Wait for reset timeout: %d\n", hdev->reset_type); in hclge_reset_wait()
3880 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) in hclge_set_vf_rst() argument
3892 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vf_rst()
3895 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) in hclge_set_all_vf_rst() argument
3899 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { in hclge_set_all_vf_rst()
3900 struct hclge_vport *vport = &hdev->vport[i]; in hclge_set_all_vf_rst()
3904 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); in hclge_set_all_vf_rst()
3906 dev_err(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3922 dev_warn(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3931 static void hclge_mailbox_service_task(struct hclge_dev *hdev) in hclge_mailbox_service_task() argument
3933 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || in hclge_mailbox_service_task()
3934 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || in hclge_mailbox_service_task()
3935 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) in hclge_mailbox_service_task()
3938 if (time_is_before_jiffies(hdev->last_mbx_scheduled + in hclge_mailbox_service_task()
3940 dev_warn(&hdev->pdev->dev, in hclge_mailbox_service_task()
3942 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), in hclge_mailbox_service_task()
3945 hclge_mbx_handler(hdev); in hclge_mailbox_service_task()
3947 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_mailbox_service_task()
3950 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) in hclge_func_reset_sync_vf() argument
3962 hclge_mailbox_service_task(hdev); in hclge_func_reset_sync_vf()
3964 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_sync_vf()
3972 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", in hclge_func_reset_sync_vf()
3982 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); in hclge_func_reset_sync_vf()
3985 void hclge_report_hw_error(struct hclge_dev *hdev, in hclge_report_hw_error() argument
3988 struct hnae3_client *client = hdev->nic_client; in hclge_report_hw_error()
3991 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) in hclge_report_hw_error()
3994 client->ops->process_hw_error(&hdev->vport[0].nic, type); in hclge_report_hw_error()
3997 static void hclge_handle_imp_error(struct hclge_dev *hdev) in hclge_handle_imp_error() argument
4001 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_handle_imp_error()
4003 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); in hclge_handle_imp_error()
4005 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4009 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); in hclge_handle_imp_error()
4011 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4015 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) in hclge_func_reset_cmd() argument
4025 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_cmd()
4027 dev_err(&hdev->pdev->dev, in hclge_func_reset_cmd()
4033 static void hclge_do_reset(struct hclge_dev *hdev) in hclge_do_reset() argument
4035 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_do_reset()
4036 struct pci_dev *pdev = hdev->pdev; in hclge_do_reset()
4042 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), in hclge_do_reset()
4043 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); in hclge_do_reset()
4047 switch (hdev->reset_type) { in hclge_do_reset()
4050 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_do_reset()
4052 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); in hclge_do_reset()
4056 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); in hclge_do_reset()
4058 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); in hclge_do_reset()
4063 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); in hclge_do_reset()
4064 hclge_reset_task_schedule(hdev); in hclge_do_reset()
4068 "unsupported reset type: %d\n", hdev->reset_type); in hclge_do_reset()
4077 struct hclge_dev *hdev = ae_dev->priv; in hclge_get_reset_level() local
4097 if (hdev->reset_type != HNAE3_NONE_RESET && in hclge_get_reset_level()
4098 rst_level < hdev->reset_type) in hclge_get_reset_level()
4104 static void hclge_clear_reset_cause(struct hclge_dev *hdev) in hclge_clear_reset_cause() argument
4108 switch (hdev->reset_type) { in hclge_clear_reset_cause()
4125 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_clear_reset_cause()
4126 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, in hclge_clear_reset_cause()
4129 hclge_enable_vector(&hdev->misc_vector, true); in hclge_clear_reset_cause()
4132 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) in hclge_reset_handshake() argument
4136 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); in hclge_reset_handshake()
4142 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); in hclge_reset_handshake()
4145 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) in hclge_func_reset_notify_vf() argument
4149 ret = hclge_set_all_vf_rst(hdev, true); in hclge_func_reset_notify_vf()
4153 hclge_func_reset_sync_vf(hdev); in hclge_func_reset_notify_vf()
4158 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) in hclge_reset_prepare_wait() argument
4163 switch (hdev->reset_type) { in hclge_reset_prepare_wait()
4165 ret = hclge_func_reset_notify_vf(hdev); in hclge_reset_prepare_wait()
4169 ret = hclge_func_reset_cmd(hdev, 0); in hclge_reset_prepare_wait()
4171 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_wait()
4181 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_wait()
4182 hdev->rst_stats.pf_rst_cnt++; in hclge_reset_prepare_wait()
4185 ret = hclge_func_reset_notify_vf(hdev); in hclge_reset_prepare_wait()
4190 hclge_handle_imp_error(hdev); in hclge_reset_prepare_wait()
4191 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_reset_prepare_wait()
4192 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, in hclge_reset_prepare_wait()
4201 hclge_reset_handshake(hdev, true); in hclge_reset_prepare_wait()
4202 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); in hclge_reset_prepare_wait()
4207 static void hclge_show_rst_info(struct hclge_dev *hdev) in hclge_show_rst_info() argument
4215 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); in hclge_show_rst_info()
4217 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); in hclge_show_rst_info()
4222 static bool hclge_reset_err_handle(struct hclge_dev *hdev) in hclge_reset_err_handle() argument
4226 if (hdev->reset_pending) { in hclge_reset_err_handle()
4227 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", in hclge_reset_err_handle()
4228 hdev->reset_pending); in hclge_reset_err_handle()
4230 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & in hclge_reset_err_handle()
4232 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4234 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
4236 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { in hclge_reset_err_handle()
4237 hdev->rst_stats.reset_fail_cnt++; in hclge_reset_err_handle()
4238 set_bit(hdev->reset_type, &hdev->reset_pending); in hclge_reset_err_handle()
4239 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4241 hdev->rst_stats.reset_fail_cnt); in hclge_reset_err_handle()
4245 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
4248 hclge_reset_handshake(hdev, true); in hclge_reset_err_handle()
4250 dev_err(&hdev->pdev->dev, "Reset fail!\n"); in hclge_reset_err_handle()
4252 hclge_show_rst_info(hdev); in hclge_reset_err_handle()
4254 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_err_handle()
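Note: hclge_reset_err_handle() above is a bounded-retry policy: a still-pending reset or a recoverable failure count re-schedules the reset task, and only after MAX_RESET_FAIL_CNT failures does the driver give up, re-enable the command-queue handshake, dump reset info, and latch HCLGE_STATE_RST_FAIL. A minimal userspace model of that decision structure (the struct and messages are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RESET_FAIL_CNT 5   /* retry cap, as in the driver */

struct rst_ctx {
    unsigned long reset_pending;   /* bitmap of pending reset levels */
    unsigned int  fail_cnt;
    bool          failed_for_good; /* HCLGE_STATE_RST_FAIL analogue */
};

/* Returns true when the caller should schedule another reset attempt. */
static bool reset_err_handle(struct rst_ctx *c, unsigned int level)
{
    if (c->reset_pending)           /* another reset already queued */
        return true;

    if (c->fail_cnt < MAX_RESET_FAIL_CNT) {
        c->fail_cnt++;
        c->reset_pending |= 1UL << level;   /* retry the same level */
        printf("reset failed %u time(s), retrying\n", c->fail_cnt);
        return true;
    }

    c->failed_for_good = true;      /* give up and report */
    printf("reset failed permanently\n");
    return false;
}

int main(void)
{
    struct rst_ctx c = { 0 };

    while (reset_err_handle(&c, 3))
        c.reset_pending = 0;        /* pretend each retry also failed */
    return 0;
}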
4259 static void hclge_update_reset_level(struct hclge_dev *hdev) in hclge_update_reset_level() argument
4261 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_reset_level()
4268 hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_update_reset_level()
4275 &hdev->default_reset_request); in hclge_update_reset_level()
4277 set_bit(reset_level, &hdev->reset_request); in hclge_update_reset_level()
4280 static int hclge_set_rst_done(struct hclge_dev *hdev) in hclge_set_rst_done() argument
4290 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rst_done()
4296 dev_warn(&hdev->pdev->dev, in hclge_set_rst_done()
4301 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", in hclge_set_rst_done()
4308 static int hclge_reset_prepare_up(struct hclge_dev *hdev) in hclge_reset_prepare_up() argument
4312 switch (hdev->reset_type) { in hclge_reset_prepare_up()
4315 ret = hclge_set_all_vf_rst(hdev, false); in hclge_reset_prepare_up()
4319 ret = hclge_set_rst_done(hdev); in hclge_reset_prepare_up()
4326 hclge_reset_handshake(hdev, false); in hclge_reset_prepare_up()
4331 static int hclge_reset_stack(struct hclge_dev *hdev) in hclge_reset_stack() argument
4335 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset_stack()
4339 ret = hclge_reset_ae_dev(hdev->ae_dev); in hclge_reset_stack()
4343 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset_stack()
4346 static int hclge_reset_prepare(struct hclge_dev *hdev) in hclge_reset_prepare() argument
4350 hdev->rst_stats.reset_cnt++; in hclge_reset_prepare()
4352 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset_prepare()
4357 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset_prepare()
4362 return hclge_reset_prepare_wait(hdev); in hclge_reset_prepare()
4365 static int hclge_reset_rebuild(struct hclge_dev *hdev) in hclge_reset_rebuild() argument
4369 hdev->rst_stats.hw_reset_done_cnt++; in hclge_reset_rebuild()
4371 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset_rebuild()
4376 ret = hclge_reset_stack(hdev); in hclge_reset_rebuild()
4381 hclge_clear_reset_cause(hdev); in hclge_reset_rebuild()
4383 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset_rebuild()
4388 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) in hclge_reset_rebuild()
4391 ret = hclge_reset_prepare_up(hdev); in hclge_reset_rebuild()
4396 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_reset_rebuild()
4401 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); in hclge_reset_rebuild()
4405 hdev->last_reset_time = jiffies; in hclge_reset_rebuild()
4406 hdev->rst_stats.reset_fail_cnt = 0; in hclge_reset_rebuild()
4407 hdev->rst_stats.reset_done_cnt++; in hclge_reset_rebuild()
4408 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_rebuild()
4410 hclge_update_reset_level(hdev); in hclge_reset_rebuild()
4415 static void hclge_reset(struct hclge_dev *hdev) in hclge_reset() argument
4417 if (hclge_reset_prepare(hdev)) in hclge_reset()
4420 if (hclge_reset_wait(hdev)) in hclge_reset()
4423 if (hclge_reset_rebuild(hdev)) in hclge_reset()
4429 if (hclge_reset_err_handle(hdev)) in hclge_reset()
4430 hclge_reset_task_schedule(hdev); in hclge_reset()
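Note: hclge_reset() above chains three stages, prepare -> wait -> rebuild, and any failure falls through to the error handler, which may schedule a retry. A compact model of that staged pipeline (stage names and bodies are placeholders):

#include <stdbool.h>
#include <stdio.h>

typedef int (*stage_fn)(void);

static int prepare(void) { puts("prepare"); return 0; }
static int wait_hw(void) { puts("wait");    return 0; }
static int rebuild(void) { puts("rebuild"); return 0; }

/* Run stages in order; the first non-zero return aborts the pipeline
 * so the caller can take its error path, as hclge_reset() does.
 */
static bool run_reset(void)
{
    stage_fn stages[] = { prepare, wait_hw, rebuild };

    for (unsigned i = 0; i < sizeof(stages) / sizeof(stages[0]); i++)
        if (stages[i]())
            return false;
    return true;
}

int main(void)
{
    if (!run_reset())
        puts("schedule retry");   /* hclge_reset_err_handle() analogue */
    return 0;
}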
4436 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_event() local
4453 if (time_before(jiffies, (hdev->last_reset_time + in hclge_reset_event()
4455 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_reset_event()
4459 if (hdev->default_reset_request) { in hclge_reset_event()
4460 hdev->reset_level = in hclge_reset_event()
4462 &hdev->default_reset_request); in hclge_reset_event()
4463 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { in hclge_reset_event()
4464 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_reset_event()
4467 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", in hclge_reset_event()
4468 hdev->reset_level); in hclge_reset_event()
4471 set_bit(hdev->reset_level, &hdev->reset_request); in hclge_reset_event()
4472 hclge_reset_task_schedule(hdev); in hclge_reset_event()
4474 if (hdev->reset_level < HNAE3_GLOBAL_RESET) in hclge_reset_event()
4475 hdev->reset_level++; in hclge_reset_event()
4481 struct hclge_dev *hdev = ae_dev->priv; in hclge_set_def_reset_request() local
4483 set_bit(rst_type, &hdev->default_reset_request); in hclge_set_def_reset_request()
4488 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); in hclge_reset_timer() local
4493 if (!hdev->default_reset_request) in hclge_reset_timer()
4496 dev_info(&hdev->pdev->dev, in hclge_reset_timer()
4498 hclge_reset_event(hdev->pdev, NULL); in hclge_reset_timer()
4501 static void hclge_reset_subtask(struct hclge_dev *hdev) in hclge_reset_subtask() argument
4503 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_subtask()
4514 hdev->last_reset_time = jiffies; in hclge_reset_subtask()
4515 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); in hclge_reset_subtask()
4516 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4517 hclge_reset(hdev); in hclge_reset_subtask()
4520 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_reset_subtask()
4521 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4522 hclge_do_reset(hdev); in hclge_reset_subtask()
4524 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_subtask()
4527 static void hclge_handle_err_reset_request(struct hclge_dev *hdev) in hclge_handle_err_reset_request() argument
4529 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_reset_request()
4538 if (hdev->default_reset_request && ae_dev->ops->reset_event) in hclge_handle_err_reset_request()
4539 ae_dev->ops->reset_event(hdev->pdev, NULL); in hclge_handle_err_reset_request()
4542 hclge_enable_vector(&hdev->misc_vector, true); in hclge_handle_err_reset_request()
4545 static void hclge_handle_err_recovery(struct hclge_dev *hdev) in hclge_handle_err_recovery() argument
4547 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_recovery()
4551 if (hclge_find_error_source(hdev)) { in hclge_handle_err_recovery()
4553 hclge_handle_mac_tnl(hdev); in hclge_handle_err_recovery()
4556 hclge_handle_err_reset_request(hdev); in hclge_handle_err_recovery()
4559 static void hclge_misc_err_recovery(struct hclge_dev *hdev) in hclge_misc_err_recovery() argument
4561 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_misc_err_recovery()
4562 struct device *dev = &hdev->pdev->dev; in hclge_misc_err_recovery()
4565 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_misc_err_recovery()
4568 (hdev, &hdev->default_reset_request)) in hclge_misc_err_recovery()
4575 hclge_handle_err_reset_request(hdev); in hclge_misc_err_recovery()
4578 static void hclge_errhand_service_task(struct hclge_dev *hdev) in hclge_errhand_service_task() argument
4580 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_service_task()
4583 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_errhand_service_task()
4584 hclge_handle_err_recovery(hdev); in hclge_errhand_service_task()
4586 hclge_misc_err_recovery(hdev); in hclge_errhand_service_task()
4589 static void hclge_reset_service_task(struct hclge_dev *hdev) in hclge_reset_service_task() argument
4591 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_service_task()
4594 if (time_is_before_jiffies(hdev->last_rst_scheduled + in hclge_reset_service_task()
4596 dev_warn(&hdev->pdev->dev, in hclge_reset_service_task()
4598 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), in hclge_reset_service_task()
4601 down(&hdev->reset_sem); in hclge_reset_service_task()
4602 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4604 hclge_reset_subtask(hdev); in hclge_reset_service_task()
4606 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4607 up(&hdev->reset_sem); in hclge_reset_service_task()
4610 static void hclge_update_vport_alive(struct hclge_dev *hdev) in hclge_update_vport_alive() argument
4615 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_update_vport_alive()
4616 struct hclge_vport *vport = &hdev->vport[i]; in hclge_update_vport_alive()
4627 static void hclge_periodic_service_task(struct hclge_dev *hdev) in hclge_periodic_service_task() argument
4631 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_periodic_service_task()
4637 hclge_update_link_status(hdev); in hclge_periodic_service_task()
4638 hclge_sync_mac_table(hdev); in hclge_periodic_service_task()
4639 hclge_sync_promisc_mode(hdev); in hclge_periodic_service_task()
4640 hclge_sync_fd_table(hdev); in hclge_periodic_service_task()
4642 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { in hclge_periodic_service_task()
4643 delta = jiffies - hdev->last_serv_processed; in hclge_periodic_service_task()
4651 hdev->serv_processed_cnt++; in hclge_periodic_service_task()
4652 hclge_update_vport_alive(hdev); in hclge_periodic_service_task()
4654 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { in hclge_periodic_service_task()
4655 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4659 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) in hclge_periodic_service_task()
4660 hclge_update_stats_for_all(hdev); in hclge_periodic_service_task()
4662 hclge_update_port_info(hdev); in hclge_periodic_service_task()
4663 hclge_sync_vlan_filter(hdev); in hclge_periodic_service_task()
4665 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) in hclge_periodic_service_task()
4666 hclge_rfs_filter_expire(hdev); in hclge_periodic_service_task()
4668 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4671 hclge_task_schedule(hdev, delta); in hclge_periodic_service_task()
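Note: hclge_periodic_service_task() above runs cheap work (table sync, link status) every pass and throttles costlier subtasks by taking serv_processed_cnt modulo an interval. A minimal sketch of that throttling pattern; the interval values here are illustrative, not the driver's HCLGE_* constants:

#include <stdio.h>

#define STATS_INTERVAL 300   /* run stats every Nth pass */
#define ARFS_INTERVAL  30

static unsigned long serv_processed_cnt;

static void periodic_service_task(void)
{
    serv_processed_cnt++;

    puts("sync tables, link status");            /* every pass */

    if (!(serv_processed_cnt % STATS_INTERVAL))
        puts("update statistics");               /* every 300th pass */
    if (!(serv_processed_cnt % ARFS_INTERVAL))
        puts("expire aRFS filters");             /* every 30th pass */
}

int main(void)
{
    for (int i = 0; i < 61; i++)
        periodic_service_task();
    return 0;
}

Keeping one counter and several moduli lets unrelated subtasks share a single periodic worker without separate timers.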
4674 static void hclge_ptp_service_task(struct hclge_dev *hdev) in hclge_ptp_service_task() argument
4678 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || in hclge_ptp_service_task()
4679 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || in hclge_ptp_service_task()
4680 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) in hclge_ptp_service_task()
4684 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4689 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) in hclge_ptp_service_task()
4690 hclge_ptp_clean_tx_hwts(hdev); in hclge_ptp_service_task()
4692 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4697 struct hclge_dev *hdev = in hclge_service_task() local
4700 hclge_errhand_service_task(hdev); in hclge_service_task()
4701 hclge_reset_service_task(hdev); in hclge_service_task()
4702 hclge_ptp_service_task(hdev); in hclge_service_task()
4703 hclge_mailbox_service_task(hdev); in hclge_service_task()
4704 hclge_periodic_service_task(hdev); in hclge_service_task()
4710 hclge_errhand_service_task(hdev); in hclge_service_task()
4711 hclge_reset_service_task(hdev); in hclge_service_task()
4712 hclge_mailbox_service_task(hdev); in hclge_service_task()
4726 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, in hclge_get_vector_info() argument
4731 vector_info->vector = pci_irq_vector(hdev->pdev, idx); in hclge_get_vector_info()
4735 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4739 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4746 hdev->vector_status[idx] = hdev->vport[0].vport_id; in hclge_get_vector_info()
4747 hdev->vector_irq[idx] = vector_info->vector; in hclge_get_vector_info()
4755 struct hclge_dev *hdev = vport->back; in hclge_get_vector() local
4760 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); in hclge_get_vector()
4761 vector_num = min(hdev->num_msi_left, vector_num); in hclge_get_vector()
4764 while (++i < hdev->num_nic_msi) { in hclge_get_vector()
4765 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { in hclge_get_vector()
4766 hclge_get_vector_info(hdev, i, vector); in hclge_get_vector()
4774 hdev->num_msi_left -= alloc; in hclge_get_vector()
4775 hdev->num_msi_used += alloc; in hclge_get_vector()
4780 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) in hclge_get_vector_index() argument
4784 for (i = 0; i < hdev->num_msi; i++) in hclge_get_vector_index()
4785 if (vector == hdev->vector_irq[i]) in hclge_get_vector_index()
4794 struct hclge_dev *hdev = vport->back; in hclge_put_vector() local
4797 vector_id = hclge_get_vector_index(hdev, vector); in hclge_put_vector()
4799 dev_err(&hdev->pdev->dev, in hclge_put_vector()
4804 hclge_free_vector(hdev, vector_id); in hclge_put_vector()
4829 struct hclge_dev *hdev = vport->back; in hclge_set_rss() local
4830 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclge_set_rss()
4833 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); in hclge_set_rss()
4835 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); in hclge_set_rss()
4844 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, in hclge_set_rss()
4852 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tuple() local
4855 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, in hclge_set_rss_tuple()
4856 &hdev->rss_cfg, nfc); in hclge_set_rss_tuple()
4858 dev_err(&hdev->pdev->dev, in hclge_set_rss_tuple()
4888 struct hclge_dev *hdev = vport->back; in hclge_get_tc_size() local
4890 return hdev->pf_rss_size_max; in hclge_get_tc_size()
4893 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) in hclge_init_rss_tc_mode() argument
4895 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_init_rss_tc_mode()
4896 struct hclge_vport *vport = hdev->vport; in hclge_init_rss_tc_mode()
4910 if (!(hdev->hw_tc_map & BIT(i))) in hclge_init_rss_tc_mode()
4919 dev_err(&hdev->pdev->dev, in hclge_init_rss_tc_mode()
4933 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_init_rss_tc_mode()
4937 int hclge_rss_init_hw(struct hclge_dev *hdev) in hclge_rss_init_hw() argument
4939 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; in hclge_rss_init_hw()
4940 u8 *key = hdev->rss_cfg.rss_hash_key; in hclge_rss_init_hw()
4941 u8 hfunc = hdev->rss_cfg.rss_algo; in hclge_rss_init_hw()
4944 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, in hclge_rss_init_hw()
4949 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); in hclge_rss_init_hw()
4953 ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic, in hclge_rss_init_hw()
4954 &hdev->hw.hw, true, in hclge_rss_init_hw()
4955 &hdev->rss_cfg); in hclge_rss_init_hw()
4959 return hclge_init_rss_tc_mode(hdev); in hclge_rss_init_hw()
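Note: hclge_rss_init_hw() above programs the RSS indirection table, hash key and algorithm, input tuple, and finally the per-TC mode. The table contents are prepared elsewhere in the driver; the sketch below only shows the usual round-robin fill that maps hash buckets to receive queues, with an illustrative table size:

#include <stdint.h>
#include <stdio.h>

#define RSS_IND_TBL_SIZE 512   /* illustrative table size */

/* Spread queues across the indirection table round-robin; hardware
 * indexes this table with (hash % table_size) to pick an RX queue.
 */
static void fill_indir_table(uint16_t *tbl, uint16_t nr_queues)
{
    for (int i = 0; i < RSS_IND_TBL_SIZE; i++)
        tbl[i] = i % nr_queues;
}

int main(void)
{
    uint16_t tbl[RSS_IND_TBL_SIZE];

    fill_indir_table(tbl, 16);
    printf("entry 0 -> q%u, entry 17 -> q%u\n", tbl[0], tbl[17]);
    return 0;
}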
4966 struct hclge_dev *hdev = vport->back; in hclge_bind_ring_with_vector() local
5003 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5005 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5029 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5031 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5044 struct hclge_dev *hdev = vport->back; in hclge_map_ring_to_vector() local
5047 vector_id = hclge_get_vector_index(hdev, vector); in hclge_map_ring_to_vector()
5049 dev_err(&hdev->pdev->dev, in hclge_map_ring_to_vector()
5061 struct hclge_dev *hdev = vport->back; in hclge_unmap_ring_frm_vector() local
5064 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_unmap_ring_frm_vector()
5067 vector_id = hclge_get_vector_index(hdev, vector); in hclge_unmap_ring_frm_vector()
5083 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, in hclge_cmd_set_promisc_mode() argument
5086 struct hclge_vport *vport = &hdev->vport[vf_id]; in hclge_cmd_set_promisc_mode()
5119 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_set_promisc_mode()
5121 dev_err(&hdev->pdev->dev, in hclge_cmd_set_promisc_mode()
5139 struct hclge_dev *hdev = vport->back; in hclge_set_promisc_mode() local
5146 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_promisc_mode()
5160 static void hclge_sync_fd_state(struct hclge_dev *hdev) in hclge_sync_fd_state() argument
5162 if (hlist_empty(&hdev->fd_rule_list)) in hclge_sync_fd_state()
5163 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_sync_fd_state()
5166 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) in hclge_fd_inc_rule_cnt() argument
5168 if (!test_bit(location, hdev->fd_bmap)) { in hclge_fd_inc_rule_cnt()
5169 set_bit(location, hdev->fd_bmap); in hclge_fd_inc_rule_cnt()
5170 hdev->hclge_fd_rule_num++; in hclge_fd_inc_rule_cnt()
5174 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) in hclge_fd_dec_rule_cnt() argument
5176 if (test_bit(location, hdev->fd_bmap)) { in hclge_fd_dec_rule_cnt()
5177 clear_bit(location, hdev->fd_bmap); in hclge_fd_dec_rule_cnt()
5178 hdev->hclge_fd_rule_num--; in hclge_fd_dec_rule_cnt()
5182 static void hclge_fd_free_node(struct hclge_dev *hdev, in hclge_fd_free_node() argument
5187 hclge_sync_fd_state(hdev); in hclge_fd_free_node()
5190 static void hclge_update_fd_rule_node(struct hclge_dev *hdev, in hclge_update_fd_rule_node() argument
5213 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5214 hclge_fd_free_node(hdev, old_rule); in hclge_update_fd_rule_node()
5229 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5230 hclge_fd_free_node(hdev, old_rule); in hclge_update_fd_rule_node()
5272 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, in hclge_fd_set_user_def_cmd() argument
5301 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_set_user_def_cmd()
5303 dev_err(&hdev->pdev->dev, in hclge_fd_set_user_def_cmd()
5308 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) in hclge_sync_fd_user_def_cfg() argument
5312 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) in hclge_sync_fd_user_def_cfg()
5316 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5318 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); in hclge_sync_fd_user_def_cfg()
5320 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_sync_fd_user_def_cfg()
5323 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5326 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_check_user_def_refcnt() argument
5329 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_fd_check_user_def_refcnt()
5339 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_check_user_def_refcnt()
5356 dev_err(&hdev->pdev->dev, in hclge_fd_check_user_def_refcnt()
5362 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_inc_user_def_refcnt() argument
5371 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_inc_user_def_refcnt()
5374 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_inc_user_def_refcnt()
5379 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_dec_user_def_refcnt() argument
5388 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_dec_user_def_refcnt()
5395 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_dec_user_def_refcnt()
5399 static void hclge_update_fd_list(struct hclge_dev *hdev, in hclge_update_fd_list() argument
5403 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_update_fd_list()
5408 hclge_fd_dec_user_def_refcnt(hdev, fd_rule); in hclge_update_fd_list()
5410 hclge_fd_inc_user_def_refcnt(hdev, new_rule); in hclge_update_fd_list()
5411 hclge_sync_fd_user_def_cfg(hdev, true); in hclge_update_fd_list()
5413 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); in hclge_update_fd_list()
5421 dev_warn(&hdev->pdev->dev, in hclge_update_fd_list()
5427 hclge_fd_inc_user_def_refcnt(hdev, new_rule); in hclge_update_fd_list()
5428 hclge_sync_fd_user_def_cfg(hdev, true); in hclge_update_fd_list()
5431 hclge_fd_inc_rule_cnt(hdev, new_rule->location); in hclge_update_fd_list()
5434 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_update_fd_list()
5435 hclge_task_schedule(hdev, 0); in hclge_update_fd_list()
5439 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) in hclge_get_fd_mode() argument
5449 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_mode()
5451 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); in hclge_get_fd_mode()
5460 static int hclge_get_fd_allocation(struct hclge_dev *hdev, in hclge_get_fd_allocation() argument
5474 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_allocation()
5476 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", in hclge_get_fd_allocation()
5489 static int hclge_set_fd_key_config(struct hclge_dev *hdev, in hclge_set_fd_key_config() argument
5500 stage = &hdev->fd_cfg.key_cfg[stage_num]; in hclge_set_fd_key_config()
5510 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fd_key_config()
5512 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); in hclge_set_fd_key_config()
5517 static void hclge_fd_disable_user_def(struct hclge_dev *hdev) in hclge_fd_disable_user_def() argument
5519 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; in hclge_fd_disable_user_def()
5521 spin_lock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5522 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); in hclge_fd_disable_user_def()
5523 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5525 hclge_fd_set_user_def_cmd(hdev, cfg); in hclge_fd_disable_user_def()
5528 static int hclge_init_fd_config(struct hclge_dev *hdev) in hclge_init_fd_config() argument
5534 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_init_fd_config()
5537 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5541 switch (hdev->fd_cfg.fd_mode) { in hclge_init_fd_config()
5543 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; in hclge_init_fd_config()
5546 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; in hclge_init_fd_config()
5549 dev_err(&hdev->pdev->dev, in hclge_init_fd_config()
5551 hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5555 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; in hclge_init_fd_config()
5568 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { in hclge_init_fd_config()
5571 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hclge_init_fd_config()
5580 ret = hclge_get_fd_allocation(hdev, in hclge_init_fd_config()
5581 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5582 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], in hclge_init_fd_config()
5583 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5584 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); in hclge_init_fd_config()
5588 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); in hclge_init_fd_config()
5591 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, in hclge_fd_tcam_config() argument
5624 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_fd_tcam_config()
5626 dev_err(&hdev->pdev->dev, in hclge_fd_tcam_config()
5633 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, in hclge_fd_ad_config() argument
5636 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_fd_ad_config()
5672 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_ad_config()
5674 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); in hclge_fd_ad_config()
5802 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, in hclge_config_key() argument
5805 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; in hclge_config_key()
5833 meta_data_region = hdev->fd_cfg.max_key_length / 8 - in hclge_config_key()
5841 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, in hclge_config_key()
5844 dev_err(&hdev->pdev->dev, in hclge_config_key()
5850 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, in hclge_config_key()
5853 dev_err(&hdev->pdev->dev, in hclge_config_key()
5859 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, in hclge_config_action() argument
5862 struct hclge_vport *vport = hdev->vport; in hclge_config_action()
5882 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { in hclge_config_action()
5885 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; in hclge_config_action()
5897 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); in hclge_config_action()
6032 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, in hclge_fd_check_ext_tuple() argument
6038 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); in hclge_fd_check_ext_tuple()
6047 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6057 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_ext_tuple()
6059 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6105 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, in hclge_fd_parse_user_def_field() argument
6110 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; in hclge_fd_parse_user_def_field()
6130 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); in hclge_fd_parse_user_def_field()
6135 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6142 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); in hclge_fd_parse_user_def_field()
6148 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6161 static int hclge_fd_check_spec(struct hclge_dev *hdev, in hclge_fd_check_spec() argument
6169 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_fd_check_spec()
6170 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6173 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); in hclge_fd_check_spec()
6177 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); in hclge_fd_check_spec()
6204 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_spec()
6206 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6215 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6222 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6228 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); in hclge_fd_check_spec()
6231 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev, in hclge_fd_get_tcpip4_tuple() argument
6261 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev, in hclge_fd_get_ip4_tuple() argument
6285 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev, in hclge_fd_get_tcpip6_tuple() argument
6315 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev, in hclge_fd_get_ip6_tuple() argument
6339 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev, in hclge_fd_get_ether_tuple() argument
6376 static int hclge_fd_get_tuple(struct hclge_dev *hdev, in hclge_fd_get_tuple() argument
6385 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP); in hclge_fd_get_tuple()
6388 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP); in hclge_fd_get_tuple()
6391 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP); in hclge_fd_get_tuple()
6394 hclge_fd_get_ip4_tuple(hdev, fs, rule); in hclge_fd_get_tuple()
6397 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP); in hclge_fd_get_tuple()
6400 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP); in hclge_fd_get_tuple()
6403 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP); in hclge_fd_get_tuple()
6406 hclge_fd_get_ip6_tuple(hdev, fs, rule); in hclge_fd_get_tuple()
6409 hclge_fd_get_ether_tuple(hdev, fs, rule); in hclge_fd_get_tuple()
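Note: hclge_fd_get_tuple() above is a flow-type dispatcher: each ethtool flow-spec flavor is routed to the parser that fills the rule's tuple fields, with TCP/UDP/SCTP variants sharing a parser parameterized by IP protocol. The sketch below shows only that dispatch shape; the real parsers fill many more tuple fields than this toy rule struct:

#include <stdio.h>

enum flow_type { FLOW_TCP4, FLOW_UDP4, FLOW_IP4, FLOW_ETHER };

struct rule { int proto; const char *kind; };

static int get_tuple(enum flow_type type, struct rule *r)
{
    switch (type) {
    case FLOW_TCP4:
        r->proto = 6;  r->kind = "tcpip4"; break;  /* shared parser */
    case FLOW_UDP4:
        r->proto = 17; r->kind = "tcpip4"; break;  /* same, proto differs */
    case FLOW_IP4:
        r->proto = 0;  r->kind = "ip4";    break;
    case FLOW_ETHER:
        r->proto = 0;  r->kind = "ether";  break;
    default:
        return -1;
    }
    return 0;
}

int main(void)
{
    struct rule r;

    if (!get_tuple(FLOW_UDP4, &r))
        printf("parsed %s rule, proto %d\n", r.kind, r.proto);
    return 0;
}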
6429 static int hclge_fd_config_rule(struct hclge_dev *hdev, in hclge_fd_config_rule() argument
6434 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
6438 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
6441 static int hclge_add_fd_entry_common(struct hclge_dev *hdev, in hclge_add_fd_entry_common() argument
6446 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6448 if (hdev->fd_active_type != rule->rule_type && in hclge_add_fd_entry_common()
6449 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_add_fd_entry_common()
6450 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { in hclge_add_fd_entry_common()
6451 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry_common()
6453 rule->rule_type, hdev->fd_active_type); in hclge_add_fd_entry_common()
6454 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6458 ret = hclge_fd_check_user_def_refcnt(hdev, rule); in hclge_add_fd_entry_common()
6462 ret = hclge_clear_arfs_rules(hdev); in hclge_add_fd_entry_common()
6466 ret = hclge_fd_config_rule(hdev, rule); in hclge_add_fd_entry_common()
6471 hdev->fd_active_type = rule->rule_type; in hclge_add_fd_entry_common()
6472 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_common()
6475 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6482 struct hclge_dev *hdev = vport->back; in hclge_is_cls_flower_active() local
6484 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; in hclge_is_cls_flower_active()
6487 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, in hclge_fd_parse_ring_cookie() argument
6490 struct hclge_vport *vport = hdev->vport; in hclge_fd_parse_ring_cookie()
6502 if (vf > hdev->num_req_vfs) { in hclge_fd_parse_ring_cookie()
6503 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6505 vf - 1U, hdev->num_req_vfs); in hclge_fd_parse_ring_cookie()
6509 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; in hclge_fd_parse_ring_cookie()
6510 tqps = hdev->vport[vf].nic.kinfo.num_tqps; in hclge_fd_parse_ring_cookie()
6513 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6530 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry() local
6539 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_fd_entry()
6540 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6545 if (!hdev->fd_en) { in hclge_add_fd_entry()
6546 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6553 ret = hclge_fd_check_spec(hdev, fs, &unused, &info); in hclge_add_fd_entry()
6557 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, in hclge_add_fd_entry()
6566 ret = hclge_fd_get_tuple(hdev, fs, rule, &info); in hclge_add_fd_entry()
6580 ret = hclge_add_fd_entry_common(hdev, rule); in hclge_add_fd_entry()
6591 struct hclge_dev *hdev = vport->back; in hclge_del_fd_entry() local
6595 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_fd_entry()
6600 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_fd_entry()
6603 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6604 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_del_fd_entry()
6605 !test_bit(fs->location, hdev->fd_bmap)) { in hclge_del_fd_entry()
6606 dev_err(&hdev->pdev->dev, in hclge_del_fd_entry()
6608 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6612 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, in hclge_del_fd_entry()
6617 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); in hclge_del_fd_entry()
6620 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6624 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, in hclge_clear_fd_rules_in_list() argument
6631 spin_lock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6633 for_each_set_bit(location, hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6634 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_clear_fd_rules_in_list()
6635 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, in hclge_clear_fd_rules_in_list()
6639 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, in hclge_clear_fd_rules_in_list()
6644 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_clear_fd_rules_in_list()
6645 hdev->hclge_fd_rule_num = 0; in hclge_clear_fd_rules_in_list()
6646 bitmap_zero(hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6647 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_clear_fd_rules_in_list()
6650 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6653 static void hclge_del_all_fd_entries(struct hclge_dev *hdev) in hclge_del_all_fd_entries() argument
6655 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_all_fd_entries()
6658 hclge_clear_fd_rules_in_list(hdev, true); in hclge_del_all_fd_entries()
6659 hclge_fd_disable_user_def(hdev); in hclge_del_all_fd_entries()
6665 struct hclge_dev *hdev = vport->back; in hclge_restore_fd_entries() local
6673 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_restore_fd_entries()
6677 if (!hdev->fd_en) in hclge_restore_fd_entries()
6680 spin_lock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6681 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_restore_fd_entries()
6685 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6686 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_restore_fd_entries()
6695 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_cnt() local
6697 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) in hclge_get_fd_rule_cnt()
6700 cmd->rule_cnt = hdev->hclge_fd_rule_num; in hclge_get_fd_rule_cnt()
6701 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_fd_rule_cnt()
6876 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, in hclge_get_fd_rule() argument
6882 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_get_fd_rule()
6912 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_info() local
6915 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_fd_rule_info()
6920 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6922 rule = hclge_get_fd_rule(hdev, fs->location); in hclge_get_fd_rule_info()
6924 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6964 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6973 struct hclge_dev *hdev = vport->back; in hclge_get_all_rules() local
6978 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_all_rules()
6981 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_all_rules()
6983 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6985 &hdev->fd_rule_list, rule_node) { in hclge_get_all_rules()
6987 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6998 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7030 hclge_fd_search_flow_keys(struct hclge_dev *hdev, in hclge_fd_search_flow_keys() argument
7036 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_fd_search_flow_keys()
7074 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry_by_arfs() local
7078 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_add_fd_entry_by_arfs()
7084 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7085 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && in hclge_add_fd_entry_by_arfs()
7086 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { in hclge_add_fd_entry_by_arfs()
7087 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7098 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); in hclge_add_fd_entry_by_arfs()
7100 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); in hclge_add_fd_entry_by_arfs()
7101 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_add_fd_entry_by_arfs()
7102 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7108 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7116 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_by_arfs()
7117 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; in hclge_add_fd_entry_by_arfs()
7121 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_add_fd_entry_by_arfs()
7122 hclge_task_schedule(hdev, 0); in hclge_add_fd_entry_by_arfs()
7124 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
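Note: hclge_add_fd_entry_by_arfs() above first searches the rule list for a matching flow, and only if none exists allocates the lowest free location with find_first_zero_bit() on fd_bmap, refusing the insertion when the table is full. A userspace sketch of that lowest-free-bit allocation (a 64-bit word stands in for the driver's bitmap):

#include <stdio.h>

#define MAX_FD_FILTER_NUM 64

static unsigned long long fd_bmap;   /* one bit per rule location */

/* Allocate the lowest free rule location, or -1 when the table is
 * full, in which case the aRFS insertion is refused.
 */
static int alloc_location(void)
{
    for (int i = 0; i < MAX_FD_FILTER_NUM; i++) {
        if (!(fd_bmap & (1ULL << i))) {
            fd_bmap |= 1ULL << i;
            return i;
        }
    }
    return -1;
}

int main(void)
{
    printf("loc %d\n", alloc_location());
    printf("loc %d\n", alloc_location());
    return 0;
}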
7128 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) in hclge_rfs_filter_expire() argument
7131 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_rfs_filter_expire()
7135 spin_lock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7136 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { in hclge_rfs_filter_expire()
7137 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7140 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_rfs_filter_expire()
7146 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_rfs_filter_expire()
7149 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7154 static int hclge_clear_arfs_rules(struct hclge_dev *hdev) in hclge_clear_arfs_rules() argument
7161 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) in hclge_clear_arfs_rules()
7164 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_clear_arfs_rules()
7168 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, in hclge_clear_arfs_rules()
7174 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_clear_arfs_rules()
7182 hclge_sync_fd_state(hdev); in hclge_clear_arfs_rules()
7304 static int hclge_parse_cls_flower(struct hclge_dev *hdev, in hclge_parse_cls_flower() argument
7319 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n", in hclge_parse_cls_flower()
7333 static int hclge_check_cls_flower(struct hclge_dev *hdev, in hclge_check_cls_flower() argument
7338 if (tc < 0 || tc > hdev->tc_max) { in hclge_check_cls_flower()
7339 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); in hclge_check_cls_flower()
7344 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_check_cls_flower()
7345 dev_err(&hdev->pdev->dev, in hclge_check_cls_flower()
7347 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_check_cls_flower()
7351 if (test_bit(prio - 1, hdev->fd_bmap)) { in hclge_check_cls_flower()
7352 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); in hclge_check_cls_flower()
7363 struct hclge_dev *hdev = vport->back; in hclge_add_cls_flower() local
7367 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_cls_flower()
7368 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7373 ret = hclge_check_cls_flower(hdev, cls_flower, tc); in hclge_add_cls_flower()
7375 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7384 ret = hclge_parse_cls_flower(hdev, cls_flower, rule); in hclge_add_cls_flower()
7397 ret = hclge_add_fd_entry_common(hdev, rule); in hclge_add_cls_flower()
7404 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, in hclge_find_cls_flower() argument
7410 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_find_cls_flower()
7422 struct hclge_dev *hdev = vport->back; in hclge_del_cls_flower() local
7426 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_cls_flower()
7429 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7431 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); in hclge_del_cls_flower()
7433 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7437 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, in hclge_del_cls_flower()
7440 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7444 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); in hclge_del_cls_flower()
7445 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7450 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) in hclge_sync_fd_list() argument
7456 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) in hclge_sync_fd_list()
7459 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7464 ret = hclge_fd_config_rule(hdev, rule); in hclge_sync_fd_list()
7470 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, in hclge_sync_fd_list()
7474 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_sync_fd_list()
7475 hclge_fd_free_node(hdev, rule); in hclge_sync_fd_list()
7484 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_sync_fd_list()
7486 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7489 static void hclge_sync_fd_table(struct hclge_dev *hdev) in hclge_sync_fd_table() argument
7491 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_sync_fd_table()
7494 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { in hclge_sync_fd_table()
7495 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; in hclge_sync_fd_table()
7497 hclge_clear_fd_rules_in_list(hdev, clear_list); in hclge_sync_fd_table()
7500 hclge_sync_fd_user_def_cfg(hdev, false); in hclge_sync_fd_table()
7502 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); in hclge_sync_fd_table()
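Note: hclge_sync_fd_list() above is a per-rule state machine run from the service task: rules marked to-add are programmed into hardware and become active, rules marked to-delete are cleared from the TCAM and freed. A simplified model with a trimmed state enum (the driver's states are HCLGE_FD_TO_ADD, HCLGE_FD_TO_DEL, HCLGE_FD_ACTIVE and friends):

#include <stdio.h>

enum rule_state { RULE_TO_ADD, RULE_ACTIVE, RULE_TO_DEL };

struct fd_rule { int loc; enum rule_state state; };

/* Push pending software-list changes to "hardware". */
static void sync_fd_list(struct fd_rule *rules, int n)
{
    for (int i = 0; i < n; i++) {
        switch (rules[i].state) {
        case RULE_TO_ADD:
            printf("program rule %d\n", rules[i].loc);
            rules[i].state = RULE_ACTIVE;
            break;
        case RULE_TO_DEL:
            printf("clear rule %d\n", rules[i].loc);
            break;
        case RULE_ACTIVE:
            break;   /* nothing pending */
        }
    }
}

int main(void)
{
    struct fd_rule rules[] = {
        { 0, RULE_TO_ADD }, { 1, RULE_ACTIVE }, { 2, RULE_TO_DEL },
    };

    sync_fd_list(rules, 3);
    return 0;
}

Deferring hardware writes to this sync pass is what lets the fast paths merely flag HCLGE_STATE_FD_TBL_CHANGED and return.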
7508 struct hclge_dev *hdev = vport->back; in hclge_get_hw_reset_stat() local
7510 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || in hclge_get_hw_reset_stat()
7511 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); in hclge_get_hw_reset_stat()
7517 struct hclge_dev *hdev = vport->back; in hclge_get_cmdq_stat() local
7519 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_get_cmdq_stat()
7525 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_resetting() local
7527 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_ae_dev_resetting()
7533 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_reset_cnt() local
7535 return hdev->rst_stats.hw_reset_done_cnt; in hclge_ae_dev_reset_cnt()
7541 struct hclge_dev *hdev = vport->back; in hclge_enable_fd() local
7543 hdev->fd_en = enable; in hclge_enable_fd()
7546 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); in hclge_enable_fd()
7550 hclge_task_schedule(hdev, 0); in hclge_enable_fd()
7553 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) in hclge_cfg_mac_mode() argument
7578 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_mode()
7580 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_mode()
7584 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, in hclge_config_switch_param() argument
7601 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7603 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7613 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7615 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7620 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, in hclge_phy_link_status_wait() argument
7625 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_phy_link_status_wait()
7632 dev_err(&hdev->pdev->dev, in hclge_phy_link_status_wait()
7644 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) in hclge_mac_link_status_wait() argument
7653 ret = hclge_get_mac_link_status(hdev, &link_status); in hclge_mac_link_status_wait()
7664 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, in hclge_mac_phy_link_status_wait() argument
7672 hclge_phy_link_status_wait(hdev, link_ret); in hclge_mac_phy_link_status_wait()
7674 return hclge_mac_link_status_wait(hdev, link_ret); in hclge_mac_phy_link_status_wait()
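Note: after toggling loopback or MAC mode, the functions above poll until the PHY or MAC link status matches the expected state, giving up after a retry budget. A sketch of that bounded poll loop; the fake register and retry count are illustrative, and the driver sleeps between polls:

#include <stdbool.h>
#include <stdio.h>

#define LINK_WAIT_MAX 10

static int fake_link;   /* pretend status register; comes up after a few polls */

static int read_link_status(void)
{
    static int polls;

    if (++polls >= 3)
        fake_link = 1;
    return fake_link;
}

/* Poll until the link status matches 'want' or the budget runs out. */
static bool link_status_wait(int want)
{
    for (int i = 0; i < LINK_WAIT_MAX; i++) {
        if (read_link_status() == want)
            return true;
        /* msleep() between polls in the driver; omitted here */
    }
    return false;
}

int main(void)
{
    printf("link came up: %s\n", link_status_wait(1) ? "yes" : "timeout");
    return 0;
}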
7677 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) in hclge_set_app_loopback() argument
7687 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7689 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7704 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7706 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7711 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, in hclge_cfg_common_loopback_cmd_send() argument
7733 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7742 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_cmd_send()
7744 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7751 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) in hclge_cfg_common_loopback_wait() argument
7767 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_wait()
7769 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_wait()
7778 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); in hclge_cfg_common_loopback_wait()
7781 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); in hclge_cfg_common_loopback_wait()
7788 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, in hclge_cfg_common_loopback() argument
7793 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); in hclge_cfg_common_loopback()
7797 return hclge_cfg_common_loopback_wait(hdev); in hclge_cfg_common_loopback()
7800 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, in hclge_set_common_loopback() argument
7805 ret = hclge_cfg_common_loopback(hdev, en, loop_mode); in hclge_set_common_loopback()
7809 hclge_cfg_mac_mode(hdev, en); in hclge_set_common_loopback()
7811 ret = hclge_mac_phy_link_status_wait(hdev, en, false); in hclge_set_common_loopback()
7813 dev_err(&hdev->pdev->dev, in hclge_set_common_loopback()
7819 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, in hclge_enable_phy_loopback() argument
7837 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, in hclge_disable_phy_loopback() argument
7849 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) in hclge_set_phy_loopback() argument
7851 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_phy_loopback()
7855 if (hnae3_dev_phy_imp_supported(hdev)) in hclge_set_phy_loopback()
7856 return hclge_set_common_loopback(hdev, en, in hclge_set_phy_loopback()
7862 ret = hclge_enable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
7864 ret = hclge_disable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
7866 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7871 hclge_cfg_mac_mode(hdev, en); in hclge_set_phy_loopback()
7873 ret = hclge_mac_phy_link_status_wait(hdev, en, true); in hclge_set_phy_loopback()
7875 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7881 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, in hclge_tqp_enable_cmd_send() argument
7894 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_tqp_enable_cmd_send()
7900 struct hclge_dev *hdev = vport->back; in hclge_tqp_enable() local
7905 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); in hclge_tqp_enable()
7916 struct hclge_dev *hdev = vport->back; in hclge_set_loopback() local
7924 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_set_loopback()
7927 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, in hclge_set_loopback()
7935 ret = hclge_set_app_loopback(hdev, en); in hclge_set_loopback()
7939 ret = hclge_set_common_loopback(hdev, en, loop_mode); in hclge_set_loopback()
7942 ret = hclge_set_phy_loopback(hdev, en); in hclge_set_loopback()
7948 dev_err(&hdev->pdev->dev, in hclge_set_loopback()
7958 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", in hclge_set_loopback()
7964 static int hclge_set_default_loopback(struct hclge_dev *hdev) in hclge_set_default_loopback() argument
7968 ret = hclge_set_app_loopback(hdev, false); in hclge_set_default_loopback()
7972 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); in hclge_set_default_loopback()
7976 return hclge_cfg_common_loopback(hdev, false, in hclge_set_default_loopback()
7980 static void hclge_flush_link_update(struct hclge_dev *hdev) in hclge_flush_link_update() argument
7984 unsigned long last = hdev->serv_processed_cnt; in hclge_flush_link_update()
7987 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && in hclge_flush_link_update()
7989 last == hdev->serv_processed_cnt) in hclge_flush_link_update()
7996 struct hclge_dev *hdev = vport->back; in hclge_set_timer_task() local
7999 hclge_task_schedule(hdev, 0); in hclge_set_timer_task()
8002 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_set_timer_task()
8006 hclge_flush_link_update(hdev); in hclge_set_timer_task()
8013 struct hclge_dev *hdev = vport->back; in hclge_ae_start() local
8016 hclge_cfg_mac_mode(hdev, true); in hclge_ae_start()
8017 clear_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_start()
8018 hdev->hw.mac.link = 0; in hclge_ae_start()
8023 hclge_mac_start_phy(hdev); in hclge_ae_start()
8031 struct hclge_dev *hdev = vport->back; in hclge_ae_stop() local
8033 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_stop()
8034 spin_lock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8035 hclge_clear_arfs_rules(hdev); in hclge_ae_stop()
8036 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8041 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && in hclge_ae_stop()
8042 hdev->reset_type != HNAE3_FUNC_RESET && in hclge_ae_stop()
8043 hdev->reset_type != HNAE3_FLR_RESET) { in hclge_ae_stop()
8044 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
8045 hclge_update_link_status(hdev); in hclge_ae_stop()
8051 hclge_config_mac_tnl_int(hdev, false); in hclge_ae_stop()
8054 hclge_cfg_mac_mode(hdev, false); in hclge_ae_stop()
8056 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
8060 hclge_update_link_status(hdev); in hclge_ae_stop()
8065 struct hclge_dev *hdev = vport->back; in hclge_vport_start() local
8071 if (test_bit(vport->vport_id, hdev->vport_config_block)) { in hclge_vport_start()
8076 hclge_restore_hw_table(hdev); in hclge_vport_start()
8080 clear_bit(vport->vport_id, hdev->vport_config_block); in hclge_vport_start()
8108 struct hclge_dev *hdev = vport->back; in hclge_get_mac_vlan_cmd_status() local
8111 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8124 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8132 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8137 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8145 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8150 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8156 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8226 struct hclge_dev *hdev = vport->back; in hclge_remove_mac_vlan_tbl() local
8236 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_remove_mac_vlan_tbl()
8238 dev_err(&hdev->pdev->dev, in hclge_remove_mac_vlan_tbl()
8255 struct hclge_dev *hdev = vport->back; in hclge_lookup_mac_vlan_tbl() local
8273 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_lookup_mac_vlan_tbl()
8278 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_lookup_mac_vlan_tbl()
8281 dev_err(&hdev->pdev->dev, in hclge_lookup_mac_vlan_tbl()
8297 struct hclge_dev *hdev = vport->back; in hclge_add_mac_vlan_tbl() local
8311 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mac_vlan_tbl()
8327 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); in hclge_add_mac_vlan_tbl()
8337 dev_err(&hdev->pdev->dev, in hclge_add_mac_vlan_tbl()
8346 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, in hclge_set_umv_space() argument
8358 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_umv_space()
8360 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", in hclge_set_umv_space()
8370 static int hclge_init_umv_space(struct hclge_dev *hdev) in hclge_init_umv_space() argument
8375 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); in hclge_init_umv_space()
8379 if (allocated_size < hdev->wanted_umv_size) in hclge_init_umv_space()
8380 dev_warn(&hdev->pdev->dev, in hclge_init_umv_space()
8382 hdev->wanted_umv_size, allocated_size); in hclge_init_umv_space()
8384 hdev->max_umv_size = allocated_size; in hclge_init_umv_space()
8385 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8386 hdev->share_umv_size = hdev->priv_umv_size + in hclge_init_umv_space()
8387 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8389 if (hdev->ae_dev->dev_specs.mc_mac_size) in hclge_init_umv_space()
8390 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); in hclge_init_umv_space()
8395 static void hclge_reset_umv_space(struct hclge_dev *hdev) in hclge_reset_umv_space() argument
8400 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_umv_space()
8401 vport = &hdev->vport[i]; in hclge_reset_umv_space()
8405 mutex_lock(&hdev->vport_lock); in hclge_reset_umv_space()
8406 hdev->share_umv_size = hdev->priv_umv_size + in hclge_reset_umv_space()
8407 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_reset_umv_space()
8408 mutex_unlock(&hdev->vport_lock); in hclge_reset_umv_space()
8410 hdev->used_mc_mac_num = 0; in hclge_reset_umv_space()
8415 struct hclge_dev *hdev = vport->back; in hclge_is_umv_space_full() local
8419 mutex_lock(&hdev->vport_lock); in hclge_is_umv_space_full()
8421 is_full = (vport->used_umv_num >= hdev->priv_umv_size && in hclge_is_umv_space_full()
8422 hdev->share_umv_size == 0); in hclge_is_umv_space_full()
8425 mutex_unlock(&hdev->vport_lock); in hclge_is_umv_space_full()
8432 struct hclge_dev *hdev = vport->back; in hclge_update_umv_space() local
8435 if (vport->used_umv_num > hdev->priv_umv_size) in hclge_update_umv_space()
8436 hdev->share_umv_size++; in hclge_update_umv_space()
8441 if (vport->used_umv_num >= hdev->priv_umv_size && in hclge_update_umv_space()
8442 hdev->share_umv_size > 0) in hclge_update_umv_space()
8443 hdev->share_umv_size--; in hclge_update_umv_space()
8495 struct hclge_dev *hdev = vport->back; in hclge_update_mac_list() local
8520 dev_err(&hdev->pdev->dev, in hclge_update_mac_list()
8556 struct hclge_dev *hdev = vport->back; in hclge_add_uc_addr_common() local
8567 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
8590 mutex_lock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8595 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8598 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8601 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", in hclge_add_uc_addr_common()
8602 hdev->priv_umv_size); in hclge_add_uc_addr_common()
8627 struct hclge_dev *hdev = vport->back; in hclge_rm_uc_addr_common() local
8636 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", in hclge_rm_uc_addr_common()
8646 mutex_lock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8648 mutex_unlock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8668 struct hclge_dev *hdev = vport->back; in hclge_add_mc_addr_common() local
8677 dev_err(&hdev->pdev->dev, in hclge_add_mc_addr_common()
8686 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && in hclge_add_mc_addr_common()
8687 hdev->used_mc_mac_num >= in hclge_add_mc_addr_common()
8688 hdev->ae_dev->dev_specs.mc_mac_size) in hclge_add_mc_addr_common()
8705 hdev->used_mc_mac_num++; in hclge_add_mc_addr_common()
8713 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); in hclge_add_mc_addr_common()
8732 struct hclge_dev *hdev = vport->back; in hclge_rm_mc_addr_common() local
8740 dev_dbg(&hdev->pdev->dev, in hclge_rm_mc_addr_common()
8759 hdev->used_mc_mac_num--; in hclge_rm_mc_addr_common()
8968 struct hclge_dev *hdev = vport->back; in hclge_need_sync_mac_table() local
8970 if (test_bit(vport->vport_id, hdev->vport_config_block)) in hclge_need_sync_mac_table()
8979 static void hclge_sync_mac_table(struct hclge_dev *hdev) in hclge_sync_mac_table() argument
8983 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_mac_table()
8984 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_mac_table()
9049 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_mac_table() local
9063 set_bit(vport->vport_id, hdev->vport_config_block); in hclge_rm_vport_all_mac_table()
9085 struct hclge_dev *hdev = vport->back; in hclge_uninit_vport_mac_list() local
9113 dev_warn(&hdev->pdev->dev, in hclge_uninit_vport_mac_list()
9124 static void hclge_uninit_mac_table(struct hclge_dev *hdev) in hclge_uninit_mac_table() argument
9129 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_mac_table()
9130 vport = &hdev->vport[i]; in hclge_uninit_mac_table()
9136 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, in hclge_get_mac_ethertype_cmd_status() argument
9147 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9159 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9164 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9169 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9183 struct hclge_dev *hdev = vport->back; in hclge_set_vf_mac() local
9185 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_mac()
9191 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9204 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9211 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", in hclge_set_vf_mac()
9216 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, in hclge_add_mgr_tbl() argument
9227 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mgr_tbl()
9229 dev_err(&hdev->pdev->dev, in hclge_add_mgr_tbl()
9238 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); in hclge_add_mgr_tbl()
9241 static int init_mgr_tbl(struct hclge_dev *hdev) in init_mgr_tbl() argument
9247 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); in init_mgr_tbl()
9249 dev_err(&hdev->pdev->dev, in init_mgr_tbl()
9262 struct hclge_dev *hdev = vport->back; in hclge_get_mac_addr() local
9264 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclge_get_mac_addr()
9317 struct hclge_dev *hdev = vport->back; in hclge_set_mac_addr() local
9326 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9332 ret = hclge_pause_addr_cfg(hdev, new_addr); in hclge_set_mac_addr()
9334 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9341 old_addr = hdev->hw.mac.mac_addr; in hclge_set_mac_addr()
9347 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9353 hclge_pause_addr_cfg(hdev, old_addr); in hclge_set_mac_addr()
9360 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); in hclge_set_mac_addr()
9363 hclge_task_schedule(hdev, 0); in hclge_set_mac_addr()
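hclge_set_mac_addr changes the address in two hardware steps and only commits the software copy after both succeed: the pause (flow-control) address is programmed first (9332), and if the MAC-VLAN table update then fails, the pause address is rolled back to the old value (9353) before the software copy at 9360 would have been made. A standalone sketch of that order-and-rollback; cfg_pause_addr and update_mac_table are placeholder helpers:

    #include <string.h>

    /* placeholder hardware helpers; both return 0 on success */
    int cfg_pause_addr(const unsigned char addr[6]);
    int update_mac_table(const unsigned char old_addr[6],
                         const unsigned char new_addr[6]);

    struct mac_state { unsigned char addr[6]; };

    static int change_mac(struct mac_state *s, const unsigned char new_addr[6])
    {
        unsigned char old_addr[6];

        memcpy(old_addr, s->addr, sizeof(old_addr));

        if (cfg_pause_addr(new_addr))          /* step 1: pause address */
            return -1;

        if (update_mac_table(old_addr, new_addr)) {
            cfg_pause_addr(old_addr);          /* undo step 1 on failure */
            return -1;
        }

        memcpy(s->addr, new_addr, sizeof(s->addr));  /* commit last */
        return 0;
    }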
9368 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) in hclge_mii_ioctl() argument
9372 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_mii_ioctl()
9377 data->phy_id = hdev->hw.mac.phy_addr; in hclge_mii_ioctl()
9381 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); in hclge_mii_ioctl()
9385 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); in hclge_mii_ioctl()
9395 struct hclge_dev *hdev = vport->back; in hclge_do_ioctl() local
9399 return hclge_ptp_get_cfg(hdev, ifr); in hclge_do_ioctl()
9401 return hclge_ptp_set_cfg(hdev, ifr); in hclge_do_ioctl()
9403 if (!hdev->hw.mac.phydev) in hclge_do_ioctl()
9404 return hclge_mii_ioctl(hdev, ifr, cmd); in hclge_do_ioctl()
9407 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); in hclge_do_ioctl()
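hclge_do_ioctl routes hardware-timestamp requests to the PTP handlers and falls back to the firmware-backed MII handler only when no PHY device is attached; otherwise the request is forwarded to the PHY layer via phy_mii_ioctl. A condensed sketch of that dispatch; the SIOCGHWTSTAMP/SIOCSHWTSTAMP command names are the standard Linux ioctls, assumed here from the PTP get/set calls at 9399/9401:

    #include <linux/sockios.h>   /* SIOCGHWTSTAMP, SIOCSHWTSTAMP */

    /* placeholder handlers standing in for the hclge_* calls above */
    int ptp_get_cfg(void *ifr);
    int ptp_set_cfg(void *ifr);
    int fw_mii_ioctl(void *ifr, int cmd);        /* PHY managed by firmware */
    int phy_forward(void *phydev, void *ifr, int cmd);

    int do_ioctl(void *phydev, void *ifr, int cmd)
    {
        switch (cmd) {
        case SIOCGHWTSTAMP:
            return ptp_get_cfg(ifr);
        case SIOCSHWTSTAMP:
            return ptp_set_cfg(ifr);
        default:
            if (!phydev)                         /* no attached PHY */
                return fw_mii_ioctl(ifr, cmd);
            return phy_forward(phydev, ifr, cmd);
        }
    }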
9410 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, in hclge_set_port_vlan_filter_bypass() argument
9423 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter_bypass()
9425 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter_bypass()
9432 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, in hclge_set_vlan_filter_ctrl() argument
9445 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9447 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9457 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9459 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9467 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_filter() local
9468 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_set_vport_vlan_filter()
9471 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vport_vlan_filter()
9472 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vport_vlan_filter()
9476 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vport_vlan_filter()
9483 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, in hclge_set_vport_vlan_filter()
9489 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_set_vport_vlan_filter()
9501 struct hclge_dev *hdev = vport->back; in hclge_need_enable_vport_vlan_filter() local
9518 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_need_enable_vport_vlan_filter()
9530 struct hclge_dev *hdev = vport->back; in hclge_enable_vport_vlan_filter() local
9534 mutex_lock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9540 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9546 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9552 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9564 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, in hclge_set_vf_vlan_filter_cmd() argument
9595 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_set_vf_vlan_filter_cmd()
9597 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter_cmd()
9606 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, in hclge_check_vf_vlan_cmd_status() argument
9619 set_bit(vfid, hdev->vf_vlan_full); in hclge_check_vf_vlan_cmd_status()
9620 dev_warn(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9625 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9641 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9649 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, in hclge_set_vf_vlan_common() argument
9652 struct hclge_vport *vport = &hdev->vport[vfid]; in hclge_set_vf_vlan_common()
9661 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { in hclge_set_vf_vlan_common()
9663 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
9670 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); in hclge_set_vf_vlan_common()
9674 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); in hclge_set_vf_vlan_common()
9677 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, in hclge_set_port_vlan_filter() argument
9699 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter()
9701 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter()
9706 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, in hclge_need_update_port_vlan() argument
9711 test_bit(vport_id, hdev->vlan_table[vlan_id])) in hclge_need_update_port_vlan()
9714 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9715 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9722 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9723 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9732 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, in hclge_set_vlan_filter_hw() argument
9745 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); in hclge_set_vlan_filter_hw()
9747 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
9753 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) in hclge_set_vlan_filter_hw()
9756 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) in hclge_set_vlan_filter_hw()
9760 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, in hclge_set_vlan_filter_hw()
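hclge_need_update_port_vlan and hclge_set_vlan_filter_hw maintain one bitmap of vports per VLAN ID (hdev->vlan_table[vlan_id]): the VF-level filter is always updated, but the port-level filter is only touched when the first vport adds a VLAN or the last vport drops it, with duplicate adds/removes caught by the test_and_set/test_and_clear calls at 9714 and 9722. A standalone sketch of that reference-counting-by-bitmap, sized to 64 vports for brevity:

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_N 4096

    static uint64_t vlan_table[VLAN_N];  /* bit v set => vport v uses this VLAN */

    /* returns true when the port-level filter must be (un)programmed */
    static bool port_filter_update_needed(uint16_t vlan, unsigned int vport,
                                          bool is_kill)
    {
        uint64_t bit = 1ULL << vport;
        bool first;

        if (is_kill) {
            if (!(vlan_table[vlan] & bit))
                return false;              /* vport never added it: ignore */
            vlan_table[vlan] &= ~bit;
            return vlan_table[vlan] == 0;  /* last user gone: clear port entry */
        }

        if (vlan_table[vlan] & bit)
            return false;                  /* duplicate add: ignore */
        first = vlan_table[vlan] == 0;
        vlan_table[vlan] |= bit;
        return first;                      /* first user: program port entry */
    }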
9770 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_tx_offload_cfg() local
9802 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_tx_offload_cfg()
9804 dev_err(&hdev->pdev->dev, in hclge_set_vlan_tx_offload_cfg()
9815 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_rx_offload_cfg() local
9842 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_rx_offload_cfg()
9844 dev_err(&hdev->pdev->dev, in hclge_set_vlan_rx_offload_cfg()
9906 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) in hclge_set_vlan_protocol_type() argument
9916 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); in hclge_set_vlan_protocol_type()
9918 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); in hclge_set_vlan_protocol_type()
9920 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); in hclge_set_vlan_protocol_type()
9922 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); in hclge_set_vlan_protocol_type()
9924 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9926 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9935 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); in hclge_set_vlan_protocol_type()
9936 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); in hclge_set_vlan_protocol_type()
9938 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9940 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9947 static int hclge_init_vlan_filter(struct hclge_dev *hdev) in hclge_init_vlan_filter() argument
9953 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_init_vlan_filter()
9954 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_init_vlan_filter()
9959 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_filter()
9960 vport = &hdev->vport[i]; in hclge_init_vlan_filter()
9961 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_init_vlan_filter()
9969 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_init_vlan_filter()
9973 static int hclge_init_vlan_type(struct hclge_dev *hdev) in hclge_init_vlan_type() argument
9975 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9976 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9977 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9978 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9979 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9980 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
9982 return hclge_set_vlan_protocol_type(hdev); in hclge_init_vlan_type()
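hclge_init_vlan_type seeds every VLAN ethertype the hardware can match or insert, inner and outer, first and second tag, RX and TX, with the standard 802.1Q value before hclge_set_vlan_protocol_type pushes the set to firmware. A trivial sketch of the same default table, with an illustrative struct:

    #include <stdint.h>

    #define ETH_P_8021Q 0x8100  /* standard 802.1Q TPID */

    struct vlan_type_cfg {
        uint16_t rx_in_fst, rx_in_sec;  /* RX inner, first/second tag */
        uint16_t rx_ot_fst, rx_ot_sec;  /* RX outer, first/second tag */
        uint16_t tx_ot, tx_in;          /* TX outer/inner insertion */
    };

    static void vlan_type_defaults(struct vlan_type_cfg *c)
    {
        c->rx_in_fst = c->rx_in_sec = ETH_P_8021Q;
        c->rx_ot_fst = c->rx_ot_sec = ETH_P_8021Q;
        c->tx_ot = c->tx_in = ETH_P_8021Q;
    }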
9985 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) in hclge_init_vport_vlan_offload() argument
9992 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vport_vlan_offload()
9993 vport = &hdev->vport[i]; in hclge_init_vport_vlan_offload()
10005 static int hclge_init_vlan_config(struct hclge_dev *hdev) in hclge_init_vlan_config() argument
10007 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_init_vlan_config()
10010 ret = hclge_init_vlan_filter(hdev); in hclge_init_vlan_config()
10014 ret = hclge_init_vlan_type(hdev); in hclge_init_vlan_config()
10018 ret = hclge_init_vport_vlan_offload(hdev); in hclge_init_vlan_config()
10029 struct hclge_dev *hdev = vport->back; in hclge_add_vport_vlan_table() local
10031 mutex_lock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10035 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10042 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10050 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10056 struct hclge_dev *hdev = vport->back; in hclge_add_vport_all_vlan_table() local
10059 mutex_lock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10063 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_add_vport_all_vlan_table()
10067 dev_err(&hdev->pdev->dev, in hclge_add_vport_all_vlan_table()
10071 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10078 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10087 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_vlan_table() local
10089 mutex_lock(&hdev->vport_lock); in hclge_rm_vport_vlan_table()
10094 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_vlan_table()
10106 mutex_unlock(&hdev->vport_lock); in hclge_rm_vport_vlan_table()
10112 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_vlan_table() local
10114 mutex_lock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10118 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_all_vlan_table()
10130 clear_bit(vport->vport_id, hdev->vf_vlan_full); in hclge_rm_vport_all_vlan_table()
10131 mutex_unlock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10134 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) in hclge_uninit_vport_vlan_table() argument
10140 mutex_lock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10142 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_vlan_table()
10143 vport = &hdev->vport[i]; in hclge_uninit_vport_vlan_table()
10150 mutex_unlock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10153 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) in hclge_restore_vport_port_base_vlan_config() argument
10164 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { in hclge_restore_vport_port_base_vlan_config()
10165 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; in hclge_restore_vport_port_base_vlan_config()
10175 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); in hclge_restore_vport_port_base_vlan_config()
10176 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), in hclge_restore_vport_port_base_vlan_config()
10187 struct hclge_dev *hdev = vport->back; in hclge_restore_vport_vlan_table() local
10190 mutex_lock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10194 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_restore_vport_vlan_table()
10203 mutex_unlock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10237 static void hclge_restore_hw_table(struct hclge_dev *hdev) in hclge_restore_hw_table() argument
10239 struct hclge_vport *vport = &hdev->vport[0]; in hclge_restore_hw_table()
10243 hclge_restore_vport_port_base_vlan_config(hdev); in hclge_restore_hw_table()
10245 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_restore_hw_table()
10273 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_fltr_change() local
10275 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_set_vport_vlan_fltr_change()
10284 struct hclge_dev *hdev = vport->back; in hclge_update_vlan_filter_entries() local
10290 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); in hclge_update_vlan_filter_entries()
10293 return hclge_set_vlan_filter_hw(hdev, in hclge_update_vlan_filter_entries()
10303 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); in hclge_update_vlan_filter_entries()
10307 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), in hclge_update_vlan_filter_entries()
10332 struct hclge_dev *hdev = vport->back; in hclge_modify_port_base_vlan_tag() local
10336 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), in hclge_modify_port_base_vlan_tag()
10345 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, in hclge_modify_port_base_vlan_tag()
10348 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_modify_port_base_vlan_tag()
10352 dev_err(&hdev->pdev->dev, in hclge_modify_port_base_vlan_tag()
10426 struct hclge_dev *hdev = vport->back; in hclge_set_vf_vlan_filter() local
10431 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_vlan_filter()
10434 vport = hclge_get_vf_vport(hdev, vfid); in hclge_set_vf_vlan_filter()
10456 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter()
10470 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], in hclge_set_vf_vlan_filter()
10477 static void hclge_clear_vf_vlan(struct hclge_dev *hdev) in hclge_clear_vf_vlan() argument
10485 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_clear_vf_vlan()
10486 vport = &hdev->vport[vf]; in hclge_clear_vf_vlan()
10489 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_clear_vf_vlan()
10493 dev_err(&hdev->pdev->dev, in hclge_clear_vf_vlan()
10503 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_filter() local
10511 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_set_vlan_filter()
10512 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { in hclge_set_vlan_filter()
10524 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, in hclge_set_vlan_filter()
10548 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) in hclge_sync_vlan_fltr_state() argument
10554 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_fltr_state()
10555 vport = &hdev->vport[i]; in hclge_sync_vlan_fltr_state()
10563 dev_err(&hdev->pdev->dev, in hclge_sync_vlan_fltr_state()
10573 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) in hclge_sync_vlan_filter() argument
10581 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_filter()
10582 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_vlan_filter()
10587 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_sync_vlan_filter()
10606 hclge_sync_vlan_fltr_state(hdev); in hclge_sync_vlan_filter()
10609 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) in hclge_set_mac_mtu() argument
10620 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_mac_mtu()
10632 struct hclge_dev *hdev = vport->back; in hclge_set_vport_mtu() local
10638 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) in hclge_set_vport_mtu()
10642 mutex_lock(&hdev->vport_lock); in hclge_set_vport_mtu()
10644 if (vport->vport_id && max_frm_size > hdev->mps) { in hclge_set_vport_mtu()
10645 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10649 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10654 for (i = 1; i < hdev->num_alloc_vport; i++) in hclge_set_vport_mtu()
10655 if (max_frm_size < hdev->vport[i].mps) { in hclge_set_vport_mtu()
10656 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10658 i, hdev->vport[i].mps); in hclge_set_vport_mtu()
10659 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10663 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_set_vport_mtu()
10665 ret = hclge_set_mac_mtu(hdev, max_frm_size); in hclge_set_vport_mtu()
10667 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10672 hdev->mps = max_frm_size; in hclge_set_vport_mtu()
10675 ret = hclge_buffer_alloc(hdev); in hclge_set_vport_mtu()
10677 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10681 hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_set_vport_mtu()
10682 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
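hclge_set_vport_mtu is a quiesce/reconfigure/resume sequence under vport_lock: validate the frame size against the device cap (10638) and every other vport's MPS (10654-10658), stop the client, program the MAC MTU, record the new MPS, re-split the RX packet buffer, and bring the client back up even when a late step failed. A hedged sketch of the control flow, with placeholder helpers:

    #include <errno.h>

    /* placeholder helpers standing in for the hclge_* calls above */
    int mac_set_mtu(unsigned int max_frm);
    int buffer_realloc(void);
    void client_notify(int up);

    static int set_vport_mtu(unsigned int max_frm, unsigned int cap)
    {
        int ret;

        if (max_frm > cap)
            return -EINVAL;        /* reject before touching hardware */

        /* ...vport_lock taken, other vports' MPS checked here... */

        client_notify(0);          /* quiesce traffic */

        ret = mac_set_mtu(max_frm);
        if (ret)
            goto out;

        ret = buffer_realloc();    /* RX buffer split depends on frame size */
    out:
        client_notify(1);          /* resume even on failure */
        return ret;
    }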
10686 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, in hclge_reset_tqp_cmd_send() argument
10700 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_tqp_cmd_send()
10702 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd_send()
10710 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, in hclge_get_reset_status() argument
10722 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_reset_status()
10724 dev_err(&hdev->pdev->dev, in hclge_get_reset_status()
10748 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp_cmd() local
10757 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); in hclge_reset_tqp_cmd()
10759 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10766 ret = hclge_get_reset_status(hdev, queue_gid, in hclge_reset_tqp_cmd()
10779 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10784 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); in hclge_reset_tqp_cmd()
10786 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10802 struct hclge_dev *hdev = vport->back; in hclge_reset_rcb() local
10817 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_rcb()
10819 dev_err(&hdev->pdev->dev, in hclge_reset_rcb()
10829 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", in hclge_reset_rcb()
10843 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp() local
10850 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
10862 struct hclge_dev *hdev = vport->back; in hclge_get_fw_version() local
10864 return hdev->fw_version; in hclge_get_fw_version()
10867 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_set_flowctrl_adv() argument
10869 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_flowctrl_adv()
10877 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_cfg_pauseparam() argument
10881 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) in hclge_cfg_pauseparam()
10884 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); in hclge_cfg_pauseparam()
10886 dev_err(&hdev->pdev->dev, in hclge_cfg_pauseparam()
10892 int hclge_cfg_flowctrl(struct hclge_dev *hdev) in hclge_cfg_flowctrl() argument
10894 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_cfg_flowctrl()
10921 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); in hclge_cfg_flowctrl()
10928 struct hclge_dev *hdev = vport->back; in hclge_get_pauseparam() local
10929 u8 media_type = hdev->hw.mac.media_type; in hclge_get_pauseparam()
10934 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_get_pauseparam()
10940 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { in hclge_get_pauseparam()
10943 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { in hclge_get_pauseparam()
10946 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { in hclge_get_pauseparam()
10955 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, in hclge_record_user_pauseparam() argument
10959 hdev->fc_mode_last_time = HCLGE_FC_FULL; in hclge_record_user_pauseparam()
10961 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; in hclge_record_user_pauseparam()
10963 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; in hclge_record_user_pauseparam()
10965 hdev->fc_mode_last_time = HCLGE_FC_NONE; in hclge_record_user_pauseparam()
10967 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; in hclge_record_user_pauseparam()
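hclge_record_user_pauseparam collapses the two ethtool pause knobs into one flow-control mode, remembers it as the last user choice (fc_mode_last_time), and makes it the active tm_info.fc_mode. The mapping the fragments at 10959-10965 show, as a standalone sketch:

    #include <stdbool.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static enum fc_mode pause_to_fc_mode(bool rx_en, bool tx_en)
    {
        if (rx_en && tx_en)
            return FC_FULL;
        if (rx_en)
            return FC_RX_PAUSE;
        if (tx_en)
            return FC_TX_PAUSE;
        return FC_NONE;
    }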
10974 struct hclge_dev *hdev = vport->back; in hclge_set_pauseparam() local
10975 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_pauseparam()
10978 if (phydev || hnae3_dev_phy_imp_supported(hdev)) { in hclge_set_pauseparam()
10981 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
10987 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_set_pauseparam()
10988 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
10993 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); in hclge_set_pauseparam()
10995 hclge_record_user_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
10997 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) in hclge_set_pauseparam()
10998 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
11010 struct hclge_dev *hdev = vport->back; in hclge_get_ksettings_an_result() local
11013 *speed = hdev->hw.mac.speed; in hclge_get_ksettings_an_result()
11015 *duplex = hdev->hw.mac.duplex; in hclge_get_ksettings_an_result()
11017 *auto_neg = hdev->hw.mac.autoneg; in hclge_get_ksettings_an_result()
11019 *lane_num = hdev->hw.mac.lane_num; in hclge_get_ksettings_an_result()
11026 struct hclge_dev *hdev = vport->back; in hclge_get_media_type() local
11032 hclge_update_port_info(hdev); in hclge_get_media_type()
11035 *media_type = hdev->hw.mac.media_type; in hclge_get_media_type()
11038 *module_type = hdev->hw.mac.module_type; in hclge_get_media_type()
11045 struct hclge_dev *hdev = vport->back; in hclge_get_mdix_mode() local
11046 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mdix_mode()
11091 static void hclge_info_show(struct hclge_dev *hdev) in hclge_info_show() argument
11093 struct device *dev = &hdev->pdev->dev; in hclge_info_show()
11097 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); in hclge_info_show()
11098 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); in hclge_info_show()
11099 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); in hclge_info_show()
11100 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); in hclge_info_show()
11101 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); in hclge_info_show()
11102 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); in hclge_info_show()
11103 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); in hclge_info_show()
11104 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); in hclge_info_show()
11105 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); in hclge_info_show()
11107 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); in hclge_info_show()
11109 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); in hclge_info_show()
11111 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable"); in hclge_info_show()
11113 hdev->tx_spare_buf_size); in hclge_info_show()
11122 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_nic_client_instance() local
11123 int rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_nic_client_instance()
11130 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11131 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_nic_client_instance()
11132 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_nic_client_instance()
11138 ret = hclge_config_nic_hw_error(hdev, true); in hclge_init_nic_client_instance()
11147 if (netif_msg_drv(&hdev->vport->nic)) in hclge_init_nic_client_instance()
11148 hclge_info_show(hdev); in hclge_init_nic_client_instance()
11153 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11154 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_nic_client_instance()
11165 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_roce_client_instance() local
11170 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclge_init_roce_client_instance()
11171 !hdev->nic_client) in hclge_init_roce_client_instance()
11174 client = hdev->roce_client; in hclge_init_roce_client_instance()
11179 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_roce_client_instance()
11184 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11185 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_roce_client_instance()
11186 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_roce_client_instance()
11192 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_init_roce_client_instance()
11204 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11205 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_roce_client_instance()
11208 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_init_roce_client_instance()
11216 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_client_instance() local
11217 struct hclge_vport *vport = &hdev->vport[0]; in hclge_init_client_instance()
11222 hdev->nic_client = client; in hclge_init_client_instance()
11234 if (hnae3_dev_roce_supported(hdev)) { in hclge_init_client_instance()
11235 hdev->roce_client = client; in hclge_init_client_instance()
11251 hdev->nic_client = NULL; in hclge_init_client_instance()
11255 hdev->roce_client = NULL; in hclge_init_client_instance()
11263 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_client_instance() local
11264 struct hclge_vport *vport = &hdev->vport[0]; in hclge_uninit_client_instance()
11266 if (hdev->roce_client) { in hclge_uninit_client_instance()
11267 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11268 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
11271 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_uninit_client_instance()
11272 hdev->roce_client = NULL; in hclge_uninit_client_instance()
11277 if (hdev->nic_client && client->ops->uninit_instance) { in hclge_uninit_client_instance()
11278 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11279 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
11283 hdev->nic_client = NULL; in hclge_uninit_client_instance()
11288 static int hclge_dev_mem_map(struct hclge_dev *hdev) in hclge_dev_mem_map() argument
11290 struct pci_dev *pdev = hdev->pdev; in hclge_dev_mem_map()
11291 struct hclge_hw *hw = &hdev->hw; in hclge_dev_mem_map()
11309 static int hclge_pci_init(struct hclge_dev *hdev) in hclge_pci_init() argument
11311 struct pci_dev *pdev = hdev->pdev; in hclge_pci_init()
11339 hw = &hdev->hw; in hclge_pci_init()
11347 ret = hclge_dev_mem_map(hdev); in hclge_pci_init()
11351 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); in hclge_pci_init()
11356 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_init()
11366 static void hclge_pci_uninit(struct hclge_dev *hdev) in hclge_pci_uninit() argument
11368 struct pci_dev *pdev = hdev->pdev; in hclge_pci_uninit()
11370 if (hdev->hw.hw.mem_base) in hclge_pci_uninit()
11371 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); in hclge_pci_uninit()
11373 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_uninit()
11380 static void hclge_state_init(struct hclge_dev *hdev) in hclge_state_init() argument
11382 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); in hclge_state_init()
11383 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_init()
11384 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11385 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_state_init()
11386 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_state_init()
11387 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11388 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_state_init()
11391 static void hclge_state_uninit(struct hclge_dev *hdev) in hclge_state_uninit() argument
11393 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_uninit()
11394 set_bit(HCLGE_STATE_REMOVING, &hdev->state); in hclge_state_uninit()
11396 if (hdev->reset_timer.function) in hclge_state_uninit()
11397 del_timer_sync(&hdev->reset_timer); in hclge_state_uninit()
11398 if (hdev->service_task.work.func) in hclge_state_uninit()
11399 cancel_delayed_work_sync(&hdev->service_task); in hclge_state_uninit()
11408 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_prepare_general() local
11413 down(&hdev->reset_sem); in hclge_reset_prepare_general()
11414 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11415 hdev->reset_type = rst_type; in hclge_reset_prepare_general()
11416 ret = hclge_reset_prepare(hdev); in hclge_reset_prepare_general()
11417 if (!ret && !hdev->reset_pending) in hclge_reset_prepare_general()
11420 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_general()
11422 ret, hdev->reset_pending, retry_cnt); in hclge_reset_prepare_general()
11423 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11424 up(&hdev->reset_sem); in hclge_reset_prepare_general()
11429 hclge_enable_vector(&hdev->misc_vector, false); in hclge_reset_prepare_general()
11430 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_general()
11432 if (hdev->reset_type == HNAE3_FLR_RESET) in hclge_reset_prepare_general()
11433 hdev->rst_stats.flr_rst_cnt++; in hclge_reset_prepare_general()
11438 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_done() local
11441 hclge_enable_vector(&hdev->misc_vector, true); in hclge_reset_done()
11443 ret = hclge_reset_rebuild(hdev); in hclge_reset_done()
11445 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); in hclge_reset_done()
11447 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_done()
11448 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_done()
11449 up(&hdev->reset_sem); in hclge_reset_done()
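hclge_reset_prepare_general and hclge_reset_done bracket a reset with a semaphore plus a state bit: prepare takes reset_sem and sets RST_HANDLING, and on a failed attempt drops both so the retry loop can run again; done rebuilds, clears the bit, and releases the semaphore, so exactly one reset is ever in flight. A userspace sketch of the bracket using POSIX semaphores, with placeholder prepare/rebuild steps:

    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static sem_t reset_sem;               /* sem_init(&reset_sem, 0, 1) at start */
    static atomic_bool rst_handling;

    int do_prepare(void);                 /* placeholder hardware steps */
    void do_rebuild(void);

    static int reset_prepare(void)
    {
        sem_wait(&reset_sem);             /* serialize resets */
        atomic_store(&rst_handling, true);
        if (do_prepare() == 0)
            return 0;                     /* stay inside the bracket */
        atomic_store(&rst_handling, false);
        sem_post(&reset_sem);             /* failed attempt: drop and retry */
        return -1;
    }

    static void reset_done(void)
    {
        do_rebuild();                     /* rebuild tables, re-enable vectors */
        atomic_store(&rst_handling, false);
        sem_post(&reset_sem);
    }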
11452 static void hclge_clear_resetting_state(struct hclge_dev *hdev) in hclge_clear_resetting_state() argument
11456 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_clear_resetting_state()
11457 struct hclge_vport *vport = &hdev->vport[i]; in hclge_clear_resetting_state()
11461 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); in hclge_clear_resetting_state()
11463 dev_warn(&hdev->pdev->dev, in hclge_clear_resetting_state()
11469 static int hclge_clear_hw_resource(struct hclge_dev *hdev) in hclge_clear_hw_resource() argument
11476 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_clear_hw_resource()
11484 dev_err(&hdev->pdev->dev, in hclge_clear_hw_resource()
11491 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) in hclge_init_rxd_adv_layout() argument
11493 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_init_rxd_adv_layout()
11494 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); in hclge_init_rxd_adv_layout()
11497 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev) in hclge_uninit_rxd_adv_layout() argument
11499 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_uninit_rxd_adv_layout()
11500 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); in hclge_uninit_rxd_adv_layout()
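The RXD advanced-layout pair above is the driver's usual pattern for optional hardware features: probe the capability bit once, and gate a single register write on it so older silicon is never touched. A one-function sketch; the register offset is illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    void write_reg(uint32_t reg, uint32_t val);  /* placeholder MMIO write */

    #define RXD_ADV_LAYOUT_EN_REG 0x0            /* illustrative offset */

    static void set_rxd_adv_layout(bool supported, bool enable)
    {
        if (!supported)
            return;                      /* capability absent: no such register */
        write_reg(RXD_ADV_LAYOUT_EN_REG, enable ? 1 : 0);
    }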
11506 struct hclge_dev *hdev; in hclge_init_ae_dev() local
11509 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclge_init_ae_dev()
11510 if (!hdev) in hclge_init_ae_dev()
11513 hdev->pdev = pdev; in hclge_init_ae_dev()
11514 hdev->ae_dev = ae_dev; in hclge_init_ae_dev()
11515 hdev->reset_type = HNAE3_NONE_RESET; in hclge_init_ae_dev()
11516 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_init_ae_dev()
11517 ae_dev->priv = hdev; in hclge_init_ae_dev()
11520 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; in hclge_init_ae_dev()
11522 mutex_init(&hdev->vport_lock); in hclge_init_ae_dev()
11523 spin_lock_init(&hdev->fd_rule_lock); in hclge_init_ae_dev()
11524 sema_init(&hdev->reset_sem, 1); in hclge_init_ae_dev()
11526 ret = hclge_pci_init(hdev); in hclge_init_ae_dev()
11530 ret = hclge_devlink_init(hdev); in hclge_init_ae_dev()
11535 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); in hclge_init_ae_dev()
11540 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_init_ae_dev()
11541 true, hdev->reset_pending); in hclge_init_ae_dev()
11545 ret = hclge_clear_hw_resource(hdev); in hclge_init_ae_dev()
11549 ret = hclge_get_cap(hdev); in hclge_init_ae_dev()
11553 ret = hclge_query_dev_specs(hdev); in hclge_init_ae_dev()
11560 ret = hclge_configure(hdev); in hclge_init_ae_dev()
11566 ret = hclge_init_msi(hdev); in hclge_init_ae_dev()
11572 ret = hclge_misc_irq_init(hdev); in hclge_init_ae_dev()
11576 ret = hclge_alloc_tqps(hdev); in hclge_init_ae_dev()
11582 ret = hclge_alloc_vport(hdev); in hclge_init_ae_dev()
11586 ret = hclge_map_tqp(hdev); in hclge_init_ae_dev()
11590 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { in hclge_init_ae_dev()
11591 if (hnae3_dev_phy_imp_supported(hdev)) in hclge_init_ae_dev()
11592 ret = hclge_update_tp_port_info(hdev); in hclge_init_ae_dev()
11594 ret = hclge_mac_mdio_config(hdev); in hclge_init_ae_dev()
11600 ret = hclge_init_umv_space(hdev); in hclge_init_ae_dev()
11604 ret = hclge_mac_init(hdev); in hclge_init_ae_dev()
11610 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_init_ae_dev()
11616 ret = hclge_config_gro(hdev); in hclge_init_ae_dev()
11620 ret = hclge_init_vlan_config(hdev); in hclge_init_ae_dev()
11626 ret = hclge_tm_schd_init(hdev); in hclge_init_ae_dev()
11632 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, in hclge_init_ae_dev()
11633 &hdev->rss_cfg); in hclge_init_ae_dev()
11639 ret = hclge_rss_init_hw(hdev); in hclge_init_ae_dev()
11645 ret = init_mgr_tbl(hdev); in hclge_init_ae_dev()
11651 ret = hclge_init_fd_config(hdev); in hclge_init_ae_dev()
11658 ret = hclge_ptp_init(hdev); in hclge_init_ae_dev()
11662 ret = hclge_update_port_info(hdev); in hclge_init_ae_dev()
11666 INIT_KFIFO(hdev->mac_tnl_log); in hclge_init_ae_dev()
11668 hclge_dcb_ops_set(hdev); in hclge_init_ae_dev()
11670 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); in hclge_init_ae_dev()
11671 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); in hclge_init_ae_dev()
11673 hclge_clear_all_event_cause(hdev); in hclge_init_ae_dev()
11674 hclge_clear_resetting_state(hdev); in hclge_init_ae_dev()
11677 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_init_ae_dev()
11678 hclge_handle_occurred_error(hdev); in hclge_init_ae_dev()
11691 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_init_ae_dev()
11694 hclge_init_rxd_adv_layout(hdev); in hclge_init_ae_dev()
11697 hclge_enable_vector(&hdev->misc_vector, true); in hclge_init_ae_dev()
11699 hclge_state_init(hdev); in hclge_init_ae_dev()
11700 hdev->last_reset_time = jiffies; in hclge_init_ae_dev()
11702 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", in hclge_init_ae_dev()
11705 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); in hclge_init_ae_dev()
11710 if (hdev->hw.mac.phydev) in hclge_init_ae_dev()
11711 mdiobus_unregister(hdev->hw.mac.mdio_bus); in hclge_init_ae_dev()
11713 hclge_misc_irq_uninit(hdev); in hclge_init_ae_dev()
11717 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_init_ae_dev()
11719 hclge_devlink_uninit(hdev); in hclge_init_ae_dev()
11721 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_init_ae_dev()
11726 mutex_destroy(&hdev->vport_lock); in hclge_init_ae_dev()
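The tail of hclge_init_ae_dev is the kernel's canonical goto-unwind: each failure jumps to a label that undoes only the steps that had already succeeded, in strict reverse order (MDIO bus, misc IRQ, command queue, devlink, PCI mapping, and finally the vport mutex). A distilled sketch of the idiom with placeholder init/teardown pairs:

    /* placeholder init/teardown pairs */
    int step_a(void);  void undo_a(void);
    int step_b(void);  void undo_b(void);
    int step_c(void);

    static int init_dev(void)
    {
        if (step_a())
            goto err;                /* nothing to undo yet */
        if (step_b())
            goto err_undo_a;
        if (step_c())
            goto err_undo_b;
        return 0;

    err_undo_b:
        undo_b();
    err_undo_a:
        undo_a();                    /* strict reverse order of setup */
    err:
        return -1;
    }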
11730 static void hclge_stats_clear(struct hclge_dev *hdev) in hclge_stats_clear() argument
11732 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); in hclge_stats_clear()
11733 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); in hclge_stats_clear()
11736 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_mac_spoofchk() argument
11738 return hclge_config_switch_param(hdev, vf, enable, in hclge_set_mac_spoofchk()
11742 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_vlan_spoofchk() argument
11744 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vlan_spoofchk()
11749 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_vf_spoofchk_hw() argument
11753 ret = hclge_set_mac_spoofchk(hdev, vf, enable); in hclge_set_vf_spoofchk_hw()
11755 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11761 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); in hclge_set_vf_spoofchk_hw()
11763 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11774 struct hclge_dev *hdev = vport->back; in hclge_set_vf_spoofchk() local
11778 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_spoofchk()
11781 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_spoofchk()
11788 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) in hclge_set_vf_spoofchk()
11789 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
11793 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
11797 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); in hclge_set_vf_spoofchk()
11805 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) in hclge_reset_vport_spoofchk() argument
11807 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_spoofchk()
11811 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_reset_vport_spoofchk()
11815 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_spoofchk()
11816 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, in hclge_reset_vport_spoofchk()
11830 struct hclge_dev *hdev = vport->back; in hclge_set_vf_trust() local
11833 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_trust()
11842 hclge_task_schedule(hdev, 0); in hclge_set_vf_trust()
11847 static void hclge_reset_vf_rate(struct hclge_dev *hdev) in hclge_reset_vf_rate() argument
11853 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_reset_vf_rate()
11854 struct hclge_vport *vport = &hdev->vport[vf]; in hclge_reset_vf_rate()
11859 dev_err(&hdev->pdev->dev, in hclge_reset_vf_rate()
11865 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, in hclge_vf_rate_param_check() argument
11869 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { in hclge_vf_rate_param_check()
11870 dev_err(&hdev->pdev->dev, in hclge_vf_rate_param_check()
11872 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); in hclge_vf_rate_param_check()
11883 struct hclge_dev *hdev = vport->back; in hclge_set_vf_rate() local
11886 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); in hclge_set_vf_rate()
11890 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_rate()
11906 static int hclge_resume_vf_rate(struct hclge_dev *hdev) in hclge_resume_vf_rate() argument
11908 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_resume_vf_rate()
11914 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { in hclge_resume_vf_rate()
11915 vport = hclge_get_vf_vport(hdev, vf); in hclge_resume_vf_rate()
11928 dev_err(&hdev->pdev->dev, in hclge_resume_vf_rate()
11938 static void hclge_reset_vport_state(struct hclge_dev *hdev) in hclge_reset_vport_state() argument
11940 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_state()
11943 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_state()
11951 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_ae_dev() local
11955 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_reset_ae_dev()
11957 hclge_stats_clear(hdev); in hclge_reset_ae_dev()
11961 if (hdev->reset_type == HNAE3_IMP_RESET || in hclge_reset_ae_dev()
11962 hdev->reset_type == HNAE3_GLOBAL_RESET) { in hclge_reset_ae_dev()
11963 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); in hclge_reset_ae_dev()
11964 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); in hclge_reset_ae_dev()
11965 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); in hclge_reset_ae_dev()
11966 hclge_reset_umv_space(hdev); in hclge_reset_ae_dev()
11969 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_reset_ae_dev()
11970 true, hdev->reset_pending); in hclge_reset_ae_dev()
11976 ret = hclge_map_tqp(hdev); in hclge_reset_ae_dev()
11982 ret = hclge_mac_init(hdev); in hclge_reset_ae_dev()
11988 ret = hclge_tp_port_init(hdev); in hclge_reset_ae_dev()
11995 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_reset_ae_dev()
12001 ret = hclge_config_gro(hdev); in hclge_reset_ae_dev()
12005 ret = hclge_init_vlan_config(hdev); in hclge_reset_ae_dev()
12011 ret = hclge_tm_init_hw(hdev, true); in hclge_reset_ae_dev()
12017 ret = hclge_rss_init_hw(hdev); in hclge_reset_ae_dev()
12023 ret = init_mgr_tbl(hdev); in hclge_reset_ae_dev()
12030 ret = hclge_init_fd_config(hdev); in hclge_reset_ae_dev()
12036 ret = hclge_ptp_init(hdev); in hclge_reset_ae_dev()
12041 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_reset_ae_dev()
12042 hclge_handle_occurred_error(hdev); in hclge_reset_ae_dev()
12049 ret = hclge_config_nic_hw_error(hdev, true); in hclge_reset_ae_dev()
12057 if (hdev->roce_client) { in hclge_reset_ae_dev()
12058 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_reset_ae_dev()
12067 hclge_reset_vport_state(hdev); in hclge_reset_ae_dev()
12068 ret = hclge_reset_vport_spoofchk(hdev); in hclge_reset_ae_dev()
12072 ret = hclge_resume_vf_rate(hdev); in hclge_reset_ae_dev()
12076 hclge_init_rxd_adv_layout(hdev); in hclge_reset_ae_dev()
12086 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_ae_dev() local
12087 struct hclge_mac *mac = &hdev->hw.mac; in hclge_uninit_ae_dev()
12089 hclge_reset_vf_rate(hdev); in hclge_uninit_ae_dev()
12090 hclge_clear_vf_vlan(hdev); in hclge_uninit_ae_dev()
12091 hclge_state_uninit(hdev); in hclge_uninit_ae_dev()
12092 hclge_ptp_uninit(hdev); in hclge_uninit_ae_dev()
12093 hclge_uninit_rxd_adv_layout(hdev); in hclge_uninit_ae_dev()
12094 hclge_uninit_mac_table(hdev); in hclge_uninit_ae_dev()
12095 hclge_del_all_fd_entries(hdev); in hclge_uninit_ae_dev()
12101 hclge_enable_vector(&hdev->misc_vector, false); in hclge_uninit_ae_dev()
12102 synchronize_irq(hdev->misc_vector.vector_irq); in hclge_uninit_ae_dev()
12105 hclge_config_mac_tnl_int(hdev, false); in hclge_uninit_ae_dev()
12106 hclge_config_nic_hw_error(hdev, false); in hclge_uninit_ae_dev()
12107 hclge_config_rocee_ras_interrupt(hdev, false); in hclge_uninit_ae_dev()
12109 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_uninit_ae_dev()
12110 hclge_misc_irq_uninit(hdev); in hclge_uninit_ae_dev()
12111 hclge_devlink_uninit(hdev); in hclge_uninit_ae_dev()
12112 hclge_pci_uninit(hdev); in hclge_uninit_ae_dev()
12113 hclge_uninit_vport_vlan_table(hdev); in hclge_uninit_ae_dev()
12114 mutex_destroy(&hdev->vport_lock); in hclge_uninit_ae_dev()
12121 struct hclge_dev *hdev = vport->back; in hclge_get_max_channels() local
12123 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); in hclge_get_max_channels()
12139 struct hclge_dev *hdev = vport->back; in hclge_get_tqps_and_rss_info() local
12142 *max_rss_size = hdev->pf_rss_size_max; in hclge_get_tqps_and_rss_info()
12149 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tc_mode_cfg() local
12161 if (!(hdev->hw_tc_map & BIT(i))) in hclge_set_rss_tc_mode_cfg()
12169 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_set_rss_tc_mode_cfg()
12179 struct hclge_dev *hdev = vport->back; in hclge_set_channels() local
12188 ret = hclge_tm_vport_map_update(hdev); in hclge_set_channels()
12190 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); in hclge_set_channels()
12213 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclge_set_channels()
12220 dev_info(&hdev->pdev->dev, in hclge_set_channels()
12228 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, in hclge_get_regs_num() argument
12236 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_regs_num()
12238 dev_err(&hdev->pdev->dev, in hclge_get_regs_num()
12253 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, in hclge_get_32_bit_regs() argument
12278 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); in hclge_get_32_bit_regs()
12280 dev_err(&hdev->pdev->dev, in hclge_get_32_bit_regs()
12307 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, in hclge_get_64_bit_regs() argument
12332 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); in hclge_get_64_bit_regs()
12334 dev_err(&hdev->pdev->dev, in hclge_get_64_bit_regs()
12368 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) in hclge_query_bd_num_cmd_send() argument
12382 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT); in hclge_query_bd_num_cmd_send()
12385 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, in hclge_get_dfx_reg_bd_num() argument
12393 ret = hclge_query_bd_num_cmd_send(hdev, desc); in hclge_get_dfx_reg_bd_num()
12395 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg_bd_num()
12411 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, in hclge_dfx_reg_cmd_send() argument
12426 ret = hclge_cmd_send(&hdev->hw, desc, bd_num); in hclge_dfx_reg_cmd_send()
12428 dev_err(&hdev->pdev->dev, in hclge_dfx_reg_cmd_send()
12456 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) in hclge_get_dfx_reg_len() argument
12468 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); in hclge_get_dfx_reg_len()
12470 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg_len()
12488 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) in hclge_get_dfx_reg() argument
12501 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); in hclge_get_dfx_reg()
12503 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg()
12521 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, in hclge_get_dfx_reg()
12524 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg()
12538 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, in hclge_fetch_pf_reg() argument
12552 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); in hclge_fetch_pf_reg()
12560 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); in hclge_fetch_pf_reg()
12569 *reg++ = hclge_read_dev(&hdev->hw, in hclge_fetch_pf_reg()
12579 for (j = 0; j < hdev->num_msi_used - 1; j++) { in hclge_fetch_pf_reg()
12581 *reg++ = hclge_read_dev(&hdev->hw, in hclge_fetch_pf_reg()
12587 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); in hclge_fetch_pf_reg()
12597 struct hclge_dev *hdev = vport->back; in hclge_get_regs_len() local
12602 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); in hclge_get_regs_len()
12604 dev_err(&hdev->pdev->dev, in hclge_get_regs_len()
12609 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); in hclge_get_regs_len()
12611 dev_err(&hdev->pdev->dev, in hclge_get_regs_len()
12630 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + in hclge_get_regs_len()
12639 struct hclge_dev *hdev = vport->back; in hclge_get_regs() local
12644 *version = hdev->fw_version; in hclge_get_regs()
12646 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); in hclge_get_regs()
12648 dev_err(&hdev->pdev->dev, in hclge_get_regs()
12653 reg += hclge_fetch_pf_reg(hdev, reg, kinfo); in hclge_get_regs()
12655 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); in hclge_get_regs()
12657 dev_err(&hdev->pdev->dev, in hclge_get_regs()
12667 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); in hclge_get_regs()
12669 dev_err(&hdev->pdev->dev, in hclge_get_regs()
12679 ret = hclge_get_dfx_reg(hdev, reg); in hclge_get_regs()
12681 dev_err(&hdev->pdev->dev, in hclge_get_regs()
12685 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) in hclge_set_led_status() argument
12697 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_led_status()
12699 dev_err(&hdev->pdev->dev, in hclge_set_led_status()
12715 struct hclge_dev *hdev = vport->back; in hclge_set_led_id() local
12719 return hclge_set_led_status(hdev, HCLGE_LED_ON); in hclge_set_led_id()
12721 return hclge_set_led_status(hdev, HCLGE_LED_OFF); in hclge_set_led_id()
12733 struct hclge_dev *hdev = vport->back; in hclge_get_link_mode() local
12737 supported[idx] = hdev->hw.mac.supported[idx]; in hclge_get_link_mode()
12738 advertising[idx] = hdev->hw.mac.advertising[idx]; in hclge_get_link_mode()
12745 struct hclge_dev *hdev = vport->back; in hclge_gro_en() local
12746 bool gro_en_old = hdev->gro_en; in hclge_gro_en()
12749 hdev->gro_en = enable; in hclge_gro_en()
12750 ret = hclge_config_gro(hdev); in hclge_gro_en()
12752 hdev->gro_en = gro_en_old; in hclge_gro_en()
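hclge_gro_en is a small save/try/restore transaction on the software flag: record the old value, flip the cache, push the new state to firmware, and restore the old value if the push fails so software and hardware never disagree. A sketch, with config_gro standing in for the firmware call:

    #include <stdbool.h>

    int config_gro(bool enable);   /* placeholder firmware push */

    static bool gro_en;            /* cached software state */

    static int set_gro(bool enable)
    {
        bool old = gro_en;
        int ret;

        gro_en = enable;
        ret = config_gro(gro_en);
        if (ret)
            gro_en = old;          /* firmware rejected it: restore cache */
        return ret;
    }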
12757 static void hclge_sync_promisc_mode(struct hclge_dev *hdev) in hclge_sync_promisc_mode() argument
12759 struct hclge_vport *vport = &hdev->vport[0]; in hclge_sync_promisc_mode()
12782 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_sync_promisc_mode()
12787 vport = &hdev->vport[i]; in hclge_sync_promisc_mode()
12803 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, in hclge_sync_promisc_mode()
12814 static bool hclge_module_existed(struct hclge_dev *hdev) in hclge_module_existed() argument
12821 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_module_existed()
12823 dev_err(&hdev->pdev->dev, in hclge_module_existed()
12836 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, in hclge_get_sfp_eeprom_info() argument
12862 ret = hclge_cmd_send(&hdev->hw, desc, i); in hclge_get_sfp_eeprom_info()
12864 dev_err(&hdev->pdev->dev, in hclge_get_sfp_eeprom_info()
12891 struct hclge_dev *hdev = vport->back; in hclge_get_module_eeprom() local
12895 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) in hclge_get_module_eeprom()
12898 if (!hclge_module_existed(hdev)) in hclge_get_module_eeprom()
12902 data_len = hclge_get_sfp_eeprom_info(hdev, in hclge_get_module_eeprom()
12919 struct hclge_dev *hdev = vport->back; in hclge_get_link_diagnosis_info() local
12923 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) in hclge_get_link_diagnosis_info()
12927 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_link_diagnosis_info()
12929 dev_err(&hdev->pdev->dev, in hclge_get_link_diagnosis_info()
12943 struct hclge_dev *hdev = vport->back; in hclge_clear_vport_vf_info() local
12950 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12961 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12965 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); in hclge_clear_vport_vf_info()
12967 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12976 struct hclge_dev *hdev = ae_dev->priv; in hclge_clean_vport_config() local
12981 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_clean_vport_config()