Lines matching refs: hdev
58 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
59 static int hclge_init_vlan_config(struct hclge_dev *hdev);
60 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
414 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) in hclge_mac_update_stats_defective() argument
418 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); in hclge_mac_update_stats_defective()
425 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); in hclge_mac_update_stats_defective()
427 dev_err(&hdev->pdev->dev, in hclge_mac_update_stats_defective()
453 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) in hclge_mac_update_stats_complete() argument
455 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); in hclge_mac_update_stats_complete()
469 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); in hclge_mac_update_stats_complete()
497 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) in hclge_mac_query_reg_num() argument
505 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_mac_query_reg_num()
518 static int hclge_mac_update_stats(struct hclge_dev *hdev) in hclge_mac_update_stats() argument
523 ret = hclge_mac_query_reg_num(hdev, &desc_num); in hclge_mac_update_stats()
527 ret = hclge_mac_update_stats_complete(hdev, desc_num); in hclge_mac_update_stats()
529 ret = hclge_mac_update_stats_defective(hdev); in hclge_mac_update_stats()
531 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); in hclge_mac_update_stats()
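Read together, the hclge_mac_update_stats() hits above describe a firmware-capability dispatch: the driver first asks the firmware how many MAC statistics registers it exposes, uses the flexible multi-descriptor path when the query succeeds, and falls back to the fixed-layout "defective" path when it is unsupported. A minimal sketch of that control flow, reconstructed from the matched lines (treating -EOPNOTSUPP as the "query not supported" case is an assumption):

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);
        if (!ret)
                /* firmware reported its register count */
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                /* assumed: older firmware without the query command */
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}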
540 struct hclge_dev *hdev = vport->back; in hclge_tqps_update_stats() local
554 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_tqps_update_stats()
556 dev_err(&hdev->pdev->dev, in hclge_tqps_update_stats()
574 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_tqps_update_stats()
576 dev_err(&hdev->pdev->dev, in hclge_tqps_update_stats()
672 static void hclge_update_stats_for_all(struct hclge_dev *hdev) in hclge_update_stats_for_all() argument
677 handle = &hdev->vport[0].nic; in hclge_update_stats_for_all()
681 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
687 status = hclge_mac_update_stats(hdev); in hclge_update_stats_for_all()
689 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
697 struct hclge_dev *hdev = vport->back; in hclge_update_stats() local
700 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) in hclge_update_stats()
703 status = hclge_mac_update_stats(hdev); in hclge_update_stats()
705 dev_err(&hdev->pdev->dev, in hclge_update_stats()
711 dev_err(&hdev->pdev->dev, in hclge_update_stats()
715 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); in hclge_update_stats()
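The HCLGE_STATE_STATISTICS_UPDATING bit seen here is a lightweight mutual-exclusion flag: test_and_set_bit() claims it atomically, so a concurrent caller returns early instead of racing the firmware queries. A sketch of the guard with the update body elided:

if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
        return;         /* another update is already in flight */

/* ... hclge_mac_update_stats() and per-queue TQP statistics ... */

clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);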
726 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count() local
737 if (hdev->pdev->revision >= 0x21 || in hclge_get_sset_count()
738 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || in hclge_get_sset_count()
739 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || in hclge_get_sset_count()
740 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { in hclge_get_sset_count()
749 if (hdev->hw.mac.phydev) { in hclge_get_sset_count()
801 struct hclge_dev *hdev = vport->back; in hclge_get_stats() local
804 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string, in hclge_get_stats()
813 struct hclge_dev *hdev = vport->back; in hclge_get_mac_stat() local
817 mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num; in hclge_get_mac_stat()
818 mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num; in hclge_get_mac_stat()
821 static int hclge_parse_func_status(struct hclge_dev *hdev, in hclge_parse_func_status() argument
829 hdev->flag |= HCLGE_FLAG_MAIN; in hclge_parse_func_status()
831 hdev->flag &= ~HCLGE_FLAG_MAIN; in hclge_parse_func_status()
836 static int hclge_query_function_status(struct hclge_dev *hdev) in hclge_query_function_status() argument
849 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_function_status()
851 dev_err(&hdev->pdev->dev, in hclge_query_function_status()
862 ret = hclge_parse_func_status(hdev, req); in hclge_query_function_status()
867 static int hclge_query_pf_resource(struct hclge_dev *hdev) in hclge_query_pf_resource() argument
874 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_pf_resource()
876 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
882 hdev->num_tqps = __le16_to_cpu(req->tqp_num); in hclge_query_pf_resource()
883 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
886 hdev->tx_buf_size = in hclge_query_pf_resource()
889 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; in hclge_query_pf_resource()
891 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
894 hdev->dv_buf_size = in hclge_query_pf_resource()
897 hdev->dv_buf_size = HCLGE_DEFAULT_DV; in hclge_query_pf_resource()
899 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
901 if (hnae3_dev_roce_supported(hdev)) { in hclge_query_pf_resource()
902 hdev->roce_base_msix_offset = in hclge_query_pf_resource()
905 hdev->num_roce_msi = in hclge_query_pf_resource()
910 hdev->num_nic_msi = hdev->num_roce_msi; in hclge_query_pf_resource()
915 hdev->num_msi = hdev->num_roce_msi + in hclge_query_pf_resource()
916 hdev->roce_base_msix_offset; in hclge_query_pf_resource()
918 hdev->num_msi = in hclge_query_pf_resource()
922 hdev->num_nic_msi = hdev->num_msi; in hclge_query_pf_resource()
925 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { in hclge_query_pf_resource()
926 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
928 hdev->num_nic_msi); in hclge_query_pf_resource()
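The hclge_query_pf_resource() hits show how the PF's MSI-X budget is split: NIC vectors are queued below roce_base_msix_offset and RoCE vectors sit on top, and probe fails if fewer than HNAE3_MIN_VECTOR_NUM NIC vectors remain. A hedged reconstruction, where pf_msix_number is a hypothetical local standing in for the count parsed from the resource descriptor:

if (hnae3_dev_roce_supported(hdev)) {
        /* NIC vectors occupy [0, roce_base_msix_offset), RoCE above */
        hdev->num_nic_msi = hdev->num_roce_msi;
        hdev->num_msi = hdev->num_roce_msi + hdev->roce_base_msix_offset;
} else {
        hdev->num_msi = pf_msix_number;         /* hypothetical local */
        hdev->num_nic_msi = hdev->num_msi;
}

if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
        dev_err(&hdev->pdev->dev,
                "Just %u msi resources, not enough for pf(min:%d).\n",
                hdev->num_nic_msi, HNAE3_MIN_VECTOR_NUM);
        return -EINVAL;
}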
972 struct hclge_dev *hdev = vport->back; in hclge_check_port_speed() local
973 u32 speed_ability = hdev->hw.mac.speed_ability; in hclge_check_port_speed()
1121 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, in hclge_parse_fiber_link_mode() argument
1124 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_fiber_link_mode()
1133 if (hdev->pdev->revision >= 0x21) in hclge_parse_fiber_link_mode()
1141 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, in hclge_parse_backplane_link_mode() argument
1144 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_backplane_link_mode()
1147 if (hdev->pdev->revision >= 0x21) in hclge_parse_backplane_link_mode()
1154 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, in hclge_parse_copper_link_mode() argument
1157 unsigned long *supported = hdev->hw.mac.supported; in hclge_parse_copper_link_mode()
1185 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) in hclge_parse_link_mode() argument
1187 u8 media_type = hdev->hw.mac.media_type; in hclge_parse_link_mode()
1190 hclge_parse_fiber_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1192 hclge_parse_copper_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1194 hclge_parse_backplane_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1260 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) in hclge_get_cfg() argument
1281 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); in hclge_get_cfg()
1283 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); in hclge_get_cfg()
1292 static int hclge_get_cap(struct hclge_dev *hdev) in hclge_get_cap() argument
1296 ret = hclge_query_function_status(hdev); in hclge_get_cap()
1298 dev_err(&hdev->pdev->dev, in hclge_get_cap()
1304 ret = hclge_query_pf_resource(hdev); in hclge_get_cap()
1306 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); in hclge_get_cap()
1311 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) in hclge_init_kdump_kernel_config() argument
1319 dev_info(&hdev->pdev->dev, in hclge_init_kdump_kernel_config()
1323 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; in hclge_init_kdump_kernel_config()
1324 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; in hclge_init_kdump_kernel_config()
1325 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; in hclge_init_kdump_kernel_config()
1328 static int hclge_configure(struct hclge_dev *hdev) in hclge_configure() argument
1334 ret = hclge_get_cfg(hdev, &cfg); in hclge_configure()
1336 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); in hclge_configure()
1340 hdev->num_vmdq_vport = cfg.vmdq_vport_num; in hclge_configure()
1341 hdev->base_tqp_pid = 0; in hclge_configure()
1342 hdev->rss_size_max = cfg.rss_size_max; in hclge_configure()
1343 hdev->rx_buf_len = cfg.rx_buf_len; in hclge_configure()
1344 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); in hclge_configure()
1345 hdev->hw.mac.media_type = cfg.media_type; in hclge_configure()
1346 hdev->hw.mac.phy_addr = cfg.phy_addr; in hclge_configure()
1347 hdev->num_tx_desc = cfg.tqp_desc_num; in hclge_configure()
1348 hdev->num_rx_desc = cfg.tqp_desc_num; in hclge_configure()
1349 hdev->tm_info.num_pg = 1; in hclge_configure()
1350 hdev->tc_max = cfg.tc_num; in hclge_configure()
1351 hdev->tm_info.hw_pfc_map = 0; in hclge_configure()
1352 hdev->wanted_umv_size = cfg.umv_space; in hclge_configure()
1354 if (hnae3_dev_fd_supported(hdev)) { in hclge_configure()
1355 hdev->fd_en = true; in hclge_configure()
1356 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_configure()
1359 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); in hclge_configure()
1361 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); in hclge_configure()
1365 hclge_parse_link_mode(hdev, cfg.speed_ability); in hclge_configure()
1367 if ((hdev->tc_max > HNAE3_MAX_TC) || in hclge_configure()
1368 (hdev->tc_max < 1)) { in hclge_configure()
1369 dev_warn(&hdev->pdev->dev, "TC num = %d.\n", in hclge_configure()
1370 hdev->tc_max); in hclge_configure()
1371 hdev->tc_max = 1; in hclge_configure()
1375 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_configure()
1376 hdev->tc_max = 1; in hclge_configure()
1377 hdev->pfc_max = 0; in hclge_configure()
1379 hdev->pfc_max = hdev->tc_max; in hclge_configure()
1382 hdev->tm_info.num_tc = 1; in hclge_configure()
1385 for (i = 0; i < hdev->tm_info.num_tc; i++) in hclge_configure()
1386 hnae3_set_bit(hdev->hw_tc_map, i, 1); in hclge_configure()
1388 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; in hclge_configure()
1390 hclge_init_kdump_kernel_config(hdev); in hclge_configure()
1393 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); in hclge_configure()
1394 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0; in hclge_configure()
1395 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), in hclge_configure()
1396 &hdev->affinity_mask); in hclge_configure()
1401 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min, in hclge_config_tso() argument
1422 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_tso()
1425 static int hclge_config_gro(struct hclge_dev *hdev, bool en) in hclge_config_gro() argument
1431 if (!hnae3_dev_gro_supported(hdev)) in hclge_config_gro()
1439 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_gro()
1441 dev_err(&hdev->pdev->dev, in hclge_config_gro()
1447 static int hclge_alloc_tqps(struct hclge_dev *hdev) in hclge_alloc_tqps() argument
1452 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclge_alloc_tqps()
1454 if (!hdev->htqp) in hclge_alloc_tqps()
1457 tqp = hdev->htqp; in hclge_alloc_tqps()
1459 for (i = 0; i < hdev->num_tqps; i++) { in hclge_alloc_tqps()
1460 tqp->dev = &hdev->pdev->dev; in hclge_alloc_tqps()
1464 tqp->q.buf_size = hdev->rx_buf_len; in hclge_alloc_tqps()
1465 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclge_alloc_tqps()
1466 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclge_alloc_tqps()
1467 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + in hclge_alloc_tqps()
1476 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, in hclge_map_tqps_to_func() argument
1493 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_map_tqps_to_func()
1495 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); in hclge_map_tqps_to_func()
1503 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp() local
1506 for (i = 0, alloced = 0; i < hdev->num_tqps && in hclge_assign_tqp()
1508 if (!hdev->htqp[i].alloced) { in hclge_assign_tqp()
1509 hdev->htqp[i].q.handle = &vport->nic; in hclge_assign_tqp()
1510 hdev->htqp[i].q.tqp_index = alloced; in hclge_assign_tqp()
1511 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; in hclge_assign_tqp()
1512 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; in hclge_assign_tqp()
1513 kinfo->tqp[alloced] = &hdev->htqp[i].q; in hclge_assign_tqp()
1514 hdev->htqp[i].alloced = true; in hclge_assign_tqp()
1519 kinfo->rss_size = min_t(u16, hdev->rss_size_max, in hclge_assign_tqp()
1520 vport->alloc_tqps / hdev->tm_info.num_tc); in hclge_assign_tqp()
1524 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); in hclge_assign_tqp()
1535 struct hclge_dev *hdev = vport->back; in hclge_knic_setup() local
1541 kinfo->rx_buf_len = hdev->rx_buf_len; in hclge_knic_setup()
1543 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, in hclge_knic_setup()
1550 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); in hclge_knic_setup()
1555 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, in hclge_map_tqp_to_vport() argument
1570 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, in hclge_map_tqp_to_vport()
1579 static int hclge_map_tqp(struct hclge_dev *hdev) in hclge_map_tqp() argument
1581 struct hclge_vport *vport = hdev->vport; in hclge_map_tqp()
1584 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; in hclge_map_tqp()
1588 ret = hclge_map_tqp_to_vport(hdev, vport); in hclge_map_tqp()
1601 struct hclge_dev *hdev = vport->back; in hclge_vport_setup() local
1604 nic->pdev = hdev->pdev; in hclge_vport_setup()
1606 nic->numa_node_mask = hdev->numa_node_mask; in hclge_vport_setup()
1609 hdev->num_tx_desc, hdev->num_rx_desc); in hclge_vport_setup()
1611 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); in hclge_vport_setup()
1616 static int hclge_alloc_vport(struct hclge_dev *hdev) in hclge_alloc_vport() argument
1618 struct pci_dev *pdev = hdev->pdev; in hclge_alloc_vport()
1626 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; in hclge_alloc_vport()
1628 if (hdev->num_tqps < num_vport) { in hclge_alloc_vport()
1629 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", in hclge_alloc_vport()
1630 hdev->num_tqps, num_vport); in hclge_alloc_vport()
1635 tqp_per_vport = hdev->num_tqps / num_vport; in hclge_alloc_vport()
1636 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; in hclge_alloc_vport()
1643 hdev->vport = vport; in hclge_alloc_vport()
1644 hdev->num_alloc_vport = num_vport; in hclge_alloc_vport()
1647 hdev->num_alloc_vfs = hdev->num_req_vfs; in hclge_alloc_vport()
1650 vport->back = hdev; in hclge_alloc_vport()
1676 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, in hclge_cmd_alloc_tx_buff() argument
1698 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_alloc_tx_buff()
1700 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", in hclge_cmd_alloc_tx_buff()
1706 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, in hclge_tx_buffer_alloc() argument
1709 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); in hclge_tx_buffer_alloc()
1712 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); in hclge_tx_buffer_alloc()
1717 static u32 hclge_get_tc_num(struct hclge_dev *hdev) in hclge_get_tc_num() argument
1723 if (hdev->hw_tc_map & BIT(i)) in hclge_get_tc_num()
1729 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_pfc_priv_num() argument
1738 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_pfc_priv_num()
1747 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_no_pfc_priv_num() argument
1756 if (hdev->hw_tc_map & BIT(i) && in hclge_get_no_pfc_priv_num()
1757 !(hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_no_pfc_priv_num()
1789 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, in hclge_is_rx_buf_ok() argument
1794 u32 tc_num = hclge_get_tc_num(hdev); in hclge_is_rx_buf_ok()
1799 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
1801 if (hnae3_dev_dcb_supported(hdev)) in hclge_is_rx_buf_ok()
1803 hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1806 + hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1818 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
1819 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1829 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
1830 hi_thrd = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
1855 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, in hclge_tx_buffer_calc() argument
1860 total_size = hdev->pkt_buf_size; in hclge_tx_buffer_calc()
1866 if (hdev->hw_tc_map & BIT(i)) { in hclge_tx_buffer_calc()
1867 if (total_size < hdev->tx_buf_size) in hclge_tx_buffer_calc()
1870 priv->tx_buf_size = hdev->tx_buf_size; in hclge_tx_buffer_calc()
1881 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, in hclge_rx_buf_calc_all() argument
1884 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buf_calc_all()
1885 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_rx_buf_calc_all()
1896 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rx_buf_calc_all()
1901 if (hdev->tm_info.hw_pfc_map & BIT(i)) { in hclge_rx_buf_calc_all()
1911 priv->buf_size = priv->wl.high + hdev->dv_buf_size; in hclge_rx_buf_calc_all()
1914 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_rx_buf_calc_all()
1917 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_nopfc_buf_till_fit() argument
1920 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_nopfc_buf_till_fit()
1921 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); in hclge_drop_nopfc_buf_till_fit()
1929 if (hdev->hw_tc_map & mask && in hclge_drop_nopfc_buf_till_fit()
1930 !(hdev->tm_info.hw_pfc_map & mask)) { in hclge_drop_nopfc_buf_till_fit()
1939 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_nopfc_buf_till_fit()
1944 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_nopfc_buf_till_fit()
1947 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_pfc_buf_till_fit() argument
1950 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_pfc_buf_till_fit()
1951 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); in hclge_drop_pfc_buf_till_fit()
1959 if (hdev->hw_tc_map & mask && in hclge_drop_pfc_buf_till_fit()
1960 hdev->tm_info.hw_pfc_map & mask) { in hclge_drop_pfc_buf_till_fit()
1969 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_pfc_buf_till_fit()
1974 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_pfc_buf_till_fit()
1977 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, in hclge_only_alloc_priv_buff() argument
1984 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_only_alloc_priv_buff()
1985 u32 tc_num = hclge_get_tc_num(hdev); in hclge_only_alloc_priv_buff()
1986 u32 half_mps = hdev->mps >> 1; in hclge_only_alloc_priv_buff()
1996 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + in hclge_only_alloc_priv_buff()
2012 if (!(hdev->hw_tc_map & BIT(i))) in hclge_only_alloc_priv_buff()
2017 priv->wl.high = rx_priv - hdev->dv_buf_size; in hclge_only_alloc_priv_buff()
2031 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, in hclge_rx_buffer_calc() argument
2035 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_rx_buffer_calc()
2036 u32 rx_all = hdev->pkt_buf_size; in hclge_rx_buffer_calc()
2039 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) in hclge_rx_buffer_calc()
2045 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2048 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) in hclge_rx_buffer_calc()
2052 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) in hclge_rx_buffer_calc()
2055 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2058 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
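hclge_rx_buffer_calc() reads as a strategy waterfall: each helper proposes an RX private/shared buffer split and returns true when it fits the packet buffer (each one checks via hclge_is_rx_buf_ok()), so the first strategy that fits wins. Sketch of the cascade; the -ENOMEM fall-through is an assumption:

if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
        return 0;

if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
        return 0;

/* try again with reduced watermarks */
if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
        return 0;

/* then start dropping per-TC private buffers, non-PFC TCs first */
if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
        return 0;

if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
        return 0;

return -ENOMEM;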
2064 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, in hclge_rx_priv_buf_alloc() argument
2089 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_rx_priv_buf_alloc()
2091 dev_err(&hdev->pdev->dev, in hclge_rx_priv_buf_alloc()
2097 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, in hclge_rx_priv_wl_config() argument
2133 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_rx_priv_wl_config()
2135 dev_err(&hdev->pdev->dev, in hclge_rx_priv_wl_config()
2141 static int hclge_common_thrd_config(struct hclge_dev *hdev, in hclge_common_thrd_config() argument
2177 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_common_thrd_config()
2179 dev_err(&hdev->pdev->dev, in hclge_common_thrd_config()
2184 static int hclge_common_wl_config(struct hclge_dev *hdev, in hclge_common_wl_config() argument
2201 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_common_wl_config()
2203 dev_err(&hdev->pdev->dev, in hclge_common_wl_config()
2209 int hclge_buffer_alloc(struct hclge_dev *hdev) in hclge_buffer_alloc() argument
2218 ret = hclge_tx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2220 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2225 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2227 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2232 ret = hclge_rx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2234 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2240 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2242 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", in hclge_buffer_alloc()
2247 if (hnae3_dev_dcb_supported(hdev)) { in hclge_buffer_alloc()
2248 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2250 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2256 ret = hclge_common_thrd_config(hdev, pkt_buf); in hclge_buffer_alloc()
2258 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2265 ret = hclge_common_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2267 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2298 static int hclge_init_msi(struct hclge_dev *hdev) in hclge_init_msi() argument
2300 struct pci_dev *pdev = hdev->pdev; in hclge_init_msi()
2305 hdev->num_msi, in hclge_init_msi()
2313 if (vectors < hdev->num_msi) in hclge_init_msi()
2314 dev_warn(&hdev->pdev->dev, in hclge_init_msi()
2316 hdev->num_msi, vectors); in hclge_init_msi()
2318 hdev->num_msi = vectors; in hclge_init_msi()
2319 hdev->num_msi_left = vectors; in hclge_init_msi()
2321 hdev->base_msi_vector = pdev->irq; in hclge_init_msi()
2322 hdev->roce_base_vector = hdev->base_msi_vector + in hclge_init_msi()
2323 hdev->roce_base_msix_offset; in hclge_init_msi()
2325 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2327 if (!hdev->vector_status) { in hclge_init_msi()
2332 for (i = 0; i < hdev->num_msi; i++) in hclge_init_msi()
2333 hdev->vector_status[i] = HCLGE_INVALID_VPORT; in hclge_init_msi()
2335 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2337 if (!hdev->vector_irq) { in hclge_init_msi()
2353 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, in hclge_cfg_mac_speed_dup_hw() argument
2401 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); in hclge_cfg_mac_speed_dup_hw()
2408 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_speed_dup_hw()
2410 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_speed_dup_hw()
2418 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) in hclge_cfg_mac_speed_dup() argument
2423 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) in hclge_cfg_mac_speed_dup()
2426 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); in hclge_cfg_mac_speed_dup()
2430 hdev->hw.mac.speed = speed; in hclge_cfg_mac_speed_dup()
2431 hdev->hw.mac.duplex = duplex; in hclge_cfg_mac_speed_dup()
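hclge_cfg_mac_speed_dup() is a cache-then-commit wrapper around the firmware command: it skips the round trip when the requested speed/duplex already match the cached MAC state, and updates the cache only after the hardware accepted the change. A minimal sketch of that wrapper:

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
        int ret;

        if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
                return 0;               /* nothing changed, skip firmware */

        ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
        if (ret)
                return ret;

        /* commit to the cache only on success */
        hdev->hw.mac.speed = speed;
        hdev->hw.mac.duplex = duplex;

        return 0;
}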
2440 struct hclge_dev *hdev = vport->back; in hclge_cfg_mac_speed_dup_h() local
2442 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); in hclge_cfg_mac_speed_dup_h()
2445 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) in hclge_set_autoneg_en() argument
2459 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_autoneg_en()
2461 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", in hclge_set_autoneg_en()
2470 struct hclge_dev *hdev = vport->back; in hclge_set_autoneg() local
2472 if (!hdev->hw.mac.support_autoneg) { in hclge_set_autoneg()
2474 dev_err(&hdev->pdev->dev, in hclge_set_autoneg()
2482 return hclge_set_autoneg_en(hdev, enable); in hclge_set_autoneg()
2488 struct hclge_dev *hdev = vport->back; in hclge_get_autoneg() local
2489 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_autoneg()
2494 return hdev->hw.mac.autoneg; in hclge_get_autoneg()
2500 struct hclge_dev *hdev = vport->back; in hclge_restart_autoneg() local
2503 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); in hclge_restart_autoneg()
2505 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_restart_autoneg()
2508 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_restart_autoneg()
2514 struct hclge_dev *hdev = vport->back; in hclge_halt_autoneg() local
2516 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) in hclge_halt_autoneg()
2517 return hclge_set_autoneg_en(hdev, !halt); in hclge_halt_autoneg()
2522 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) in hclge_set_fec_hw() argument
2540 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fec_hw()
2542 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); in hclge_set_fec_hw()
2550 struct hclge_dev *hdev = vport->back; in hclge_set_fec() local
2551 struct hclge_mac *mac = &hdev->hw.mac; in hclge_set_fec()
2555 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); in hclge_set_fec()
2559 ret = hclge_set_fec_hw(hdev, fec_mode); in hclge_set_fec()
2571 struct hclge_dev *hdev = vport->back; in hclge_get_fec() local
2572 struct hclge_mac *mac = &hdev->hw.mac; in hclge_get_fec()
2580 static int hclge_mac_init(struct hclge_dev *hdev) in hclge_mac_init() argument
2582 struct hclge_mac *mac = &hdev->hw.mac; in hclge_mac_init()
2585 hdev->support_sfp_query = true; in hclge_mac_init()
2586 hdev->hw.mac.duplex = HCLGE_MAC_FULL; in hclge_mac_init()
2587 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, in hclge_mac_init()
2588 hdev->hw.mac.duplex); in hclge_mac_init()
2590 dev_err(&hdev->pdev->dev, in hclge_mac_init()
2595 if (hdev->hw.mac.support_autoneg) { in hclge_mac_init()
2596 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); in hclge_mac_init()
2598 dev_err(&hdev->pdev->dev, in hclge_mac_init()
2607 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); in hclge_mac_init()
2609 dev_err(&hdev->pdev->dev, in hclge_mac_init()
2615 ret = hclge_set_mac_mtu(hdev, hdev->mps); in hclge_mac_init()
2617 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); in hclge_mac_init()
2621 ret = hclge_set_default_loopback(hdev); in hclge_mac_init()
2625 ret = hclge_buffer_alloc(hdev); in hclge_mac_init()
2627 dev_err(&hdev->pdev->dev, in hclge_mac_init()
2633 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) in hclge_mbx_task_schedule() argument
2635 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) && in hclge_mbx_task_schedule()
2636 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) in hclge_mbx_task_schedule()
2637 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, in hclge_mbx_task_schedule()
2638 &hdev->mbx_service_task); in hclge_mbx_task_schedule()
2641 static void hclge_reset_task_schedule(struct hclge_dev *hdev) in hclge_reset_task_schedule() argument
2643 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_reset_task_schedule()
2644 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_task_schedule()
2645 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, in hclge_reset_task_schedule()
2646 &hdev->rst_service_task); in hclge_reset_task_schedule()
2649 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) in hclge_task_schedule() argument
2651 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && in hclge_task_schedule()
2652 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_task_schedule()
2653 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) { in hclge_task_schedule()
2654 hdev->hw_stats.stats_timer++; in hclge_task_schedule()
2655 hdev->fd_arfs_expire_timer++; in hclge_task_schedule()
2656 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), in hclge_task_schedule()
2657 system_wq, &hdev->service_task, in hclge_task_schedule()
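The three schedulers above share one idiom: a HCLGE_STATE_*_SCHED state bit stops double-queueing, and the work is queued on the first CPU of hdev->affinity_mask so service processing stays local to the misc interrupt's affinity. The reset variant, as visible in the hits:

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
        if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
            !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
                queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
                              &hdev->rst_service_task);
}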
2662 static int hclge_get_mac_link_status(struct hclge_dev *hdev) in hclge_get_mac_link_status() argument
2670 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_mac_link_status()
2672 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", in hclge_get_mac_link_status()
2683 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) in hclge_get_mac_phy_link() argument
2688 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) in hclge_get_mac_phy_link()
2691 mac_state = hclge_get_mac_link_status(hdev); in hclge_get_mac_phy_link()
2693 if (hdev->hw.mac.phydev) { in hclge_get_mac_phy_link()
2694 if (hdev->hw.mac.phydev->state == PHY_RUNNING) in hclge_get_mac_phy_link()
2696 hdev->hw.mac.phydev->link; in hclge_get_mac_phy_link()
2707 static void hclge_update_link_status(struct hclge_dev *hdev) in hclge_update_link_status() argument
2709 struct hnae3_client *rclient = hdev->roce_client; in hclge_update_link_status()
2710 struct hnae3_client *client = hdev->nic_client; in hclge_update_link_status()
2718 state = hclge_get_mac_phy_link(hdev); in hclge_update_link_status()
2719 if (state != hdev->hw.mac.link) { in hclge_update_link_status()
2720 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_update_link_status()
2721 handle = &hdev->vport[i].nic; in hclge_update_link_status()
2723 hclge_config_mac_tnl_int(hdev, state); in hclge_update_link_status()
2724 rhandle = &hdev->vport[i].roce; in hclge_update_link_status()
2729 hdev->hw.mac.link = state; in hclge_update_link_status()
2757 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) in hclge_get_sfp_speed() argument
2765 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_speed()
2767 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_speed()
2771 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); in hclge_get_sfp_speed()
2780 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) in hclge_get_sfp_info() argument
2791 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_info()
2793 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_info()
2797 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); in hclge_get_sfp_info()
2822 static int hclge_update_port_info(struct hclge_dev *hdev) in hclge_update_port_info() argument
2824 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_port_info()
2833 if (!hdev->support_sfp_query) in hclge_update_port_info()
2836 if (hdev->pdev->revision >= 0x21) in hclge_update_port_info()
2837 ret = hclge_get_sfp_info(hdev, mac); in hclge_update_port_info()
2839 ret = hclge_get_sfp_speed(hdev, &speed); in hclge_update_port_info()
2842 hdev->support_sfp_query = false; in hclge_update_port_info()
2848 if (hdev->pdev->revision >= 0x21) { in hclge_update_port_info()
2853 return hclge_cfg_mac_speed_dup(hdev, mac->speed, in hclge_update_port_info()
2860 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); in hclge_update_port_info()
2867 struct hclge_dev *hdev = vport->back; in hclge_get_status() local
2869 hclge_update_link_status(hdev); in hclge_get_status()
2871 return hdev->hw.mac.link; in hclge_get_status()
2874 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) in hclge_check_event_cause() argument
2879 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_check_event_cause()
2880 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); in hclge_check_event_cause()
2881 msix_src_reg = hclge_read_dev(&hdev->hw, in hclge_check_event_cause()
2893 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); in hclge_check_event_cause()
2894 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); in hclge_check_event_cause()
2895 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_check_event_cause()
2897 hdev->rst_stats.imp_rst_cnt++; in hclge_check_event_cause()
2902 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); in hclge_check_event_cause()
2903 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_check_event_cause()
2904 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); in hclge_check_event_cause()
2906 hdev->rst_stats.global_rst_cnt++; in hclge_check_event_cause()
2912 dev_info(&hdev->pdev->dev, "received event 0x%x\n", in hclge_check_event_cause()
2926 dev_info(&hdev->pdev->dev, in hclge_check_event_cause()
2934 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, in hclge_clear_event_cause() argument
2939 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); in hclge_clear_event_cause()
2942 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); in hclge_clear_event_cause()
2949 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) in hclge_clear_all_event_cause() argument
2951 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, in hclge_clear_all_event_cause()
2955 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); in hclge_clear_all_event_cause()
2965 struct hclge_dev *hdev = data; in hclge_misc_irq_handle() local
2969 hclge_enable_vector(&hdev->misc_vector, false); in hclge_misc_irq_handle()
2970 event_cause = hclge_check_event_cause(hdev, &clearval); in hclge_misc_irq_handle()
2985 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); in hclge_misc_irq_handle()
2988 hclge_reset_task_schedule(hdev); in hclge_misc_irq_handle()
3000 hclge_mbx_task_schedule(hdev); in hclge_misc_irq_handle()
3003 dev_warn(&hdev->pdev->dev, in hclge_misc_irq_handle()
3008 hclge_clear_event_cause(hdev, event_cause, clearval); in hclge_misc_irq_handle()
3017 hclge_enable_vector(&hdev->misc_vector, true); in hclge_misc_irq_handle()
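The miscellaneous IRQ handler masks its own vector while classifying the cause, then re-arms it only on paths where no reset is about to tear the device down. A skeleton reconstructed from the matched lines, with the per-cause dispatch elided:

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
        struct hclge_dev *hdev = data;
        u32 clearval = 0;
        u32 event_cause;

        /* mask the vector while sorting out what fired */
        hclge_enable_vector(&hdev->misc_vector, false);
        event_cause = hclge_check_event_cause(hdev, &clearval);

        /* ... schedule the reset or mailbox task, or warn ... */

        hclge_clear_event_cause(hdev, event_cause, clearval);

        /* re-arm; reset paths re-enable the vector themselves later */
        hclge_enable_vector(&hdev->misc_vector, true);

        return IRQ_HANDLED;
}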
3023 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) in hclge_free_vector() argument
3025 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { in hclge_free_vector()
3026 dev_warn(&hdev->pdev->dev, in hclge_free_vector()
3031 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; in hclge_free_vector()
3032 hdev->num_msi_left += 1; in hclge_free_vector()
3033 hdev->num_msi_used -= 1; in hclge_free_vector()
3036 static void hclge_get_misc_vector(struct hclge_dev *hdev) in hclge_get_misc_vector() argument
3038 struct hclge_misc_vector *vector = &hdev->misc_vector; in hclge_get_misc_vector()
3040 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); in hclge_get_misc_vector()
3042 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; in hclge_get_misc_vector()
3043 hdev->vector_status[0] = 0; in hclge_get_misc_vector()
3045 hdev->num_msi_left -= 1; in hclge_get_misc_vector()
3046 hdev->num_msi_used += 1; in hclge_get_misc_vector()
3052 struct hclge_dev *hdev = container_of(notify, struct hclge_dev, in hclge_irq_affinity_notify() local
3055 cpumask_copy(&hdev->affinity_mask, mask); in hclge_irq_affinity_notify()
3062 static void hclge_misc_affinity_setup(struct hclge_dev *hdev) in hclge_misc_affinity_setup() argument
3064 irq_set_affinity_hint(hdev->misc_vector.vector_irq, in hclge_misc_affinity_setup()
3065 &hdev->affinity_mask); in hclge_misc_affinity_setup()
3067 hdev->affinity_notify.notify = hclge_irq_affinity_notify; in hclge_misc_affinity_setup()
3068 hdev->affinity_notify.release = hclge_irq_affinity_release; in hclge_misc_affinity_setup()
3069 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, in hclge_misc_affinity_setup()
3070 &hdev->affinity_notify); in hclge_misc_affinity_setup()
3073 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) in hclge_misc_affinity_teardown() argument
3075 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); in hclge_misc_affinity_teardown()
3076 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); in hclge_misc_affinity_teardown()
3079 static int hclge_misc_irq_init(struct hclge_dev *hdev) in hclge_misc_irq_init() argument
3083 hclge_get_misc_vector(hdev); in hclge_misc_irq_init()
3086 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, in hclge_misc_irq_init()
3087 0, "hclge_misc", hdev); in hclge_misc_irq_init()
3089 hclge_free_vector(hdev, 0); in hclge_misc_irq_init()
3090 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", in hclge_misc_irq_init()
3091 hdev->misc_vector.vector_irq); in hclge_misc_irq_init()
3097 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) in hclge_misc_irq_uninit() argument
3099 free_irq(hdev->misc_vector.vector_irq, hdev); in hclge_misc_irq_uninit()
3100 hclge_free_vector(hdev, 0); in hclge_misc_irq_uninit()
3103 int hclge_notify_client(struct hclge_dev *hdev, in hclge_notify_client() argument
3106 struct hnae3_client *client = hdev->nic_client; in hclge_notify_client()
3109 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) in hclge_notify_client()
3115 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_notify_client()
3116 struct hnae3_handle *handle = &hdev->vport[i].nic; in hclge_notify_client()
3121 dev_err(&hdev->pdev->dev, in hclge_notify_client()
3130 static int hclge_notify_roce_client(struct hclge_dev *hdev, in hclge_notify_roce_client() argument
3133 struct hnae3_client *client = hdev->roce_client; in hclge_notify_roce_client()
3137 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclge_notify_roce_client()
3143 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_notify_roce_client()
3144 struct hnae3_handle *handle = &hdev->vport[i].roce; in hclge_notify_roce_client()
3148 dev_err(&hdev->pdev->dev, in hclge_notify_roce_client()
3158 static int hclge_reset_wait(struct hclge_dev *hdev) in hclge_reset_wait() argument
3165 switch (hdev->reset_type) { in hclge_reset_wait()
3181 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3183 hdev->reset_type); in hclge_reset_wait()
3187 if (hdev->reset_type == HNAE3_FLR_RESET) { in hclge_reset_wait()
3188 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && in hclge_reset_wait()
3192 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { in hclge_reset_wait()
3193 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3201 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3204 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3209 dev_warn(&hdev->pdev->dev, in hclge_reset_wait()
3210 "Wait for reset timeout: %d\n", hdev->reset_type); in hclge_reset_wait()
3217 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) in hclge_set_vf_rst() argument
3229 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vf_rst()
3232 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) in hclge_set_all_vf_rst() argument
3236 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { in hclge_set_all_vf_rst()
3237 struct hclge_vport *vport = &hdev->vport[i]; in hclge_set_all_vf_rst()
3241 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); in hclge_set_all_vf_rst()
3243 dev_err(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3258 dev_warn(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3266 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev) in hclge_func_reset_sync_vf() argument
3277 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_sync_vf()
3285 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n", in hclge_func_reset_sync_vf()
3295 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n"); in hclge_func_reset_sync_vf()
3299 void hclge_report_hw_error(struct hclge_dev *hdev, in hclge_report_hw_error() argument
3302 struct hnae3_client *client = hdev->nic_client; in hclge_report_hw_error()
3306 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) in hclge_report_hw_error()
3309 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) in hclge_report_hw_error()
3310 client->ops->process_hw_error(&hdev->vport[i].nic, type); in hclge_report_hw_error()
3313 static void hclge_handle_imp_error(struct hclge_dev *hdev) in hclge_handle_imp_error() argument
3317 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_handle_imp_error()
3319 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); in hclge_handle_imp_error()
3321 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
3325 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); in hclge_handle_imp_error()
3327 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
3331 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) in hclge_func_reset_cmd() argument
3341 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_cmd()
3343 dev_err(&hdev->pdev->dev, in hclge_func_reset_cmd()
3349 static void hclge_do_reset(struct hclge_dev *hdev) in hclge_do_reset() argument
3351 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_do_reset()
3352 struct pci_dev *pdev = hdev->pdev; in hclge_do_reset()
3358 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), in hclge_do_reset()
3359 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); in hclge_do_reset()
3363 switch (hdev->reset_type) { in hclge_do_reset()
3365 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); in hclge_do_reset()
3367 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); in hclge_do_reset()
3373 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); in hclge_do_reset()
3374 hclge_reset_task_schedule(hdev); in hclge_do_reset()
3379 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); in hclge_do_reset()
3380 hclge_reset_task_schedule(hdev); in hclge_do_reset()
3384 "Unsupported reset type: %d\n", hdev->reset_type); in hclge_do_reset()
3393 struct hclge_dev *hdev = ae_dev->priv; in hclge_get_reset_level() local
3400 hclge_handle_hw_msix_error(hdev, addr); in hclge_get_reset_level()
3409 hclge_enable_vector(&hdev->misc_vector, true); in hclge_get_reset_level()
3430 if (hdev->reset_type != HNAE3_NONE_RESET && in hclge_get_reset_level()
3431 rst_level < hdev->reset_type) in hclge_get_reset_level()
3437 static void hclge_clear_reset_cause(struct hclge_dev *hdev) in hclge_clear_reset_cause() argument
3441 switch (hdev->reset_type) { in hclge_clear_reset_cause()
3458 if (hdev->pdev->revision == 0x20) in hclge_clear_reset_cause()
3459 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, in hclge_clear_reset_cause()
3462 hclge_enable_vector(&hdev->misc_vector, true); in hclge_clear_reset_cause()
3465 static int hclge_reset_prepare_down(struct hclge_dev *hdev) in hclge_reset_prepare_down() argument
3469 switch (hdev->reset_type) { in hclge_reset_prepare_down()
3473 ret = hclge_set_all_vf_rst(hdev, true); in hclge_reset_prepare_down()
3482 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) in hclge_reset_handshake() argument
3486 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG); in hclge_reset_handshake()
3492 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); in hclge_reset_handshake()
3495 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) in hclge_reset_prepare_wait() argument
3500 switch (hdev->reset_type) { in hclge_reset_prepare_wait()
3505 ret = hclge_func_reset_sync_vf(hdev); in hclge_reset_prepare_wait()
3509 ret = hclge_func_reset_cmd(hdev, 0); in hclge_reset_prepare_wait()
3511 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_wait()
3521 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_reset_prepare_wait()
3522 hdev->rst_stats.pf_rst_cnt++; in hclge_reset_prepare_wait()
3528 ret = hclge_func_reset_sync_vf(hdev); in hclge_reset_prepare_wait()
3532 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); in hclge_reset_prepare_wait()
3533 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); in hclge_reset_prepare_wait()
3534 hdev->rst_stats.flr_rst_cnt++; in hclge_reset_prepare_wait()
3537 hclge_handle_imp_error(hdev); in hclge_reset_prepare_wait()
3538 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_reset_prepare_wait()
3539 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, in hclge_reset_prepare_wait()
3548 hclge_reset_handshake(hdev, true); in hclge_reset_prepare_wait()
3549 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); in hclge_reset_prepare_wait()
3554 static bool hclge_reset_err_handle(struct hclge_dev *hdev) in hclge_reset_err_handle() argument
3558 if (hdev->reset_pending) { in hclge_reset_err_handle()
3559 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", in hclge_reset_err_handle()
3560 hdev->reset_pending); in hclge_reset_err_handle()
3562 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & in hclge_reset_err_handle()
3564 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
3566 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
3568 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { in hclge_reset_err_handle()
3569 hdev->rst_stats.reset_fail_cnt++; in hclge_reset_err_handle()
3570 set_bit(hdev->reset_type, &hdev->reset_pending); in hclge_reset_err_handle()
3571 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
3573 hdev->rst_stats.reset_fail_cnt); in hclge_reset_err_handle()
3577 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
3580 hclge_reset_handshake(hdev, true); in hclge_reset_err_handle()
3582 dev_err(&hdev->pdev->dev, "Reset fail!\n"); in hclge_reset_err_handle()
3586 static int hclge_set_rst_done(struct hclge_dev *hdev) in hclge_set_rst_done() argument
3596 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rst_done()
3602 dev_warn(&hdev->pdev->dev, in hclge_set_rst_done()
3607 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", in hclge_set_rst_done()
3614 static int hclge_reset_prepare_up(struct hclge_dev *hdev) in hclge_reset_prepare_up() argument
3618 switch (hdev->reset_type) { in hclge_reset_prepare_up()
3622 ret = hclge_set_all_vf_rst(hdev, false); in hclge_reset_prepare_up()
3627 ret = hclge_set_rst_done(hdev); in hclge_reset_prepare_up()
3634 hclge_reset_handshake(hdev, false); in hclge_reset_prepare_up()
3639 static int hclge_reset_stack(struct hclge_dev *hdev) in hclge_reset_stack() argument
3643 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset_stack()
3647 ret = hclge_reset_ae_dev(hdev->ae_dev); in hclge_reset_stack()
3651 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset_stack()
3655 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT); in hclge_reset_stack()
3658 static void hclge_reset(struct hclge_dev *hdev) in hclge_reset() argument
3660 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset()
3667 ae_dev->reset_type = hdev->reset_type; in hclge_reset()
3668 hdev->rst_stats.reset_cnt++; in hclge_reset()
3670 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset()
3674 ret = hclge_reset_prepare_down(hdev); in hclge_reset()
3679 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset()
3685 ret = hclge_reset_prepare_wait(hdev); in hclge_reset()
3689 if (hclge_reset_wait(hdev)) in hclge_reset()
3692 hdev->rst_stats.hw_reset_done_cnt++; in hclge_reset()
3694 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset()
3700 ret = hclge_reset_stack(hdev); in hclge_reset()
3704 hclge_clear_reset_cause(hdev); in hclge_reset()
3706 ret = hclge_reset_prepare_up(hdev); in hclge_reset()
3712 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset()
3717 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) in hclge_reset()
3722 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_reset()
3728 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); in hclge_reset()
3732 hdev->last_reset_time = jiffies; in hclge_reset()
3733 hdev->rst_stats.reset_fail_cnt = 0; in hclge_reset()
3734 hdev->rst_stats.reset_done_cnt++; in hclge_reset()
3742 &hdev->default_reset_request); in hclge_reset()
3744 set_bit(reset_level, &hdev->reset_request); in hclge_reset()
3751 if (hclge_reset_err_handle(hdev)) in hclge_reset()
3752 hclge_reset_task_schedule(hdev); in hclge_reset()
3758 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_event() local
3776 handle = &hdev->vport[0].nic; in hclge_reset_event()
3778 if (time_before(jiffies, (hdev->last_reset_time + in hclge_reset_event()
3780 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_reset_event()
3782 } else if (hdev->default_reset_request) in hclge_reset_event()
3783 hdev->reset_level = in hclge_reset_event()
3785 &hdev->default_reset_request); in hclge_reset_event()
3786 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) in hclge_reset_event()
3787 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_reset_event()
3789 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", in hclge_reset_event()
3790 hdev->reset_level); in hclge_reset_event()
3793 set_bit(hdev->reset_level, &hdev->reset_request); in hclge_reset_event()
3794 hclge_reset_task_schedule(hdev); in hclge_reset_event()
3796 if (hdev->reset_level < HNAE3_GLOBAL_RESET) in hclge_reset_event()
3797 hdev->reset_level++; in hclge_reset_event()
3803 struct hclge_dev *hdev = ae_dev->priv; in hclge_set_def_reset_request() local
3805 set_bit(rst_type, &hdev->default_reset_request); in hclge_set_def_reset_request()
3810 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); in hclge_reset_timer() local
3815 if (!hdev->default_reset_request) in hclge_reset_timer()
3818 dev_info(&hdev->pdev->dev, in hclge_reset_timer()
3820 hclge_reset_event(hdev->pdev, NULL); in hclge_reset_timer()
3823 static void hclge_reset_subtask(struct hclge_dev *hdev) in hclge_reset_subtask() argument
3825 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_subtask()
3836 hdev->last_reset_time = jiffies; in hclge_reset_subtask()
3837 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); in hclge_reset_subtask()
3838 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
3839 hclge_reset(hdev); in hclge_reset_subtask()
3842 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_reset_subtask()
3843 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
3844 hclge_do_reset(hdev); in hclge_reset_subtask()
3846 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_subtask()
3851 struct hclge_dev *hdev = in hclge_reset_service_task() local
3854 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_reset_service_task()
3857 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_reset_service_task()
3859 hclge_reset_subtask(hdev); in hclge_reset_service_task()
3861 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
3866 struct hclge_dev *hdev = in hclge_mailbox_service_task() local
3869 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) in hclge_mailbox_service_task()
3872 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_mailbox_service_task()
3874 hclge_mbx_handler(hdev); in hclge_mailbox_service_task()
3876 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_mailbox_service_task()
3879 static void hclge_update_vport_alive(struct hclge_dev *hdev) in hclge_update_vport_alive() argument
3884 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_update_vport_alive()
3885 struct hclge_vport *vport = &hdev->vport[i]; in hclge_update_vport_alive()
3898 struct hclge_dev *hdev = in hclge_service_task() local
3901 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); in hclge_service_task()
3903 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { in hclge_service_task()
3904 hclge_update_stats_for_all(hdev); in hclge_service_task()
3905 hdev->hw_stats.stats_timer = 0; in hclge_service_task()
3908 hclge_update_port_info(hdev); in hclge_service_task()
3909 hclge_update_link_status(hdev); in hclge_service_task()
3910 hclge_update_vport_alive(hdev); in hclge_service_task()
3911 hclge_sync_vlan_filter(hdev); in hclge_service_task()
3912 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) { in hclge_service_task()
3913 hclge_rfs_filter_expire(hdev); in hclge_service_task()
3914 hdev->fd_arfs_expire_timer = 0; in hclge_service_task()
3917 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); in hclge_service_task()
3936 struct hclge_dev *hdev = vport->back; in hclge_get_vector() local
3940 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); in hclge_get_vector()
3941 vector_num = min(hdev->num_msi_left, vector_num); in hclge_get_vector()
3944 for (i = 1; i < hdev->num_msi; i++) { in hclge_get_vector()
3945 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { in hclge_get_vector()
3946 vector->vector = pci_irq_vector(hdev->pdev, i); in hclge_get_vector()
3947 vector->io_addr = hdev->hw.io_base + in hclge_get_vector()
3952 hdev->vector_status[i] = vport->vport_id; in hclge_get_vector()
3953 hdev->vector_irq[i] = vector->vector; in hclge_get_vector()
3962 hdev->num_msi_left -= alloc; in hclge_get_vector()
3963 hdev->num_msi_used += alloc; in hclge_get_vector()
3968 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) in hclge_get_vector_index() argument
3972 for (i = 0; i < hdev->num_msi; i++) in hclge_get_vector_index()
3973 if (vector == hdev->vector_irq[i]) in hclge_get_vector_index()
3982 struct hclge_dev *hdev = vport->back; in hclge_put_vector() local
3985 vector_id = hclge_get_vector_index(hdev, vector); in hclge_put_vector()
3987 dev_err(&hdev->pdev->dev, in hclge_put_vector()
3992 hclge_free_vector(hdev, vector_id); in hclge_put_vector()
4007 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, in hclge_set_rss_algo_key() argument
4033 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_algo_key()
4035 dev_err(&hdev->pdev->dev, in hclge_set_rss_algo_key()
4044 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) in hclge_set_rss_indir_table() argument
4065 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_indir_table()
4067 dev_err(&hdev->pdev->dev, in hclge_set_rss_indir_table()
4076 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, in hclge_set_rss_tc_mode() argument
4099 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_tc_mode()
4101 dev_err(&hdev->pdev->dev, in hclge_set_rss_tc_mode()
4123 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) in hclge_set_rss_input_tuple() argument
4134 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; in hclge_set_rss_input_tuple()
4135 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; in hclge_set_rss_input_tuple()
4136 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; in hclge_set_rss_input_tuple()
4137 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; in hclge_set_rss_input_tuple()
4138 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; in hclge_set_rss_input_tuple()
4139 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; in hclge_set_rss_input_tuple()
4140 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; in hclge_set_rss_input_tuple()
4141 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; in hclge_set_rss_input_tuple()
4142 hclge_get_rss_type(&hdev->vport[0]); in hclge_set_rss_input_tuple()
4143 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_input_tuple()
4145 dev_err(&hdev->pdev->dev, in hclge_set_rss_input_tuple()
4187 struct hclge_dev *hdev = vport->back; in hclge_set_rss() local
4207 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); in hclge_set_rss()
4221 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); in hclge_set_rss()
4253 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tuple() local
4309 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rss_tuple()
4311 dev_err(&hdev->pdev->dev, in hclge_set_rss_tuple()
4381 struct hclge_dev *hdev = vport->back; in hclge_get_tc_size() local
4383 return hdev->rss_size_max; in hclge_get_tc_size()
4386 int hclge_rss_init_hw(struct hclge_dev *hdev) in hclge_rss_init_hw() argument
4388 struct hclge_vport *vport = hdev->vport; in hclge_rss_init_hw()
4400 ret = hclge_set_rss_indir_table(hdev, rss_indir); in hclge_rss_init_hw()
4404 ret = hclge_set_rss_algo_key(hdev, hfunc, key); in hclge_rss_init_hw()
4408 ret = hclge_set_rss_input_tuple(hdev); in hclge_rss_init_hw()
4417 dev_err(&hdev->pdev->dev, in hclge_rss_init_hw()
4429 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rss_init_hw()
4437 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); in hclge_rss_init_hw()
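hclge_rss_init_hw() derives the tc_valid/tc_size/tc_offset arrays it passes to hclge_set_rss_tc_mode() from hw_tc_map and the per-TC RSS size, with the size field log2-encoded after rounding up to a power of two. A standalone model of that derivation (the log2 encoding follows the driver's pattern but should be read as an assumption):

#include <stdio.h>

#define MAX_TC_NUM 8

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

static unsigned int roundup_pow2(unsigned int v)
{
        unsigned int p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int hw_tc_map = 0x0b; /* TCs 0, 1 and 3 enabled */
        unsigned int rss_size = 6;     /* RSS queues per TC */
        unsigned int log_size = ilog2_u32(roundup_pow2(rss_size));
        unsigned int tc_valid[MAX_TC_NUM], tc_size[MAX_TC_NUM];
        unsigned int tc_offset[MAX_TC_NUM], i;

        for (i = 0; i < MAX_TC_NUM; i++) {
                tc_valid[i] = !!(hw_tc_map & (1U << i));
                tc_size[i] = tc_valid[i] ? log_size : 0;
                tc_offset[i] = tc_valid[i] ? rss_size * i : 0;
                printf("tc%u: valid=%u size=%u offset=%u\n",
                       i, tc_valid[i], tc_size[i], tc_offset[i]);
        }
        return 0;
}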
4440 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) in hclge_rss_indir_init_cfg() argument
4442 struct hclge_vport *vport = hdev->vport; in hclge_rss_indir_init_cfg()
4445 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { in hclge_rss_indir_init_cfg()
4452 static void hclge_rss_init_cfg(struct hclge_dev *hdev) in hclge_rss_init_cfg() argument
4455 struct hclge_vport *vport = hdev->vport; in hclge_rss_init_cfg()
4457 if (hdev->pdev->revision >= 0x21) in hclge_rss_init_cfg()
4460 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_rss_init_cfg()
4484 hclge_rss_indir_init_cfg(hdev); in hclge_rss_init_cfg()
4491 struct hclge_dev *hdev = vport->back; in hclge_bind_ring_with_vector() local
4523 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
4525 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
4542 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
4544 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
4557 struct hclge_dev *hdev = vport->back; in hclge_map_ring_to_vector() local
4560 vector_id = hclge_get_vector_index(hdev, vector); in hclge_map_ring_to_vector()
4562 dev_err(&hdev->pdev->dev, in hclge_map_ring_to_vector()
4574 struct hclge_dev *hdev = vport->back; in hclge_unmap_ring_frm_vector() local
4577 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_unmap_ring_frm_vector()
4580 vector_id = hclge_get_vector_index(hdev, vector); in hclge_unmap_ring_frm_vector()
4596 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, in hclge_cmd_set_promisc_mode() argument
4616 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_set_promisc_mode()
4618 dev_err(&hdev->pdev->dev, in hclge_cmd_set_promisc_mode()
4644 struct hclge_dev *hdev = vport->back; in hclge_set_promisc_mode() local
4657 return hclge_cmd_set_promisc_mode(hdev, &param); in hclge_set_promisc_mode()
4660 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) in hclge_get_fd_mode() argument
4670 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_mode()
4672 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); in hclge_get_fd_mode()
4681 static int hclge_get_fd_allocation(struct hclge_dev *hdev, in hclge_get_fd_allocation() argument
4695 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_allocation()
4697 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", in hclge_get_fd_allocation()
4710 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) in hclge_set_fd_key_config() argument
4720 stage = &hdev->fd_cfg.key_cfg[stage_num]; in hclge_set_fd_key_config()
4730 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fd_key_config()
4732 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); in hclge_set_fd_key_config()
4737 static int hclge_init_fd_config(struct hclge_dev *hdev) in hclge_init_fd_config() argument
4743 if (!hnae3_dev_fd_supported(hdev)) in hclge_init_fd_config()
4746 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
4750 switch (hdev->fd_cfg.fd_mode) { in hclge_init_fd_config()
4752 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; in hclge_init_fd_config()
4755 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; in hclge_init_fd_config()
4758 dev_err(&hdev->pdev->dev, in hclge_init_fd_config()
4760 hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
4764 hdev->fd_cfg.proto_support = in hclge_init_fd_config()
4767 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; in hclge_init_fd_config()
4780 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { in hclge_init_fd_config()
4781 hdev->fd_cfg.proto_support |= ETHER_FLOW; in hclge_init_fd_config()
4791 ret = hclge_get_fd_allocation(hdev, in hclge_init_fd_config()
4792 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
4793 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], in hclge_init_fd_config()
4794 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
4795 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); in hclge_init_fd_config()
4799 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); in hclge_init_fd_config()
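hclge_init_fd_config() sizes the flow-director lookup key from the queried mode: one TCAM layout keeps the full key width, the other doubles the entry count and halves the key, which is also why ETHER_FLOW support is gated on the full width above. A compact model of that selection (the mode names and the 400-bit full width are stand-ins, not taken from this listing):

#include <stdio.h>

#define MAX_KEY_LENGTH 400 /* assumed full key width, in bits */

enum fd_mode {
        FD_MODE_DEPTH_2K_WIDTH_400B, /* fewer, wider entries */
        FD_MODE_DEPTH_4K_WIDTH_200B, /* twice the entries, half the key */
};

static int fd_max_key_length(enum fd_mode mode)
{
        return mode == FD_MODE_DEPTH_4K_WIDTH_200B ? MAX_KEY_LENGTH / 2
                                                   : MAX_KEY_LENGTH;
}

int main(void)
{
        printf("wide mode:   %d bits\n",
               fd_max_key_length(FD_MODE_DEPTH_2K_WIDTH_400B));
        printf("narrow mode: %d bits\n",
               fd_max_key_length(FD_MODE_DEPTH_4K_WIDTH_200B));
        return 0;
}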
4802 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, in hclge_fd_tcam_config() argument
4835 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_fd_tcam_config()
4837 dev_err(&hdev->pdev->dev, in hclge_fd_tcam_config()
4844 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, in hclge_fd_ad_config() argument
4876 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_ad_config()
4878 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); in hclge_fd_ad_config()
5047 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, in hclge_config_key() argument
5050 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; in hclge_config_key()
5077 meta_data_region = hdev->fd_cfg.max_key_length / 8 - in hclge_config_key()
5085 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, in hclge_config_key()
5088 dev_err(&hdev->pdev->dev, in hclge_config_key()
5094 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, in hclge_config_key()
5097 dev_err(&hdev->pdev->dev, in hclge_config_key()
5103 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, in hclge_config_action() argument
5129 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); in hclge_config_action()
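A flow-director rule is written as a TCAM pair: hclge_config_key() programs key_y with sel_x false, then key_x with sel_x true. The sketch below assumes the conventional ternary x/y encoding, where a cleared mask bit turns into a don't-care; the exact encoding the hardware expects is not visible in this listing:

#include <stdio.h>

/* Assumed ternary encoding for a value/mask byte:
 *   x = ~value & mask  (bits that must be 0)
 *   y =  value | ~mask (bits that must be 1, plus don't-cares)
 * A bit with mask = 0 yields x = 0, y = 1, i.e. "match anything". */
static void calc_xy(unsigned char value, unsigned char mask,
                    unsigned char *x, unsigned char *y)
{
        *x = ~value & mask;
        *y = value | (unsigned char)~mask;
}

int main(void)
{
        unsigned char x, y;

        calc_xy(0xa5, 0xf0, &x, &y); /* match only the high nibble 0xa */
        printf("x=0x%02x y=0x%02x\n", (unsigned int)x, (unsigned int)y);
        return 0;
}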
5132 static int hclge_fd_check_spec(struct hclge_dev *hdev, in hclge_fd_check_spec() argument
5141 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_fd_check_spec()
5144 if (!(fs->flow_type & hdev->fd_cfg.proto_support)) in hclge_fd_check_spec()
5149 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); in hclge_fd_check_spec()
5286 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) in hclge_fd_check_spec()
5298 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) in hclge_fd_rule_exist() argument
5303 spin_lock_bh(&hdev->fd_rule_lock); in hclge_fd_rule_exist()
5304 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_fd_rule_exist()
5309 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_fd_rule_exist()
5315 static int hclge_fd_update_rule_list(struct hclge_dev *hdev, in hclge_fd_update_rule_list() argument
5327 &hdev->fd_rule_list, rule_node) { in hclge_fd_update_rule_list()
5336 hdev->hclge_fd_rule_num--; in hclge_fd_update_rule_list()
5339 if (!hdev->hclge_fd_rule_num) in hclge_fd_update_rule_list()
5340 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_fd_update_rule_list()
5341 clear_bit(location, hdev->fd_bmap); in hclge_fd_update_rule_list()
5346 dev_err(&hdev->pdev->dev, in hclge_fd_update_rule_list()
5357 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); in hclge_fd_update_rule_list()
5359 set_bit(location, hdev->fd_bmap); in hclge_fd_update_rule_list()
5360 hdev->hclge_fd_rule_num++; in hclge_fd_update_rule_list()
5361 hdev->fd_active_type = new_rule->rule_type; in hclge_fd_update_rule_list()
5366 static int hclge_fd_get_tuple(struct hclge_dev *hdev, in hclge_fd_get_tuple() argument
5520 static int hclge_fd_config_rule(struct hclge_dev *hdev, in hclge_fd_config_rule() argument
5526 dev_err(&hdev->pdev->dev, in hclge_fd_config_rule()
5532 hclge_fd_update_rule_list(hdev, rule, rule->location, true); in hclge_fd_config_rule()
5534 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
5538 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
5545 hclge_fd_update_rule_list(hdev, rule, rule->location, false); in hclge_fd_config_rule()
5553 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry() local
5561 if (!hnae3_dev_fd_supported(hdev)) in hclge_add_fd_entry()
5564 if (!hdev->fd_en) { in hclge_add_fd_entry()
5565 dev_warn(&hdev->pdev->dev, in hclge_add_fd_entry()
5572 ret = hclge_fd_check_spec(hdev, fs, &unused); in hclge_add_fd_entry()
5574 dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); in hclge_add_fd_entry()
5585 if (vf > hdev->num_req_vfs) { in hclge_add_fd_entry()
5586 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
5588 vf, hdev->num_req_vfs); in hclge_add_fd_entry()
5592 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; in hclge_add_fd_entry()
5593 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; in hclge_add_fd_entry()
5596 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
5610 ret = hclge_fd_get_tuple(hdev, fs, rule); in hclge_add_fd_entry()
5630 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry()
5631 ret = hclge_fd_config_rule(hdev, rule); in hclge_add_fd_entry()
5633 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry()
5642 struct hclge_dev *hdev = vport->back; in hclge_del_fd_entry() local
5646 if (!hnae3_dev_fd_supported(hdev)) in hclge_del_fd_entry()
5651 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_fd_entry()
5654 if (!hclge_fd_rule_exist(hdev, fs->location)) { in hclge_del_fd_entry()
5655 dev_err(&hdev->pdev->dev, in hclge_del_fd_entry()
5660 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, in hclge_del_fd_entry()
5665 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
5666 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false); in hclge_del_fd_entry()
5668 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
5677 struct hclge_dev *hdev = vport->back; in hclge_del_all_fd_entries() local
5682 if (!hnae3_dev_fd_supported(hdev)) in hclge_del_all_fd_entries()
5685 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_all_fd_entries()
5686 for_each_set_bit(location, hdev->fd_bmap, in hclge_del_all_fd_entries()
5687 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_all_fd_entries()
5688 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, in hclge_del_all_fd_entries()
5692 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, in hclge_del_all_fd_entries()
5697 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_del_all_fd_entries()
5698 hdev->hclge_fd_rule_num = 0; in hclge_del_all_fd_entries()
5699 bitmap_zero(hdev->fd_bmap, in hclge_del_all_fd_entries()
5700 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_del_all_fd_entries()
5703 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_all_fd_entries()
5709 struct hclge_dev *hdev = vport->back; in hclge_restore_fd_entries() local
5718 if (!hnae3_dev_fd_supported(hdev)) in hclge_restore_fd_entries()
5722 if (!hdev->fd_en) in hclge_restore_fd_entries()
5725 spin_lock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
5726 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_restore_fd_entries()
5727 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); in hclge_restore_fd_entries()
5729 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); in hclge_restore_fd_entries()
5732 dev_warn(&hdev->pdev->dev, in hclge_restore_fd_entries()
5735 clear_bit(rule->location, hdev->fd_bmap); in hclge_restore_fd_entries()
5738 hdev->hclge_fd_rule_num--; in hclge_restore_fd_entries()
5742 if (hdev->hclge_fd_rule_num) in hclge_restore_fd_entries()
5743 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE; in hclge_restore_fd_entries()
5745 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
5754 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_cnt() local
5756 if (!hnae3_dev_fd_supported(hdev)) in hclge_get_fd_rule_cnt()
5759 cmd->rule_cnt = hdev->hclge_fd_rule_num; in hclge_get_fd_rule_cnt()
5760 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_fd_rule_cnt()
5770 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_info() local
5774 if (!hnae3_dev_fd_supported(hdev)) in hclge_get_fd_rule_info()
5779 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
5781 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_get_fd_rule_info()
5787 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
5933 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
5965 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
5974 struct hclge_dev *hdev = vport->back; in hclge_get_all_rules() local
5979 if (!hnae3_dev_fd_supported(hdev)) in hclge_get_all_rules()
5982 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_all_rules()
5984 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
5986 &hdev->fd_rule_list, rule_node) { in hclge_get_all_rules()
5988 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
5996 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
6025 hclge_fd_search_flow_keys(struct hclge_dev *hdev, in hclge_fd_search_flow_keys() argument
6031 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_fd_search_flow_keys()
6068 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry_by_arfs() local
6074 if (!hnae3_dev_fd_supported(hdev)) in hclge_add_fd_entry_by_arfs()
6080 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6085 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { in hclge_add_fd_entry_by_arfs()
6086 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6096 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); in hclge_add_fd_entry_by_arfs()
6098 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); in hclge_add_fd_entry_by_arfs()
6099 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_add_fd_entry_by_arfs()
6100 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6107 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6112 set_bit(bit_id, hdev->fd_bmap); in hclge_add_fd_entry_by_arfs()
6117 ret = hclge_fd_config_rule(hdev, rule); in hclge_add_fd_entry_by_arfs()
6119 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6127 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
6134 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); in hclge_add_fd_entry_by_arfs()
6143 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) in hclge_rfs_filter_expire() argument
6146 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_rfs_filter_expire()
6151 spin_lock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
6152 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { in hclge_rfs_filter_expire()
6153 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
6156 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_rfs_filter_expire()
6161 hdev->hclge_fd_rule_num--; in hclge_rfs_filter_expire()
6162 clear_bit(rule->location, hdev->fd_bmap); in hclge_rfs_filter_expire()
6165 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
6168 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, in hclge_rfs_filter_expire()
6179 struct hclge_dev *hdev = vport->back; in hclge_clear_arfs_rules() local
6181 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE) in hclge_clear_arfs_rules()
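hclge_rfs_filter_expire() is a two-phase teardown: expired aRFS rules are unlinked and the counters adjusted under fd_rule_lock, and the slow TCAM deletes run only after the spinlock is dropped. A standalone model of that collect-then-destroy pattern (the hlist is simplified to a singly linked list, and the expiry test stands in for the kernel's rps_may_expire_flow() check):

#include <stdio.h>
#include <stdlib.h>

struct rule {
        int location;
        struct rule *next;
};

/* Stand-in expiry check: expire the odd-numbered locations. */
static int may_expire(const struct rule *r)
{
        return r->location & 1;
}

int main(void)
{
        struct rule *head = NULL, *del = NULL, *r, *next, **pp;
        int i;

        for (i = 0; i < 6; i++) {
                r = malloc(sizeof(*r));
                r->location = i;
                r->next = head;
                head = r;
        }

        /* Phase 1 ("under the lock"): unlink expiring rules onto a
         * private list; bookkeeping only, no slow hardware access. */
        for (pp = &head; (r = *pp) != NULL;) {
                if (may_expire(r)) {
                        *pp = r->next;
                        r->next = del;
                        del = r;
                } else {
                        pp = &r->next;
                }
        }

        /* Phase 2 ("lock dropped"): one slow TCAM delete per rule. */
        for (r = del; r; r = next) {
                next = r->next;
                printf("tcam delete location %d\n", r->location);
                free(r);
        }
        for (r = head; r; r = next) {
                next = r->next;
                free(r);
        }
        return 0;
}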
6189 struct hclge_dev *hdev = vport->back; in hclge_get_hw_reset_stat() local
6191 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || in hclge_get_hw_reset_stat()
6192 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); in hclge_get_hw_reset_stat()
6198 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_resetting() local
6200 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_ae_dev_resetting()
6206 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_reset_cnt() local
6208 return hdev->rst_stats.hw_reset_done_cnt; in hclge_ae_dev_reset_cnt()
6214 struct hclge_dev *hdev = vport->back; in hclge_enable_fd() local
6217 hdev->fd_en = enable; in hclge_enable_fd()
6218 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; in hclge_enable_fd()
6225 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) in hclge_cfg_mac_mode() argument
6250 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_mode()
6252 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_mode()
6256 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, in hclge_config_switch_param() argument
6273 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
6275 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
6285 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
6287 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
6292 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, in hclge_phy_link_status_wait() argument
6297 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_phy_link_status_wait()
6304 dev_err(&hdev->pdev->dev, in hclge_phy_link_status_wait()
6316 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) in hclge_mac_link_status_wait() argument
6324 ret = hclge_get_mac_link_status(hdev); in hclge_mac_link_status_wait()
6335 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, in hclge_mac_phy_link_status_wait() argument
6346 hclge_phy_link_status_wait(hdev, link_ret); in hclge_mac_phy_link_status_wait()
6348 return hclge_mac_link_status_wait(hdev, link_ret); in hclge_mac_phy_link_status_wait()
6351 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) in hclge_set_app_loopback() argument
6361 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
6363 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
6380 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
6382 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
6387 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, in hclge_cfg_serdes_loopback() argument
6409 dev_err(&hdev->pdev->dev, in hclge_cfg_serdes_loopback()
6421 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_serdes_loopback()
6423 dev_err(&hdev->pdev->dev, in hclge_cfg_serdes_loopback()
6432 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_serdes_loopback()
6434 dev_err(&hdev->pdev->dev, in hclge_cfg_serdes_loopback()
6442 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); in hclge_cfg_serdes_loopback()
6445 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); in hclge_cfg_serdes_loopback()
6451 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, in hclge_set_serdes_loopback() argument
6456 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode); in hclge_set_serdes_loopback()
6460 hclge_cfg_mac_mode(hdev, en); in hclge_set_serdes_loopback()
6462 ret = hclge_mac_phy_link_status_wait(hdev, en, false); in hclge_set_serdes_loopback()
6464 dev_err(&hdev->pdev->dev, in hclge_set_serdes_loopback()
6470 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, in hclge_enable_phy_loopback() argument
6488 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, in hclge_disable_phy_loopback() argument
6500 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) in hclge_set_phy_loopback() argument
6502 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_phy_loopback()
6509 ret = hclge_enable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
6511 ret = hclge_disable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
6513 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
6518 hclge_cfg_mac_mode(hdev, en); in hclge_set_phy_loopback()
6520 ret = hclge_mac_phy_link_status_wait(hdev, en, true); in hclge_set_phy_loopback()
6522 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
6528 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id, in hclge_tqp_enable() argument
6542 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_tqp_enable()
6544 dev_err(&hdev->pdev->dev, in hclge_tqp_enable()
6554 struct hclge_dev *hdev = vport->back; in hclge_set_loopback() local
6562 if (hdev->pdev->revision >= 0x21) { in hclge_set_loopback()
6565 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, in hclge_set_loopback()
6573 ret = hclge_set_app_loopback(hdev, en); in hclge_set_loopback()
6577 ret = hclge_set_serdes_loopback(hdev, en, loop_mode); in hclge_set_loopback()
6580 ret = hclge_set_phy_loopback(hdev, en); in hclge_set_loopback()
6584 dev_err(&hdev->pdev->dev, in hclge_set_loopback()
6594 ret = hclge_tqp_enable(hdev, i, 0, en); in hclge_set_loopback()
6602 static int hclge_set_default_loopback(struct hclge_dev *hdev) in hclge_set_default_loopback() argument
6606 ret = hclge_set_app_loopback(hdev, false); in hclge_set_default_loopback()
6610 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); in hclge_set_default_loopback()
6614 return hclge_cfg_serdes_loopback(hdev, false, in hclge_set_default_loopback()
6637 struct hclge_dev *hdev = vport->back; in hclge_set_timer_task() local
6640 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); in hclge_set_timer_task()
6645 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_set_timer_task()
6646 cancel_delayed_work_sync(&hdev->service_task); in hclge_set_timer_task()
6647 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); in hclge_set_timer_task()
6654 struct hclge_dev *hdev = vport->back; in hclge_ae_start() local
6657 hclge_cfg_mac_mode(hdev, true); in hclge_ae_start()
6658 clear_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_start()
6659 hdev->hw.mac.link = 0; in hclge_ae_start()
6664 hclge_mac_start_phy(hdev); in hclge_ae_start()
6672 struct hclge_dev *hdev = vport->back; in hclge_ae_stop() local
6675 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_stop()
6682 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && in hclge_ae_stop()
6683 hdev->reset_type != HNAE3_FUNC_RESET) { in hclge_ae_stop()
6684 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
6685 hclge_update_link_status(hdev); in hclge_ae_stop()
6692 hclge_config_mac_tnl_int(hdev, false); in hclge_ae_stop()
6695 hclge_cfg_mac_mode(hdev, false); in hclge_ae_stop()
6697 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
6701 hclge_update_link_status(hdev); in hclge_ae_stop()
6734 struct hclge_dev *hdev = vport->back; in hclge_get_mac_vlan_cmd_status() local
6737 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6747 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6751 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6756 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6764 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6769 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6777 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6782 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6788 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
6858 struct hclge_dev *hdev = vport->back; in hclge_remove_mac_vlan_tbl() local
6868 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_remove_mac_vlan_tbl()
6870 dev_err(&hdev->pdev->dev, in hclge_remove_mac_vlan_tbl()
6887 struct hclge_dev *hdev = vport->back; in hclge_lookup_mac_vlan_tbl() local
6905 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_lookup_mac_vlan_tbl()
6910 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_lookup_mac_vlan_tbl()
6913 dev_err(&hdev->pdev->dev, in hclge_lookup_mac_vlan_tbl()
6929 struct hclge_dev *hdev = vport->back; in hclge_add_mac_vlan_tbl() local
6943 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mac_vlan_tbl()
6959 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); in hclge_add_mac_vlan_tbl()
6969 dev_err(&hdev->pdev->dev, in hclge_add_mac_vlan_tbl()
6978 static int hclge_init_umv_space(struct hclge_dev *hdev) in hclge_init_umv_space() argument
6983 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, in hclge_init_umv_space()
6988 if (allocated_size < hdev->wanted_umv_size) in hclge_init_umv_space()
6989 dev_warn(&hdev->pdev->dev, in hclge_init_umv_space()
6991 hdev->wanted_umv_size, allocated_size); in hclge_init_umv_space()
6993 mutex_init(&hdev->umv_mutex); in hclge_init_umv_space()
6994 hdev->max_umv_size = allocated_size; in hclge_init_umv_space()
6999 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); in hclge_init_umv_space()
7000 hdev->share_umv_size = hdev->priv_umv_size + in hclge_init_umv_space()
7001 hdev->max_umv_size % (hdev->num_req_vfs + 2); in hclge_init_umv_space()
7006 static int hclge_uninit_umv_space(struct hclge_dev *hdev) in hclge_uninit_umv_space() argument
7010 if (hdev->max_umv_size > 0) { in hclge_uninit_umv_space()
7011 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, in hclge_uninit_umv_space()
7015 hdev->max_umv_size = 0; in hclge_uninit_umv_space()
7017 mutex_destroy(&hdev->umv_mutex); in hclge_uninit_umv_space()
7022 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, in hclge_set_umv_space() argument
7036 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_umv_space()
7038 dev_err(&hdev->pdev->dev, in hclge_set_umv_space()
7050 static void hclge_reset_umv_space(struct hclge_dev *hdev) in hclge_reset_umv_space() argument
7055 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_umv_space()
7056 vport = &hdev->vport[i]; in hclge_reset_umv_space()
7060 mutex_lock(&hdev->umv_mutex); in hclge_reset_umv_space()
7061 hdev->share_umv_size = hdev->priv_umv_size + in hclge_reset_umv_space()
7062 hdev->max_umv_size % (hdev->num_req_vfs + 2); in hclge_reset_umv_space()
7063 mutex_unlock(&hdev->umv_mutex); in hclge_reset_umv_space()
7068 struct hclge_dev *hdev = vport->back; in hclge_is_umv_space_full() local
7071 mutex_lock(&hdev->umv_mutex); in hclge_is_umv_space_full()
7072 is_full = (vport->used_umv_num >= hdev->priv_umv_size && in hclge_is_umv_space_full()
7073 hdev->share_umv_size == 0); in hclge_is_umv_space_full()
7074 mutex_unlock(&hdev->umv_mutex); in hclge_is_umv_space_full()
7081 struct hclge_dev *hdev = vport->back; in hclge_update_umv_space() local
7083 mutex_lock(&hdev->umv_mutex); in hclge_update_umv_space()
7085 if (vport->used_umv_num > hdev->priv_umv_size) in hclge_update_umv_space()
7086 hdev->share_umv_size++; in hclge_update_umv_space()
7091 if (vport->used_umv_num >= hdev->priv_umv_size && in hclge_update_umv_space()
7092 hdev->share_umv_size > 0) in hclge_update_umv_space()
7093 hdev->share_umv_size--; in hclge_update_umv_space()
7096 mutex_unlock(&hdev->umv_mutex); in hclge_update_umv_space()
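The unicast MAC (UMV) space is carved into a guaranteed private quota per function plus one shared pool: the allocation is divided by num_req_vfs + 2 and the division remainder tops up the shared part, exactly as hclge_init_umv_space() and hclge_reset_umv_space() compute it above. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int max_umv_size = 3072; /* entries granted by firmware */
        unsigned int num_req_vfs = 8;
        unsigned int parts = num_req_vfs + 2; /* divisor used by the driver */
        unsigned int priv = max_umv_size / parts;
        unsigned int share = priv + max_umv_size % parts;

        /* Each function keeps `priv` guaranteed entries; the remainder is
         * pooled, so no entry is lost:
         * priv * parts + (share - priv) == max_umv_size. */
        printf("priv=%u share=%u check=%u\n",
               priv, share, priv * parts + (share - priv));
        return 0;
}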
7110 struct hclge_dev *hdev = vport->back; in hclge_add_uc_addr_common() local
7120 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
7150 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", in hclge_add_uc_addr_common()
7151 hdev->priv_umv_size); in hclge_add_uc_addr_common()
7158 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n", in hclge_add_uc_addr_common()
7163 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
7181 struct hclge_dev *hdev = vport->back; in hclge_rm_uc_addr_common() local
7189 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n", in hclge_rm_uc_addr_common()
7215 struct hclge_dev *hdev = vport->back; in hclge_add_mc_addr_common() local
7222 dev_err(&hdev->pdev->dev, in hclge_add_mc_addr_common()
7243 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); in hclge_add_mc_addr_common()
7259 struct hclge_dev *hdev = vport->back; in hclge_rm_mc_addr_common() local
7266 dev_dbg(&hdev->pdev->dev, in hclge_rm_mc_addr_common()
7377 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) in hclge_uninit_vport_mac_table() argument
7383 mutex_lock(&hdev->vport_cfg_mutex); in hclge_uninit_vport_mac_table()
7384 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_mac_table()
7385 vport = &hdev->vport[i]; in hclge_uninit_vport_mac_table()
7396 mutex_unlock(&hdev->vport_cfg_mutex); in hclge_uninit_vport_mac_table()
7399 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, in hclge_get_mac_ethertype_cmd_status() argument
7410 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
7422 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
7427 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
7432 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
7441 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, in hclge_add_mgr_tbl() argument
7452 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mgr_tbl()
7454 dev_err(&hdev->pdev->dev, in hclge_add_mgr_tbl()
7463 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); in hclge_add_mgr_tbl()
7466 static int init_mgr_tbl(struct hclge_dev *hdev) in init_mgr_tbl() argument
7472 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); in init_mgr_tbl()
7474 dev_err(&hdev->pdev->dev, in init_mgr_tbl()
7487 struct hclge_dev *hdev = vport->back; in hclge_get_mac_addr() local
7489 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclge_get_mac_addr()
7497 struct hclge_dev *hdev = vport->back; in hclge_set_mac_addr() local
7504 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
7511 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) in hclge_set_mac_addr()
7512 dev_warn(&hdev->pdev->dev, in hclge_set_mac_addr()
7517 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
7522 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) in hclge_set_mac_addr()
7523 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
7529 ret = hclge_pause_addr_cfg(hdev, new_addr); in hclge_set_mac_addr()
7531 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
7537 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); in hclge_set_mac_addr()
7546 struct hclge_dev *hdev = vport->back; in hclge_do_ioctl() local
7548 if (!hdev->hw.mac.phydev) in hclge_do_ioctl()
7551 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); in hclge_do_ioctl()
7554 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, in hclge_set_vlan_filter_ctrl() argument
7568 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
7570 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", in hclge_set_vlan_filter_ctrl()
7591 struct hclge_dev *hdev = vport->back; in hclge_enable_vlan_filter() local
7593 if (hdev->pdev->revision >= 0x21) { in hclge_enable_vlan_filter()
7594 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_enable_vlan_filter()
7596 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_enable_vlan_filter()
7599 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_enable_vlan_filter()
7609 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, in hclge_set_vf_vlan_common() argument
7624 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) in hclge_set_vf_vlan_common()
7648 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_set_vf_vlan_common()
7650 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
7662 set_bit(vfid, hdev->vf_vlan_full); in hclge_set_vf_vlan_common()
7663 dev_warn(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
7668 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
7684 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
7692 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, in hclge_set_port_vlan_filter() argument
7713 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter()
7715 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter()
7720 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, in hclge_set_vlan_filter_hw() argument
7730 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, in hclge_set_vlan_filter_hw()
7733 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
7741 test_bit(vport_id, hdev->vlan_table[vlan_id])) in hclge_set_vlan_filter_hw()
7744 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_set_vlan_filter_hw()
7745 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
7752 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_set_vlan_filter_hw()
7753 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
7759 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) in hclge_set_vlan_filter_hw()
7763 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, in hclge_set_vlan_filter_hw()
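hclge_set_vlan_filter_hw() keeps a per-VLAN membership bitmap so the shared port filter is reprogrammed only when the first vport joins a VLAN or the last one leaves; joins and leaves in between touch just the VF table. A standalone model with a 64-bit mask standing in for hdev->vlan_table[vlan_id]:

#include <stdio.h>

/* One 64-bit membership mask per VLAN ID, modelling hdev->vlan_table[]. */
static unsigned long long vlan_table[4096];

/* Returns 1 when the shared port filter must be (re)programmed:
 * the first vport joining the VLAN, or the last one leaving it. */
static int vlan_filter_update(int vport_id, int vlan_id, int is_kill)
{
        unsigned long long bit = 1ULL << vport_id;

        if (is_kill)
                vlan_table[vlan_id] &= ~bit;
        else
                vlan_table[vlan_id] |= bit;

        return is_kill ? vlan_table[vlan_id] == 0
                       : vlan_table[vlan_id] == bit;
}

int main(void)
{
        printf("%d\n", vlan_filter_update(0, 100, 0)); /* 1: first member  */
        printf("%d\n", vlan_filter_update(3, 100, 0)); /* 0: already set   */
        printf("%d\n", vlan_filter_update(0, 100, 1)); /* 0: vport 3 stays */
        printf("%d\n", vlan_filter_update(3, 100, 1)); /* 1: last one gone */
        return 0;
}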
7773 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_tx_offload_cfg() local
7803 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_tx_offload_cfg()
7805 dev_err(&hdev->pdev->dev, in hclge_set_vlan_tx_offload_cfg()
7816 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_rx_offload_cfg() local
7839 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_rx_offload_cfg()
7841 dev_err(&hdev->pdev->dev, in hclge_set_vlan_rx_offload_cfg()
7894 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) in hclge_set_vlan_protocol_type() argument
7904 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); in hclge_set_vlan_protocol_type()
7906 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); in hclge_set_vlan_protocol_type()
7908 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); in hclge_set_vlan_protocol_type()
7910 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); in hclge_set_vlan_protocol_type()
7912 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
7914 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
7923 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); in hclge_set_vlan_protocol_type()
7924 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); in hclge_set_vlan_protocol_type()
7926 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
7928 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
7935 static int hclge_init_vlan_config(struct hclge_dev *hdev) in hclge_init_vlan_config() argument
7939 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_init_vlan_config()
7944 if (hdev->pdev->revision >= 0x21) { in hclge_init_vlan_config()
7946 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_config()
7947 vport = &hdev->vport[i]; in hclge_init_vlan_config()
7948 ret = hclge_set_vlan_filter_ctrl(hdev, in hclge_init_vlan_config()
7957 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_init_vlan_config()
7963 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_init_vlan_config()
7972 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
7973 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
7974 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
7975 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
7976 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
7977 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; in hclge_init_vlan_config()
7979 ret = hclge_set_vlan_protocol_type(hdev); in hclge_init_vlan_config()
7983 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_config()
7986 vport = &hdev->vport[i]; in hclge_init_vlan_config()
8017 struct hclge_dev *hdev = vport->back; in hclge_add_vport_all_vlan_table() local
8022 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_add_vport_all_vlan_table()
8026 dev_err(&hdev->pdev->dev, in hclge_add_vport_all_vlan_table()
8042 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_vlan_table() local
8047 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_vlan_table()
8063 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_vlan_table() local
8067 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_all_vlan_table()
8081 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) in hclge_uninit_vport_vlan_table() argument
8087 mutex_lock(&hdev->vport_cfg_mutex); in hclge_uninit_vport_vlan_table()
8088 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_vlan_table()
8089 vport = &hdev->vport[i]; in hclge_uninit_vport_vlan_table()
8095 mutex_unlock(&hdev->vport_cfg_mutex); in hclge_uninit_vport_vlan_table()
8102 struct hclge_dev *hdev = vport->back; in hclge_restore_vlan_table() local
8107 mutex_lock(&hdev->vport_cfg_mutex); in hclge_restore_vlan_table()
8108 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_restore_vlan_table()
8109 vport = &hdev->vport[i]; in hclge_restore_vlan_table()
8115 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), in hclge_restore_vlan_table()
8123 hclge_set_vlan_filter_hw(hdev, in hclge_restore_vlan_table()
8131 mutex_unlock(&hdev->vport_cfg_mutex); in hclge_restore_vlan_table()
8157 struct hclge_dev *hdev = vport->back; in hclge_update_vlan_filter_entries() local
8162 return hclge_set_vlan_filter_hw(hdev, in hclge_update_vlan_filter_entries()
8169 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), in hclge_update_vlan_filter_entries()
8183 struct hclge_dev *hdev = vport->back; in hclge_update_port_base_vlan_cfg() local
8194 ret = hclge_set_vlan_filter_hw(hdev, in hclge_update_port_base_vlan_cfg()
8203 ret = hclge_set_vlan_filter_hw(hdev, in hclge_update_port_base_vlan_cfg()
8257 struct hclge_dev *hdev = vport->back; in hclge_set_vf_vlan_filter() local
8262 if (hdev->pdev->revision == 0x20) in hclge_set_vf_vlan_filter()
8266 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7) in hclge_set_vf_vlan_filter()
8271 vport = &hdev->vport[vfid]; in hclge_set_vf_vlan_filter()
8284 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_set_vf_vlan_filter()
8286 hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_set_vf_vlan_filter()
8295 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], in hclge_set_vf_vlan_filter()
8307 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_filter() local
8315 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) { in hclge_set_vlan_filter()
8327 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, in hclge_set_vlan_filter()
8348 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) in hclge_sync_vlan_filter() argument
8356 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_filter()
8357 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_vlan_filter()
8362 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_sync_vlan_filter()
8381 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) in hclge_set_mac_mtu() argument
8392 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_mac_mtu()
8404 struct hclge_dev *hdev = vport->back; in hclge_set_vport_mtu() local
8413 mutex_lock(&hdev->vport_lock); in hclge_set_vport_mtu()
8415 if (vport->vport_id && max_frm_size > hdev->mps) { in hclge_set_vport_mtu()
8416 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
8420 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
8425 for (i = 1; i < hdev->num_alloc_vport; i++) in hclge_set_vport_mtu()
8426 if (max_frm_size < hdev->vport[i].mps) { in hclge_set_vport_mtu()
8427 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
8431 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_set_vport_mtu()
8433 ret = hclge_set_mac_mtu(hdev, max_frm_size); in hclge_set_vport_mtu()
8435 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
8440 hdev->mps = max_frm_size; in hclge_set_vport_mtu()
8443 ret = hclge_buffer_alloc(hdev); in hclge_set_vport_mtu()
8445 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
8449 hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_set_vport_mtu()
8450 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
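hclge_set_vport_mtu() converts the requested MTU into a maximum packet size (MPS) before programming the MAC, and hclge_init_ae_dev() seeds hdev->mps the same way from ETH_FRAME_LEN. The conversion adds the Ethernet header, the FCS, and room for two VLAN tags; a standalone check:

#include <stdio.h>

#define ETH_HLEN    14 /* constants as in <linux/if_ether.h> */
#define ETH_FCS_LEN 4
#define VLAN_HLEN   4

/* MTU -> maximum packet size: payload plus L2 header, FCS and
 * space for a double (QinQ) VLAN tag, as in the listing. */
static int mtu_to_mps(int mtu)
{
        return mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
}

int main(void)
{
        printf("mtu 1500 -> mps %d\n", mtu_to_mps(1500)); /* 1526 */
        printf("mtu 9000 -> mps %d\n", mtu_to_mps(9000)); /* 9026 */
        return 0;
}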
8454 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, in hclge_send_reset_tqp_cmd() argument
8468 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_send_reset_tqp_cmd()
8470 dev_err(&hdev->pdev->dev, in hclge_send_reset_tqp_cmd()
8478 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) in hclge_get_reset_status() argument
8489 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_reset_status()
8491 dev_err(&hdev->pdev->dev, in hclge_get_reset_status()
8513 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp() local
8521 ret = hclge_tqp_enable(hdev, queue_id, 0, false); in hclge_reset_tqp()
8523 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); in hclge_reset_tqp()
8527 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); in hclge_reset_tqp()
8529 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
8535 reset_status = hclge_get_reset_status(hdev, queue_gid); in hclge_reset_tqp()
8544 dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); in hclge_reset_tqp()
8548 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); in hclge_reset_tqp()
8550 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
8558 struct hclge_dev *hdev = vport->back; in hclge_reset_vf_queue() local
8566 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); in hclge_reset_vf_queue()
8568 dev_warn(&hdev->pdev->dev, in hclge_reset_vf_queue()
8574 reset_status = hclge_get_reset_status(hdev, queue_gid); in hclge_reset_vf_queue()
8583 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); in hclge_reset_vf_queue()
8587 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); in hclge_reset_vf_queue()
8589 dev_warn(&hdev->pdev->dev, in hclge_reset_vf_queue()
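Both TQP reset paths above share a send/poll/clear shape: request the reset, poll hclge_get_reset_status() under a bounded retry budget with a sleep between polls, then clear the request bit again. A standalone model of that loop (the retry budget, sleep interval and status stub are illustrative):

#include <stdio.h>

#define RESET_TRY_TIMES 200 /* assumed retry budget for this sketch */

/* Stand-in for hclge_get_reset_status(): reports done after 5 polls. */
static int get_reset_status(int poll)
{
        return poll >= 5;
}

int main(void)
{
        int try = 0;

        printf("send_reset_tqp_cmd(enable)\n");
        while (try++ < RESET_TRY_TIMES) {
                if (get_reset_status(try)) {
                        printf("reset done after %d polls\n", try);
                        printf("send_reset_tqp_cmd(disable)\n");
                        return 0;
                }
                /* the driver sleeps ~20ms here between polls */
        }
        printf("reset timed out\n");
        return 1;
}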
8596 struct hclge_dev *hdev = vport->back; in hclge_get_fw_version() local
8598 return hdev->fw_version; in hclge_get_fw_version()
8601 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_set_flowctrl_adv() argument
8603 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_flowctrl_adv()
8611 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_cfg_pauseparam() argument
8615 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) in hclge_cfg_pauseparam()
8618 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); in hclge_cfg_pauseparam()
8620 dev_err(&hdev->pdev->dev, in hclge_cfg_pauseparam()
8626 int hclge_cfg_flowctrl(struct hclge_dev *hdev) in hclge_cfg_flowctrl() argument
8628 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_cfg_flowctrl()
8655 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); in hclge_cfg_flowctrl()
8662 struct hclge_dev *hdev = vport->back; in hclge_get_pauseparam() local
8663 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_pauseparam()
8667 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_get_pauseparam()
8673 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { in hclge_get_pauseparam()
8676 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { in hclge_get_pauseparam()
8679 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { in hclge_get_pauseparam()
8688 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, in hclge_record_user_pauseparam() argument
8692 hdev->fc_mode_last_time = HCLGE_FC_FULL; in hclge_record_user_pauseparam()
8694 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; in hclge_record_user_pauseparam()
8696 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; in hclge_record_user_pauseparam()
8698 hdev->fc_mode_last_time = HCLGE_FC_NONE; in hclge_record_user_pauseparam()
8700 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; in hclge_record_user_pauseparam()
8707 struct hclge_dev *hdev = vport->back; in hclge_set_pauseparam() local
8708 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_pauseparam()
8714 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
8720 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_set_pauseparam()
8721 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
8726 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); in hclge_set_pauseparam()
8728 hclge_record_user_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
8731 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
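hclge_record_user_pauseparam() reduces the rx_en/tx_en pair from ethtool to one of four flow-control modes and makes it the current tm_info.fc_mode. The mapping, spelled out exhaustively:

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* rx_en/tx_en -> flow-control mode, the four-way mapping recorded
 * by hclge_record_user_pauseparam() in the listing above. */
static enum fc_mode pause_to_fc_mode(int rx_en, int tx_en)
{
        if (rx_en && tx_en)
                return FC_FULL;
        if (rx_en)
                return FC_RX_PAUSE;
        if (tx_en)
                return FC_TX_PAUSE;
        return FC_NONE;
}

int main(void)
{
        static const char * const name[] = {
                "NONE", "RX_PAUSE", "TX_PAUSE", "FULL",
        };
        int rx, tx;

        for (rx = 0; rx <= 1; rx++)
                for (tx = 0; tx <= 1; tx++)
                        printf("rx=%d tx=%d -> %s\n",
                               rx, tx, name[pause_to_fc_mode(rx, tx)]);
        return 0;
}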
8743 struct hclge_dev *hdev = vport->back; in hclge_get_ksettings_an_result() local
8746 *speed = hdev->hw.mac.speed; in hclge_get_ksettings_an_result()
8748 *duplex = hdev->hw.mac.duplex; in hclge_get_ksettings_an_result()
8750 *auto_neg = hdev->hw.mac.autoneg; in hclge_get_ksettings_an_result()
8757 struct hclge_dev *hdev = vport->back; in hclge_get_media_type() local
8760 *media_type = hdev->hw.mac.media_type; in hclge_get_media_type()
8763 *module_type = hdev->hw.mac.module_type; in hclge_get_media_type()
8770 struct hclge_dev *hdev = vport->back; in hclge_get_mdix_mode() local
8771 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mdix_mode()
8816 static void hclge_info_show(struct hclge_dev *hdev) in hclge_info_show() argument
8818 struct device *dev = &hdev->pdev->dev; in hclge_info_show()
8822 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); in hclge_info_show()
8823 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); in hclge_info_show()
8824 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); in hclge_info_show()
8825 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); in hclge_info_show()
8826 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport); in hclge_info_show()
8827 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs); in hclge_info_show()
8828 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); in hclge_info_show()
8829 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size); in hclge_info_show()
8830 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size); in hclge_info_show()
8831 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size); in hclge_info_show()
8833 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); in hclge_info_show()
8835 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); in hclge_info_show()
8837 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable"); in hclge_info_show()
8846 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_nic_client_instance() local
8850 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_nic_client_instance()
8855 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
8856 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_nic_client_instance()
8857 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_nic_client_instance()
8863 ret = hclge_config_nic_hw_error(hdev, true); in hclge_init_nic_client_instance()
8872 if (netif_msg_drv(&hdev->vport->nic)) in hclge_init_nic_client_instance()
8873 hclge_info_show(hdev); in hclge_init_nic_client_instance()
8878 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
8879 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_nic_client_instance()
8891 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_roce_client_instance() local
8895 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclge_init_roce_client_instance()
8896 !hdev->nic_client) in hclge_init_roce_client_instance()
8899 client = hdev->roce_client; in hclge_init_roce_client_instance()
8904 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_roce_client_instance()
8909 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
8910 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_roce_client_instance()
8911 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_roce_client_instance()
8917 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_init_roce_client_instance()
8929 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
8930 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_roce_client_instance()
8933 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_init_roce_client_instance()
8941 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_client_instance() local
8945 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_init_client_instance()
8946 vport = &hdev->vport[i]; in hclge_init_client_instance()
8951 hdev->nic_client = client; in hclge_init_client_instance()
8963 if (hnae3_dev_roce_supported(hdev)) { in hclge_init_client_instance()
8964 hdev->roce_client = client; in hclge_init_client_instance()
8981 hdev->nic_client = NULL; in hclge_init_client_instance()
8985 hdev->roce_client = NULL; in hclge_init_client_instance()
8993 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_client_instance() local
8997 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { in hclge_uninit_client_instance()
8998 vport = &hdev->vport[i]; in hclge_uninit_client_instance()
8999 if (hdev->roce_client) { in hclge_uninit_client_instance()
9000 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
9001 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
9004 hdev->roce_client->ops->uninit_instance(&vport->roce, in hclge_uninit_client_instance()
9006 hdev->roce_client = NULL; in hclge_uninit_client_instance()
9011 if (hdev->nic_client && client->ops->uninit_instance) { in hclge_uninit_client_instance()
9012 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
9013 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
9017 hdev->nic_client = NULL; in hclge_uninit_client_instance()
9023 static int hclge_pci_init(struct hclge_dev *hdev) in hclge_pci_init() argument
9025 struct pci_dev *pdev = hdev->pdev; in hclge_pci_init()
9053 hw = &hdev->hw; in hclge_pci_init()
9061 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); in hclge_pci_init()
9073 static void hclge_pci_uninit(struct hclge_dev *hdev) in hclge_pci_uninit() argument
9075 struct pci_dev *pdev = hdev->pdev; in hclge_pci_uninit()
9077 pcim_iounmap(pdev, hdev->hw.io_base); in hclge_pci_uninit()
9084 static void hclge_state_init(struct hclge_dev *hdev) in hclge_state_init() argument
9086 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); in hclge_state_init()
9087 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_init()
9088 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_state_init()
9089 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_state_init()
9090 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_state_init()
9091 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_state_init()
9094 static void hclge_state_uninit(struct hclge_dev *hdev) in hclge_state_uninit() argument
9096 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_uninit()
9097 set_bit(HCLGE_STATE_REMOVING, &hdev->state); in hclge_state_uninit()
9099 if (hdev->reset_timer.function) in hclge_state_uninit()
9100 del_timer_sync(&hdev->reset_timer); in hclge_state_uninit()
9101 if (hdev->service_task.work.func) in hclge_state_uninit()
9102 cancel_delayed_work_sync(&hdev->service_task); in hclge_state_uninit()
9103 if (hdev->rst_service_task.func) in hclge_state_uninit()
9104 cancel_work_sync(&hdev->rst_service_task); in hclge_state_uninit()
9105 if (hdev->mbx_service_task.func) in hclge_state_uninit()
9106 cancel_work_sync(&hdev->mbx_service_task); in hclge_state_uninit()
9113 struct hclge_dev *hdev = ae_dev->priv; in hclge_flr_prepare() local
9116 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); in hclge_flr_prepare()
9117 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); in hclge_flr_prepare()
9118 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); in hclge_flr_prepare()
9119 hclge_reset_event(hdev->pdev, NULL); in hclge_flr_prepare()
9121 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && in hclge_flr_prepare()
9125 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) in hclge_flr_prepare()
9126 dev_err(&hdev->pdev->dev, in hclge_flr_prepare()
9132 struct hclge_dev *hdev = ae_dev->priv; in hclge_flr_done() local
9134 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); in hclge_flr_done()
9137 static void hclge_clear_resetting_state(struct hclge_dev *hdev) in hclge_clear_resetting_state() argument
9141 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_clear_resetting_state()
9142 struct hclge_vport *vport = &hdev->vport[i]; in hclge_clear_resetting_state()
9146 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); in hclge_clear_resetting_state()
9148 dev_warn(&hdev->pdev->dev, in hclge_clear_resetting_state()
9157 struct hclge_dev *hdev; in hclge_init_ae_dev() local
9160 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclge_init_ae_dev()
9161 if (!hdev) { in hclge_init_ae_dev()
9166 hdev->pdev = pdev; in hclge_init_ae_dev()
9167 hdev->ae_dev = ae_dev; in hclge_init_ae_dev()
9168 hdev->reset_type = HNAE3_NONE_RESET; in hclge_init_ae_dev()
9169 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_init_ae_dev()
9170 ae_dev->priv = hdev; in hclge_init_ae_dev()
9171 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; in hclge_init_ae_dev()
9173 mutex_init(&hdev->vport_lock); in hclge_init_ae_dev()
9174 mutex_init(&hdev->vport_cfg_mutex); in hclge_init_ae_dev()
9175 spin_lock_init(&hdev->fd_rule_lock); in hclge_init_ae_dev()
9177 ret = hclge_pci_init(hdev); in hclge_init_ae_dev()
9184 ret = hclge_cmd_queue_init(hdev); in hclge_init_ae_dev()
9191 ret = hclge_cmd_init(hdev); in hclge_init_ae_dev()
9195 ret = hclge_get_cap(hdev); in hclge_init_ae_dev()
9202 ret = hclge_configure(hdev); in hclge_init_ae_dev()
9208 ret = hclge_init_msi(hdev); in hclge_init_ae_dev()
9214 ret = hclge_misc_irq_init(hdev); in hclge_init_ae_dev()
9222 ret = hclge_alloc_tqps(hdev); in hclge_init_ae_dev()
9228 ret = hclge_alloc_vport(hdev); in hclge_init_ae_dev()
9234 ret = hclge_map_tqp(hdev); in hclge_init_ae_dev()
9240 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { in hclge_init_ae_dev()
9241 ret = hclge_mac_mdio_config(hdev); in hclge_init_ae_dev()
9243 dev_err(&hdev->pdev->dev, in hclge_init_ae_dev()
9249 ret = hclge_init_umv_space(hdev); in hclge_init_ae_dev()
9255 ret = hclge_mac_init(hdev); in hclge_init_ae_dev()
9261 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_init_ae_dev()
9267 ret = hclge_config_gro(hdev, true); in hclge_init_ae_dev()
9271 ret = hclge_init_vlan_config(hdev); in hclge_init_ae_dev()
9277 ret = hclge_tm_schd_init(hdev); in hclge_init_ae_dev()
9283 hclge_rss_init_cfg(hdev); in hclge_init_ae_dev()
9284 ret = hclge_rss_init_hw(hdev); in hclge_init_ae_dev()
9290 ret = init_mgr_tbl(hdev); in hclge_init_ae_dev()
9296 ret = hclge_init_fd_config(hdev); in hclge_init_ae_dev()
9303 INIT_KFIFO(hdev->mac_tnl_log); in hclge_init_ae_dev()
9305 hclge_dcb_ops_set(hdev); in hclge_init_ae_dev()
9307 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); in hclge_init_ae_dev()
9308 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); in hclge_init_ae_dev()
9309 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); in hclge_init_ae_dev()
9310 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); in hclge_init_ae_dev()
9315 hclge_misc_affinity_setup(hdev); in hclge_init_ae_dev()
9317 hclge_clear_all_event_cause(hdev); in hclge_init_ae_dev()
9318 hclge_clear_resetting_state(hdev); in hclge_init_ae_dev()
9332 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_init_ae_dev()
9336 hclge_enable_vector(&hdev->misc_vector, true); in hclge_init_ae_dev()
9338 hclge_state_init(hdev); in hclge_init_ae_dev()
9339 hdev->last_reset_time = jiffies; in hclge_init_ae_dev()
9341 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", in hclge_init_ae_dev()
9347 if (hdev->hw.mac.phydev) in hclge_init_ae_dev()
9348 mdiobus_unregister(hdev->hw.mac.mdio_bus); in hclge_init_ae_dev()
9350 hclge_misc_irq_uninit(hdev); in hclge_init_ae_dev()
9354 hclge_cmd_uninit(hdev); in hclge_init_ae_dev()
9356 pcim_iounmap(pdev, hdev->hw.io_base); in hclge_init_ae_dev()
9364 static void hclge_stats_clear(struct hclge_dev *hdev) in hclge_stats_clear() argument
9366 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); in hclge_stats_clear()
9369 static void hclge_reset_vport_state(struct hclge_dev *hdev) in hclge_reset_vport_state() argument
9371 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_state()
9374 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_state()
9382 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_ae_dev() local
9386 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_reset_ae_dev()
9388 hclge_stats_clear(hdev); in hclge_reset_ae_dev()
9389 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); in hclge_reset_ae_dev()
9390 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); in hclge_reset_ae_dev()
9392 ret = hclge_cmd_init(hdev); in hclge_reset_ae_dev()
9398 ret = hclge_map_tqp(hdev); in hclge_reset_ae_dev()
9404 hclge_reset_umv_space(hdev); in hclge_reset_ae_dev()
9406 ret = hclge_mac_init(hdev); in hclge_reset_ae_dev()
9412 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_reset_ae_dev()
9418 ret = hclge_config_gro(hdev, true); in hclge_reset_ae_dev()
9422 ret = hclge_init_vlan_config(hdev); in hclge_reset_ae_dev()
9428 ret = hclge_tm_init_hw(hdev, true); in hclge_reset_ae_dev()
9434 ret = hclge_rss_init_hw(hdev); in hclge_reset_ae_dev()
9440 ret = hclge_init_fd_config(hdev); in hclge_reset_ae_dev()
9449 ret = hclge_config_nic_hw_error(hdev, true); in hclge_reset_ae_dev()
9457 if (hdev->roce_client) { in hclge_reset_ae_dev()
9458 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_reset_ae_dev()
9467 hclge_reset_vport_state(hdev); in hclge_reset_ae_dev()
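Note what hclge_reset_ae_dev() does not do compared with the probe path: it never calls hclge_init_msi(), hclge_alloc_tqps() or hclge_alloc_vport() again, since those resources survive a function-level reset. It only clears stale software state and replays the pure hardware-configuration helpers. A condensed, illustrative ordering; error handling is elided here, while the real body checks every return value:

	set_bit(HCLGE_STATE_DOWN, &hdev->state);	/* quiesce before touching HW */
	hclge_stats_clear(hdev);			/* soft stats are stale after reset */
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
	hclge_cmd_init(hdev);		/* bring the command queue back first */
	hclge_map_tqp(hdev);		/* re-map existing TQPs, no re-allocation */
	hclge_mac_init(hdev);		/* then replay the per-block HW config */
	hclge_reset_vport_state(hdev);	/* finally mark all vports stopped */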
9477 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_ae_dev() local
9478 struct hclge_mac *mac = &hdev->hw.mac; in hclge_uninit_ae_dev()
9480 hclge_misc_affinity_teardown(hdev); in hclge_uninit_ae_dev()
9481 hclge_state_uninit(hdev); in hclge_uninit_ae_dev()
9486 hclge_uninit_umv_space(hdev); in hclge_uninit_ae_dev()
9489 hclge_enable_vector(&hdev->misc_vector, false); in hclge_uninit_ae_dev()
9490 synchronize_irq(hdev->misc_vector.vector_irq); in hclge_uninit_ae_dev()
9493 hclge_config_mac_tnl_int(hdev, false); in hclge_uninit_ae_dev()
9494 hclge_config_nic_hw_error(hdev, false); in hclge_uninit_ae_dev()
9495 hclge_config_rocee_ras_interrupt(hdev, false); in hclge_uninit_ae_dev()
9497 hclge_cmd_uninit(hdev); in hclge_uninit_ae_dev()
9498 hclge_misc_irq_uninit(hdev); in hclge_uninit_ae_dev()
9499 hclge_pci_uninit(hdev); in hclge_uninit_ae_dev()
9500 mutex_destroy(&hdev->vport_lock); in hclge_uninit_ae_dev()
9501 hclge_uninit_vport_mac_table(hdev); in hclge_uninit_ae_dev()
9502 hclge_uninit_vport_vlan_table(hdev); in hclge_uninit_ae_dev()
9503 mutex_destroy(&hdev->vport_cfg_mutex); in hclge_uninit_ae_dev()
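Teardown runs the init sequence in reverse, and lines 9489-9490 show the standard way to quiesce an interrupt before freeing anything its handler might touch: mask the source, then synchronize_irq() to wait out any handler already running. As a standalone sketch; the actual free lives in hclge_misc_irq_uninit(), and the wrapper name here is hypothetical:

static void example_quiesce_misc_irq(struct hclge_dev *hdev)
{
	/* 1) stop the device from raising the interrupt */
	hclge_enable_vector(&hdev->misc_vector, false);
	/* 2) wait for any in-flight handler on this IRQ to finish */
	synchronize_irq(hdev->misc_vector.vector_irq);
	/* 3) only now is it safe to free what the handler uses */
	hclge_misc_irq_uninit(hdev);
}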
9511 struct hclge_dev *hdev = vport->back; in hclge_get_max_channels() local
9513 return min_t(u32, hdev->rss_size_max, in hclge_get_max_channels()
9530 struct hclge_dev *hdev = vport->back; in hclge_get_tqps_and_rss_info() local
9533 *max_rss_size = hdev->rss_size_max; in hclge_get_tqps_and_rss_info()
9542 struct hclge_dev *hdev = vport->back; in hclge_set_channels() local
9554 ret = hclge_tm_vport_map_update(hdev); in hclge_set_channels()
9556 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); in hclge_set_channels()
9566 if (!(hdev->hw_tc_map & BIT(i))) in hclge_set_channels()
9573 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); in hclge_set_channels()
9591 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclge_set_channels()
9598 dev_info(&hdev->pdev->dev, in hclge_set_channels()
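Around lines 9566-9573, hclge_set_channels() rebuilds the per-TC RSS parameters before pushing them with hclge_set_rss_tc_mode(). A hedged sketch of that computation, assuming hdev and kinfo in scope as in the listing: the hardware takes the per-TC queue count as a log2, so rss_size is rounded up to a power of two, and enabled TCs are laid out back to back in the queue space (variable names follow the listing; the exact body may differ):

	u16 tc_offset[HCLGE_MAX_TC_NUM], tc_size[HCLGE_MAX_TC_NUM];
	u8 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;
	int ret;

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);	/* HW wants log2(queues per TC) */

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		if (!tc_valid[i])
			continue;
		tc_size[i] = roundup_size;		/* same size for every TC */
		tc_offset[i] = kinfo->rss_size * i;	/* contiguous layout */
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);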
9606 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, in hclge_get_regs_num() argument
9614 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_regs_num()
9616 dev_err(&hdev->pdev->dev, in hclge_get_regs_num()
9631 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, in hclge_get_32_bit_regs() argument
9656 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); in hclge_get_32_bit_regs()
9658 dev_err(&hdev->pdev->dev, in hclge_get_32_bit_regs()
9685 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, in hclge_get_64_bit_regs() argument
9710 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); in hclge_get_64_bit_regs()
9712 dev_err(&hdev->pdev->dev, in hclge_get_64_bit_regs()
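hclge_get_32_bit_regs() and hclge_get_64_bit_regs() share one shape: the register file is larger than a single command descriptor's payload, so the driver computes how many descriptors the reply needs, sends them as one batch, and unpacks the values afterwards. A sketch under an assumed payload constant, with regs_num and hdev in scope; the real words-per-descriptor count depends on the descriptor layout:

#define WORDS_PER_DESC	8	/* assumption: payload u32s per descriptor */

	int cmd_num = DIV_ROUND_UP(regs_num, WORDS_PER_DESC);
	struct hclge_desc *desc;
	int ret;

	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);	/* one batched query */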
9747 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) in hclge_query_bd_num_cmd_send() argument
9758 return hclge_cmd_send(&hdev->hw, desc, 4); in hclge_query_bd_num_cmd_send()
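hclge_query_bd_num_cmd_send() sends four descriptors as one request (line 9758). Multi-descriptor commands in this driver are chained by setting a NEXT flag on every descriptor except the last; a hedged sketch of how such a chain is typically built, with desc and hdev in scope as in the function (details illustrative):

	int i;

	/* all but the last descriptor set the NEXT flag to form one chain */
	for (i = 0; i < 3; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);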
9761 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, in hclge_get_dfx_reg_bd_num() argument
9771 ret = hclge_query_bd_num_cmd_send(hdev, desc); in hclge_get_dfx_reg_bd_num()
9773 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg_bd_num()
9789 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, in hclge_dfx_reg_cmd_send() argument
9804 ret = hclge_cmd_send(&hdev->hw, desc, bd_num); in hclge_dfx_reg_cmd_send()
9806 dev_err(&hdev->pdev->dev, in hclge_dfx_reg_cmd_send()
9834 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) in hclge_get_dfx_reg_len() argument
9841 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); in hclge_get_dfx_reg_len()
9843 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg_len()
9859 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) in hclge_get_dfx_reg() argument
9868 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); in hclge_get_dfx_reg()
9870 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg()
9882 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__); in hclge_get_dfx_reg()
9888 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, in hclge_get_dfx_reg()
9891 dev_err(&hdev->pdev->dev, in hclge_get_dfx_reg()
9903 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, in hclge_fetch_pf_reg() argument
9917 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); in hclge_fetch_pf_reg()
9925 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); in hclge_fetch_pf_reg()
9934 *reg++ = hclge_read_dev(&hdev->hw, in hclge_fetch_pf_reg()
9944 for (j = 0; j < hdev->num_msi_used - 1; j++) { in hclge_fetch_pf_reg()
9946 *reg++ = hclge_read_dev(&hdev->hw, in hclge_fetch_pf_reg()
9952 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); in hclge_fetch_pf_reg()
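hclge_fetch_pf_reg() walks fixed address tables (command-queue, common, per-ring, and per-vector interrupt registers), reads each register with hclge_read_dev(), and pads every group up to a full dump line with separator words so userspace tools can split the blob back into groups. One group, sketched, with data and hdev in scope; the separator value and line width are illustrative constants:

	u32 *reg = data;
	size_t reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	size_t separator_num = REG_NUM_PER_LINE - reg_num % REG_NUM_PER_LINE;
	size_t i;

	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;	/* pad the group to a full line */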
9962 struct hclge_dev *hdev = vport->back; in hclge_get_regs_len() local
9967 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); in hclge_get_regs_len()
9969 dev_err(&hdev->pdev->dev, in hclge_get_regs_len()
9974 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); in hclge_get_regs_len()
9976 dev_err(&hdev->pdev->dev, in hclge_get_regs_len()
9995 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + in hclge_get_regs_len()
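With every group padded to whole lines, hclge_get_regs_len() reduces to arithmetic: per-ring and per-vector groups scale by their instance counts (note the hdev->num_msi_used - 1 on line 9995, which excludes the misc vector), and the 32-bit, 64-bit and DFX lengths come from the firmware queries above. Roughly, as an illustrative composition:

	return (cmdq_lines + common_lines +
		ring_lines * kinfo->num_tqps +			/* one block per TQP */
		tqp_intr_lines * (hdev->num_msi_used - 1) +	/* per data vector */
		regs_lines_32_bit + regs_lines_64_bit) *
	       REG_LEN_PER_LINE + dfx_regs_len;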
10004 struct hclge_dev *hdev = vport->back; in hclge_get_regs() local
10009 *version = hdev->fw_version; in hclge_get_regs()
10011 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); in hclge_get_regs()
10013 dev_err(&hdev->pdev->dev, in hclge_get_regs()
10018 reg += hclge_fetch_pf_reg(hdev, reg, kinfo); in hclge_get_regs()
10020 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); in hclge_get_regs()
10022 dev_err(&hdev->pdev->dev, in hclge_get_regs()
10032 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); in hclge_get_regs()
10034 dev_err(&hdev->pdev->dev, in hclge_get_regs()
10044 ret = hclge_get_dfx_reg(hdev, reg); in hclge_get_regs()
10046 dev_err(&hdev->pdev->dev, in hclge_get_regs()
10050 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) in hclge_set_led_status() argument
10062 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_led_status()
10064 dev_err(&hdev->pdev->dev, in hclge_set_led_status()
10080 struct hclge_dev *hdev = vport->back; in hclge_set_led_id() local
10084 return hclge_set_led_status(hdev, HCLGE_LED_ON); in hclge_set_led_id()
10086 return hclge_set_led_status(hdev, HCLGE_LED_OFF); in hclge_set_led_id()
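hclge_set_led_id() is the ethtool set_phys_id hook: ETHTOOL_ID_ACTIVE maps to HCLGE_LED_ON and ETHTOOL_ID_INACTIVE to HCLGE_LED_OFF, each forwarded to firmware through hclge_set_led_status(). A compact sketch of the dispatch, with status and hdev in scope; the default branch is an assumption:

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}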
10098 struct hclge_dev *hdev = vport->back; in hclge_get_link_mode() local
10102 supported[idx] = hdev->hw.mac.supported[idx]; in hclge_get_link_mode()
10103 advertising[idx] = hdev->hw.mac.advertising[idx]; in hclge_get_link_mode()
10110 struct hclge_dev *hdev = vport->back; in hclge_gro_en() local
10112 return hclge_config_gro(hdev, enable); in hclge_gro_en()