Lines Matching refs:hdev
18 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
19 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
101 void hclgevf_arq_init(struct hclgevf_dev *hdev) in hclgevf_arq_init() argument
103 struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; in hclgevf_arq_init()
107 hdev->arq.hdev = hdev; in hclgevf_arq_init()
108 hdev->arq.head = 0; in hclgevf_arq_init()
109 hdev->arq.tail = 0; in hclgevf_arq_init()
110 atomic_set(&hdev->arq.count, 0); in hclgevf_arq_init()
127 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_update_stats() local
130 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclgevf_update_stats()
132 dev_err(&hdev->pdev->dev, in hclgevf_update_stats()
171 static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) in hclgevf_get_basic_info() argument
173 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclgevf_get_basic_info()
181 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, in hclgevf_get_basic_info()
184 dev_err(&hdev->pdev->dev, in hclgevf_get_basic_info()
191 hdev->hw_tc_map = basic_info->hw_tc_map; in hclgevf_get_basic_info()
192 hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version); in hclgevf_get_basic_info()
200 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) in hclgevf_get_port_base_vlan_filter_state() argument
202 struct hnae3_handle *nic = &hdev->nic; in hclgevf_get_port_base_vlan_filter_state()
209 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, in hclgevf_get_port_base_vlan_filter_state()
212 dev_err(&hdev->pdev->dev, in hclgevf_get_port_base_vlan_filter_state()
223 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) in hclgevf_get_queue_info() argument
233 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, in hclgevf_get_queue_info()
236 dev_err(&hdev->pdev->dev, in hclgevf_get_queue_info()
243 hdev->num_tqps = le16_to_cpu(queue_info->num_tqps); in hclgevf_get_queue_info()
244 hdev->rss_size_max = le16_to_cpu(queue_info->rss_size); in hclgevf_get_queue_info()
245 hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len); in hclgevf_get_queue_info()
250 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) in hclgevf_get_queue_depth() argument
260 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, in hclgevf_get_queue_depth()
263 dev_err(&hdev->pdev->dev, in hclgevf_get_queue_depth()
270 hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc); in hclgevf_get_queue_depth()
271 hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc); in hclgevf_get_queue_depth()
278 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_qid_global() local
286 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, in hclgevf_get_qid_global()
294 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) in hclgevf_get_pf_media_type() argument
301 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, in hclgevf_get_pf_media_type()
304 dev_err(&hdev->pdev->dev, in hclgevf_get_pf_media_type()
310 hdev->hw.mac.media_type = resp_msg[0]; in hclgevf_get_pf_media_type()
311 hdev->hw.mac.module_type = resp_msg[1]; in hclgevf_get_pf_media_type()
316 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) in hclgevf_alloc_tqps() argument
318 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclgevf_alloc_tqps()
322 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclgevf_alloc_tqps()
324 if (!hdev->htqp) in hclgevf_alloc_tqps()
327 tqp = hdev->htqp; in hclgevf_alloc_tqps()
329 for (i = 0; i < hdev->num_tqps; i++) { in hclgevf_alloc_tqps()
330 tqp->dev = &hdev->pdev->dev; in hclgevf_alloc_tqps()
334 tqp->q.buf_size = hdev->rx_buf_len; in hclgevf_alloc_tqps()
335 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclgevf_alloc_tqps()
336 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclgevf_alloc_tqps()
342 tqp->q.io_base = hdev->hw.hw.io_base + in hclgevf_alloc_tqps()
346 tqp->q.io_base = hdev->hw.hw.io_base + in hclgevf_alloc_tqps()
357 tqp->q.mem_base = hdev->hw.hw.mem_base + in hclgevf_alloc_tqps()
358 HCLGEVF_TQP_MEM_OFFSET(hdev, i); in hclgevf_alloc_tqps()
366 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) in hclgevf_knic_setup() argument
368 struct hnae3_handle *nic = &hdev->nic; in hclgevf_knic_setup()
370 u16 new_tqps = hdev->num_tqps; in hclgevf_knic_setup()
375 kinfo->num_tx_desc = hdev->num_tx_desc; in hclgevf_knic_setup()
376 kinfo->num_rx_desc = hdev->num_rx_desc; in hclgevf_knic_setup()
377 kinfo->rx_buf_len = hdev->rx_buf_len; in hclgevf_knic_setup()
379 if (hdev->hw_tc_map & BIT(i)) in hclgevf_knic_setup()
384 kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); in hclgevf_knic_setup()
386 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); in hclgevf_knic_setup()
388 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, in hclgevf_knic_setup()
394 hdev->htqp[i].q.handle = &hdev->nic; in hclgevf_knic_setup()
395 hdev->htqp[i].q.tqp_index = i; in hclgevf_knic_setup()
396 kinfo->tqp[i] = &hdev->htqp[i].q; in hclgevf_knic_setup()
402 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); in hclgevf_knic_setup()
409 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) in hclgevf_request_link_info() argument
415 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_request_link_info()
417 dev_err(&hdev->pdev->dev, in hclgevf_request_link_info()
421 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) in hclgevf_update_link_status() argument
423 struct hnae3_handle *rhandle = &hdev->roce; in hclgevf_update_link_status()
424 struct hnae3_handle *handle = &hdev->nic; in hclgevf_update_link_status()
428 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) in hclgevf_update_link_status()
432 rclient = hdev->roce_client; in hclgevf_update_link_status()
435 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; in hclgevf_update_link_status()
436 if (link_state != hdev->hw.mac.link) { in hclgevf_update_link_status()
437 hdev->hw.mac.link = link_state; in hclgevf_update_link_status()
443 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); in hclgevf_update_link_status()
446 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) in hclgevf_update_link_mode() argument
455 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_update_link_mode()
457 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_update_link_mode()
460 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) in hclgevf_set_handle_info() argument
462 struct hnae3_handle *nic = &hdev->nic; in hclgevf_set_handle_info()
466 nic->pdev = hdev->pdev; in hclgevf_set_handle_info()
467 nic->numa_node_mask = hdev->numa_node_mask; in hclgevf_set_handle_info()
469 nic->kinfo.io_base = hdev->hw.hw.io_base; in hclgevf_set_handle_info()
471 ret = hclgevf_knic_setup(hdev); in hclgevf_set_handle_info()
473 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", in hclgevf_set_handle_info()
478 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) in hclgevf_free_vector() argument
480 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { in hclgevf_free_vector()
481 dev_warn(&hdev->pdev->dev, in hclgevf_free_vector()
486 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; in hclgevf_free_vector()
487 hdev->num_msi_left += 1; in hclgevf_free_vector()
488 hdev->num_msi_used -= 1; in hclgevf_free_vector()
494 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_vector() local
499 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); in hclgevf_get_vector()
500 vector_num = min(hdev->num_msi_left, vector_num); in hclgevf_get_vector()
503 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { in hclgevf_get_vector()
504 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { in hclgevf_get_vector()
505 vector->vector = pci_irq_vector(hdev->pdev, i); in hclgevf_get_vector()
506 vector->io_addr = hdev->hw.hw.io_base + in hclgevf_get_vector()
509 hdev->vector_status[i] = 0; in hclgevf_get_vector()
510 hdev->vector_irq[i] = vector->vector; in hclgevf_get_vector()
519 hdev->num_msi_left -= alloc; in hclgevf_get_vector()
520 hdev->num_msi_used += alloc; in hclgevf_get_vector()
525 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) in hclgevf_get_vector_index() argument
529 for (i = 0; i < hdev->num_msi; i++) in hclgevf_get_vector_index()
530 if (vector == hdev->vector_irq[i]) in hclgevf_get_vector_index()
537 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) in hclgevf_get_rss_hash_key() argument
540 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclgevf_get_rss_hash_key()
552 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, in hclgevf_get_rss_hash_key()
555 dev_err(&hdev->pdev->dev, in hclgevf_get_rss_hash_key()
577 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_rss() local
578 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclgevf_get_rss()
581 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclgevf_get_rss()
587 ret = hclgevf_get_rss_hash_key(hdev); in hclgevf_get_rss()
596 hdev->ae_dev->dev_specs.rss_ind_tbl_size); in hclgevf_get_rss()
604 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_rss() local
605 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclgevf_set_rss()
608 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclgevf_set_rss()
609 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, in hclgevf_set_rss()
616 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) in hclgevf_set_rss()
620 return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, in hclgevf_set_rss()
627 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_rss_tuple() local
630 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclgevf_set_rss_tuple()
633 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, in hclgevf_set_rss_tuple()
634 &hdev->rss_cfg, nfc); in hclgevf_set_rss_tuple()
636 dev_err(&hdev->pdev->dev, in hclgevf_set_rss_tuple()
645 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_rss_tuple() local
649 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclgevf_get_rss_tuple()
654 ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, in hclgevf_get_rss_tuple()
666 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_tc_size() local
667 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclgevf_get_tc_size()
676 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_bind_ring_to_vector() local
701 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, in hclgevf_bind_ring_to_vector()
704 dev_err(&hdev->pdev->dev, in hclgevf_bind_ring_to_vector()
719 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_map_ring_to_vector() local
722 vector_id = hclgevf_get_vector_index(hdev, vector); in hclgevf_map_ring_to_vector()
737 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_unmap_ring_from_vector() local
740 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) in hclgevf_unmap_ring_from_vector()
743 vector_id = hclgevf_get_vector_index(hdev, vector); in hclgevf_unmap_ring_from_vector()
762 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_put_vector() local
765 vector_id = hclgevf_get_vector_index(hdev, vector); in hclgevf_put_vector()
773 hclgevf_free_vector(hdev, vector_id); in hclgevf_put_vector()
778 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, in hclgevf_cmd_set_promisc_mode() argument
782 struct hnae3_handle *handle = &hdev->nic; in hclgevf_cmd_set_promisc_mode()
794 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_cmd_set_promisc_mode()
796 dev_err(&hdev->pdev->dev, in hclgevf_cmd_set_promisc_mode()
805 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_promisc_mode() local
808 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; in hclgevf_set_promisc_mode()
810 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, in hclgevf_set_promisc_mode()
816 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_request_update_promisc_mode() local
818 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); in hclgevf_request_update_promisc_mode()
819 hclgevf_task_schedule(hdev, 0); in hclgevf_request_update_promisc_mode()
822 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) in hclgevf_sync_promisc_mode() argument
824 struct hnae3_handle *handle = &hdev->nic; in hclgevf_sync_promisc_mode()
829 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { in hclgevf_sync_promisc_mode()
832 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); in hclgevf_sync_promisc_mode()
836 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, in hclgevf_tqp_enable_cmd_send() argument
850 return hclgevf_cmd_send(&hdev->hw, &desc, 1); in hclgevf_tqp_enable_cmd_send()
855 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_tqp_enable() local
860 ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); in hclgevf_tqp_enable()
868 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) in hclgevf_get_host_mac_addr() argument
875 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, in hclgevf_get_host_mac_addr()
878 dev_err(&hdev->pdev->dev, in hclgevf_get_host_mac_addr()
890 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_mac_addr() local
893 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) in hclgevf_get_mac_addr()
896 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); in hclgevf_get_mac_addr()
897 if (hdev->has_pf_mac) in hclgevf_get_mac_addr()
900 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclgevf_get_mac_addr()
906 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_mac_addr() local
907 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; in hclgevf_set_mac_addr()
915 if (is_first && !hdev->has_pf_mac) in hclgevf_set_mac_addr()
919 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); in hclgevf_set_mac_addr()
921 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); in hclgevf_set_mac_addr()
971 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_update_mac_list() local
976 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; in hclgevf_update_mac_list()
978 spin_lock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_update_mac_list()
987 spin_unlock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_update_mac_list()
992 spin_unlock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_update_mac_list()
998 spin_unlock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_update_mac_list()
1006 spin_unlock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_update_mac_list()
1038 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, in hclgevf_add_del_mac_addr() argument
1061 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_add_del_mac_addr()
1064 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, in hclgevf_config_mac_list() argument
1073 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); in hclgevf_config_mac_list()
1077 dev_err(&hdev->pdev->dev, in hclgevf_config_mac_list()
1151 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, in hclgevf_sync_mac_list() argument
1165 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; in hclgevf_sync_mac_list()
1167 spin_lock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_sync_mac_list()
1189 spin_unlock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_sync_mac_list()
1192 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); in hclgevf_sync_mac_list()
1193 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); in hclgevf_sync_mac_list()
1198 spin_lock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_sync_mac_list()
1203 spin_unlock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_sync_mac_list()
1206 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) in hclgevf_sync_mac_table() argument
1208 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); in hclgevf_sync_mac_table()
1209 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); in hclgevf_sync_mac_table()
1212 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) in hclgevf_uninit_mac_list() argument
1214 spin_lock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_uninit_mac_list()
1216 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); in hclgevf_uninit_mac_list()
1217 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); in hclgevf_uninit_mac_list()
1219 spin_unlock_bh(&hdev->mac_table.mac_list_lock); in hclgevf_uninit_mac_list()
1224 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_enable_vlan_filter() local
1225 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclgevf_enable_vlan_filter()
1235 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); in hclgevf_enable_vlan_filter()
1242 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_vlan_filter() local
1257 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || in hclgevf_set_vlan_filter()
1258 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { in hclgevf_set_vlan_filter()
1259 set_bit(vlan_id, hdev->vlan_del_fail_bmap); in hclgevf_set_vlan_filter()
1274 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); in hclgevf_set_vlan_filter()
1276 set_bit(vlan_id, hdev->vlan_del_fail_bmap); in hclgevf_set_vlan_filter()
1281 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) in hclgevf_sync_vlan_filter() argument
1284 struct hnae3_handle *handle = &hdev->nic; in hclgevf_sync_vlan_filter()
1288 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); in hclgevf_sync_vlan_filter()
1295 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); in hclgevf_sync_vlan_filter()
1300 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); in hclgevf_sync_vlan_filter()
1306 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_en_hw_strip_rxvtag() local
1312 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_en_hw_strip_rxvtag()
1318 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_reset_tqp() local
1327 dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", in hclgevf_reset_tqp()
1334 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, in hclgevf_reset_tqp()
1342 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); in hclgevf_reset_tqp()
1352 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_mtu() local
1360 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); in hclgevf_set_mtu()
1363 static int hclgevf_notify_client(struct hclgevf_dev *hdev, in hclgevf_notify_client() argument
1366 struct hnae3_client *client = hdev->nic_client; in hclgevf_notify_client()
1367 struct hnae3_handle *handle = &hdev->nic; in hclgevf_notify_client()
1370 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || in hclgevf_notify_client()
1379 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", in hclgevf_notify_client()
1385 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, in hclgevf_notify_roce_client() argument
1388 struct hnae3_client *client = hdev->roce_client; in hclgevf_notify_roce_client()
1389 struct hnae3_handle *handle = &hdev->roce; in hclgevf_notify_roce_client()
1392 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclgevf_notify_roce_client()
1400 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", in hclgevf_notify_roce_client()
1405 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) in hclgevf_reset_wait() argument
1415 if (hdev->reset_type == HNAE3_VF_RESET) in hclgevf_reset_wait()
1416 ret = readl_poll_timeout(hdev->hw.hw.io_base + in hclgevf_reset_wait()
1422 ret = readl_poll_timeout(hdev->hw.hw.io_base + in hclgevf_reset_wait()
1430 dev_err(&hdev->pdev->dev, in hclgevf_reset_wait()
1444 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) in hclgevf_reset_handshake() argument
1448 reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); in hclgevf_reset_handshake()
1454 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, in hclgevf_reset_handshake()
1458 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) in hclgevf_reset_stack() argument
1463 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); in hclgevf_reset_stack()
1468 ret = hclgevf_reset_hdev(hdev); in hclgevf_reset_stack()
1470 dev_err(&hdev->pdev->dev, in hclgevf_reset_stack()
1476 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); in hclgevf_reset_stack()
1481 hclgevf_reset_handshake(hdev, false); in hclgevf_reset_stack()
1484 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); in hclgevf_reset_stack()
1487 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) in hclgevf_reset_prepare_wait() argument
1491 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { in hclgevf_reset_prepare_wait()
1496 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); in hclgevf_reset_prepare_wait()
1498 dev_err(&hdev->pdev->dev, in hclgevf_reset_prepare_wait()
1502 hdev->rst_stats.vf_func_rst_cnt++; in hclgevf_reset_prepare_wait()
1505 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclgevf_reset_prepare_wait()
1508 hclgevf_reset_handshake(hdev, true); in hclgevf_reset_prepare_wait()
1509 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", in hclgevf_reset_prepare_wait()
1510 hdev->reset_type); in hclgevf_reset_prepare_wait()
1515 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) in hclgevf_dump_rst_info() argument
1517 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", in hclgevf_dump_rst_info()
1518 hdev->rst_stats.vf_func_rst_cnt); in hclgevf_dump_rst_info()
1519 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", in hclgevf_dump_rst_info()
1520 hdev->rst_stats.flr_rst_cnt); in hclgevf_dump_rst_info()
1521 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", in hclgevf_dump_rst_info()
1522 hdev->rst_stats.vf_rst_cnt); in hclgevf_dump_rst_info()
1523 dev_info(&hdev->pdev->dev, "reset done count: %u\n", in hclgevf_dump_rst_info()
1524 hdev->rst_stats.rst_done_cnt); in hclgevf_dump_rst_info()
1525 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", in hclgevf_dump_rst_info()
1526 hdev->rst_stats.hw_rst_done_cnt); in hclgevf_dump_rst_info()
1527 dev_info(&hdev->pdev->dev, "reset count: %u\n", in hclgevf_dump_rst_info()
1528 hdev->rst_stats.rst_cnt); in hclgevf_dump_rst_info()
1529 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", in hclgevf_dump_rst_info()
1530 hdev->rst_stats.rst_fail_cnt); in hclgevf_dump_rst_info()
1531 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", in hclgevf_dump_rst_info()
1532 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); in hclgevf_dump_rst_info()
1533 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", in hclgevf_dump_rst_info()
1534 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); in hclgevf_dump_rst_info()
1535 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", in hclgevf_dump_rst_info()
1536 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); in hclgevf_dump_rst_info()
1537 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", in hclgevf_dump_rst_info()
1538 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); in hclgevf_dump_rst_info()
1539 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); in hclgevf_dump_rst_info()
1542 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) in hclgevf_reset_err_handle() argument
1545 hclgevf_reset_handshake(hdev, true); in hclgevf_reset_err_handle()
1546 hdev->rst_stats.rst_fail_cnt++; in hclgevf_reset_err_handle()
1547 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", in hclgevf_reset_err_handle()
1548 hdev->rst_stats.rst_fail_cnt); in hclgevf_reset_err_handle()
1550 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) in hclgevf_reset_err_handle()
1551 set_bit(hdev->reset_type, &hdev->reset_pending); in hclgevf_reset_err_handle()
1553 if (hclgevf_is_reset_pending(hdev)) { in hclgevf_reset_err_handle()
1554 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); in hclgevf_reset_err_handle()
1555 hclgevf_reset_task_schedule(hdev); in hclgevf_reset_err_handle()
1557 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); in hclgevf_reset_err_handle()
1558 hclgevf_dump_rst_info(hdev); in hclgevf_reset_err_handle()
1562 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) in hclgevf_reset_prepare() argument
1566 hdev->rst_stats.rst_cnt++; in hclgevf_reset_prepare()
1569 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); in hclgevf_reset_prepare()
1575 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclgevf_reset_prepare()
1580 return hclgevf_reset_prepare_wait(hdev); in hclgevf_reset_prepare()
1583 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) in hclgevf_reset_rebuild() argument
1587 hdev->rst_stats.hw_rst_done_cnt++; in hclgevf_reset_rebuild()
1588 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); in hclgevf_reset_rebuild()
1594 ret = hclgevf_reset_stack(hdev); in hclgevf_reset_rebuild()
1597 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); in hclgevf_reset_rebuild()
1601 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); in hclgevf_reset_rebuild()
1606 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) in hclgevf_reset_rebuild()
1609 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); in hclgevf_reset_rebuild()
1613 hdev->last_reset_time = jiffies; in hclgevf_reset_rebuild()
1614 hdev->rst_stats.rst_done_cnt++; in hclgevf_reset_rebuild()
1615 hdev->rst_stats.rst_fail_cnt = 0; in hclgevf_reset_rebuild()
1616 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); in hclgevf_reset_rebuild()
1621 static void hclgevf_reset(struct hclgevf_dev *hdev) in hclgevf_reset() argument
1623 if (hclgevf_reset_prepare(hdev)) in hclgevf_reset()
1629 if (hclgevf_reset_wait(hdev)) { in hclgevf_reset()
1631 dev_err(&hdev->pdev->dev, in hclgevf_reset()
1636 if (hclgevf_reset_rebuild(hdev)) in hclgevf_reset()
1642 hclgevf_reset_err_handle(hdev); in hclgevf_reset()
1645 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, in hclgevf_get_reset_level() argument
1679 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_reset_event() local
1681 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); in hclgevf_reset_event()
1683 if (hdev->default_reset_request) in hclgevf_reset_event()
1684 hdev->reset_level = in hclgevf_reset_event()
1685 hclgevf_get_reset_level(hdev, in hclgevf_reset_event()
1686 &hdev->default_reset_request); in hclgevf_reset_event()
1688 hdev->reset_level = HNAE3_VF_FUNC_RESET; in hclgevf_reset_event()
1691 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); in hclgevf_reset_event()
1692 hclgevf_reset_task_schedule(hdev); in hclgevf_reset_event()
1694 hdev->last_reset_time = jiffies; in hclgevf_reset_event()
1700 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_set_def_reset_request() local
1702 set_bit(rst_type, &hdev->default_reset_request); in hclgevf_set_def_reset_request()
1716 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_reset_prepare_general() local
1721 down(&hdev->reset_sem); in hclgevf_reset_prepare_general()
1722 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); in hclgevf_reset_prepare_general()
1723 hdev->reset_type = rst_type; in hclgevf_reset_prepare_general()
1724 ret = hclgevf_reset_prepare(hdev); in hclgevf_reset_prepare_general()
1725 if (!ret && !hdev->reset_pending) in hclgevf_reset_prepare_general()
1728 dev_err(&hdev->pdev->dev, in hclgevf_reset_prepare_general()
1730 ret, hdev->reset_pending, retry_cnt); in hclgevf_reset_prepare_general()
1731 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); in hclgevf_reset_prepare_general()
1732 up(&hdev->reset_sem); in hclgevf_reset_prepare_general()
1737 hclgevf_enable_vector(&hdev->misc_vector, false); in hclgevf_reset_prepare_general()
1739 if (hdev->reset_type == HNAE3_FLR_RESET) in hclgevf_reset_prepare_general()
1740 hdev->rst_stats.flr_rst_cnt++; in hclgevf_reset_prepare_general()
1745 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_reset_done() local
1748 hclgevf_enable_vector(&hdev->misc_vector, true); in hclgevf_reset_done()
1750 ret = hclgevf_reset_rebuild(hdev); in hclgevf_reset_done()
1752 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", in hclgevf_reset_done()
1755 hdev->reset_type = HNAE3_NONE_RESET; in hclgevf_reset_done()
1756 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); in hclgevf_reset_done()
1757 up(&hdev->reset_sem); in hclgevf_reset_done()
1762 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_fw_version() local
1764 return hdev->fw_version; in hclgevf_get_fw_version()
1767 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) in hclgevf_get_misc_vector() argument
1769 struct hclgevf_misc_vector *vector = &hdev->misc_vector; in hclgevf_get_misc_vector()
1771 vector->vector_irq = pci_irq_vector(hdev->pdev, in hclgevf_get_misc_vector()
1773 vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; in hclgevf_get_misc_vector()
1775 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; in hclgevf_get_misc_vector()
1776 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; in hclgevf_get_misc_vector()
1778 hdev->num_msi_left -= 1; in hclgevf_get_misc_vector()
1779 hdev->num_msi_used += 1; in hclgevf_get_misc_vector()
1782 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) in hclgevf_reset_task_schedule() argument
1784 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && in hclgevf_reset_task_schedule()
1785 test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && in hclgevf_reset_task_schedule()
1787 &hdev->state)) in hclgevf_reset_task_schedule()
1788 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); in hclgevf_reset_task_schedule()
1791 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) in hclgevf_mbx_task_schedule() argument
1793 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && in hclgevf_mbx_task_schedule()
1795 &hdev->state)) in hclgevf_mbx_task_schedule()
1796 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); in hclgevf_mbx_task_schedule()
1799 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, in hclgevf_task_schedule() argument
1802 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && in hclgevf_task_schedule()
1803 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) in hclgevf_task_schedule()
1804 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); in hclgevf_task_schedule()
1807 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) in hclgevf_reset_service_task() argument
1811 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclgevf_reset_service_task()
1814 down(&hdev->reset_sem); in hclgevf_reset_service_task()
1815 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); in hclgevf_reset_service_task()
1818 &hdev->reset_state)) { in hclgevf_reset_service_task()
1824 hdev->reset_attempts = 0; in hclgevf_reset_service_task()
1826 hdev->last_reset_time = jiffies; in hclgevf_reset_service_task()
1827 hdev->reset_type = in hclgevf_reset_service_task()
1828 hclgevf_get_reset_level(hdev, &hdev->reset_pending); in hclgevf_reset_service_task()
1829 if (hdev->reset_type != HNAE3_NONE_RESET) in hclgevf_reset_service_task()
1830 hclgevf_reset(hdev); in hclgevf_reset_service_task()
1832 &hdev->reset_state)) { in hclgevf_reset_service_task()
1856 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { in hclgevf_reset_service_task()
1858 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); in hclgevf_reset_service_task()
1861 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); in hclgevf_reset_service_task()
1863 hdev->reset_attempts++; in hclgevf_reset_service_task()
1865 set_bit(hdev->reset_level, &hdev->reset_pending); in hclgevf_reset_service_task()
1866 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); in hclgevf_reset_service_task()
1868 hclgevf_reset_task_schedule(hdev); in hclgevf_reset_service_task()
1871 hdev->reset_type = HNAE3_NONE_RESET; in hclgevf_reset_service_task()
1872 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); in hclgevf_reset_service_task()
1873 up(&hdev->reset_sem); in hclgevf_reset_service_task()
1876 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) in hclgevf_mailbox_service_task() argument
1878 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) in hclgevf_mailbox_service_task()
1881 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) in hclgevf_mailbox_service_task()
1884 hclgevf_mbx_async_handler(hdev); in hclgevf_mailbox_service_task()
1886 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); in hclgevf_mailbox_service_task()
1889 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) in hclgevf_keep_alive() argument
1894 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) in hclgevf_keep_alive()
1898 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_keep_alive()
1900 dev_err(&hdev->pdev->dev, in hclgevf_keep_alive()
1904 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) in hclgevf_periodic_service_task() argument
1907 struct hnae3_handle *handle = &hdev->nic; in hclgevf_periodic_service_task()
1909 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) in hclgevf_periodic_service_task()
1912 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { in hclgevf_periodic_service_task()
1913 delta = jiffies - hdev->last_serv_processed; in hclgevf_periodic_service_task()
1921 hdev->serv_processed_cnt++; in hclgevf_periodic_service_task()
1922 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) in hclgevf_periodic_service_task()
1923 hclgevf_keep_alive(hdev); in hclgevf_periodic_service_task()
1925 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { in hclgevf_periodic_service_task()
1926 hdev->last_serv_processed = jiffies; in hclgevf_periodic_service_task()
1930 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) in hclgevf_periodic_service_task()
1931 hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclgevf_periodic_service_task()
1936 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) in hclgevf_periodic_service_task()
1937 hclgevf_request_link_info(hdev); in hclgevf_periodic_service_task()
1939 hclgevf_update_link_mode(hdev); in hclgevf_periodic_service_task()
1941 hclgevf_sync_vlan_filter(hdev); in hclgevf_periodic_service_task()
1943 hclgevf_sync_mac_table(hdev); in hclgevf_periodic_service_task()
1945 hclgevf_sync_promisc_mode(hdev); in hclgevf_periodic_service_task()
1947 hdev->last_serv_processed = jiffies; in hclgevf_periodic_service_task()
1950 hclgevf_task_schedule(hdev, delta); in hclgevf_periodic_service_task()
1955 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, in hclgevf_service_task() local
1958 hclgevf_reset_service_task(hdev); in hclgevf_service_task()
1959 hclgevf_mailbox_service_task(hdev); in hclgevf_service_task()
1960 hclgevf_periodic_service_task(hdev); in hclgevf_service_task()
1966 hclgevf_reset_service_task(hdev); in hclgevf_service_task()
1967 hclgevf_mailbox_service_task(hdev); in hclgevf_service_task()
1970 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) in hclgevf_clear_event_cause() argument
1972 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); in hclgevf_clear_event_cause()
1975 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, in hclgevf_check_evt_cause() argument
1981 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, in hclgevf_check_evt_cause()
1984 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); in hclgevf_check_evt_cause()
1985 dev_info(&hdev->pdev->dev, in hclgevf_check_evt_cause()
1987 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); in hclgevf_check_evt_cause()
1988 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); in hclgevf_check_evt_cause()
1989 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclgevf_check_evt_cause()
1991 hdev->rst_stats.vf_rst_cnt++; in hclgevf_check_evt_cause()
1995 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); in hclgevf_check_evt_cause()
1996 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, in hclgevf_check_evt_cause()
2010 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) in hclgevf_check_evt_cause()
2020 dev_info(&hdev->pdev->dev, in hclgevf_check_evt_cause()
2030 struct hclgevf_dev *hdev = data; in hclgevf_misc_irq_handle() local
2033 hclgevf_enable_vector(&hdev->misc_vector, false); in hclgevf_misc_irq_handle()
2034 event_cause = hclgevf_check_evt_cause(hdev, &clearval); in hclgevf_misc_irq_handle()
2036 hclgevf_clear_event_cause(hdev, clearval); in hclgevf_misc_irq_handle()
2040 hclgevf_reset_task_schedule(hdev); in hclgevf_misc_irq_handle()
2043 hclgevf_mbx_handler(hdev); in hclgevf_misc_irq_handle()
2049 hclgevf_enable_vector(&hdev->misc_vector, true); in hclgevf_misc_irq_handle()
2054 static int hclgevf_configure(struct hclgevf_dev *hdev) in hclgevf_configure() argument
2058 hdev->gro_en = true; in hclgevf_configure()
2060 ret = hclgevf_get_basic_info(hdev); in hclgevf_configure()
2065 ret = hclgevf_get_port_base_vlan_filter_state(hdev); in hclgevf_configure()
2070 ret = hclgevf_get_queue_info(hdev); in hclgevf_configure()
2075 ret = hclgevf_get_queue_depth(hdev); in hclgevf_configure()
2079 return hclgevf_get_pf_media_type(hdev); in hclgevf_configure()
2085 struct hclgevf_dev *hdev; in hclgevf_alloc_hdev() local
2087 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclgevf_alloc_hdev()
2088 if (!hdev) in hclgevf_alloc_hdev()
2091 hdev->pdev = pdev; in hclgevf_alloc_hdev()
2092 hdev->ae_dev = ae_dev; in hclgevf_alloc_hdev()
2093 ae_dev->priv = hdev; in hclgevf_alloc_hdev()
2098 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) in hclgevf_init_roce_base_info() argument
2100 struct hnae3_handle *roce = &hdev->roce; in hclgevf_init_roce_base_info()
2101 struct hnae3_handle *nic = &hdev->nic; in hclgevf_init_roce_base_info()
2103 roce->rinfo.num_vectors = hdev->num_roce_msix; in hclgevf_init_roce_base_info()
2105 if (hdev->num_msi_left < roce->rinfo.num_vectors || in hclgevf_init_roce_base_info()
2106 hdev->num_msi_left == 0) in hclgevf_init_roce_base_info()
2109 roce->rinfo.base_vector = hdev->roce_base_msix_offset; in hclgevf_init_roce_base_info()
2112 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; in hclgevf_init_roce_base_info()
2113 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; in hclgevf_init_roce_base_info()
2122 static int hclgevf_config_gro(struct hclgevf_dev *hdev) in hclgevf_config_gro() argument
2128 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) in hclgevf_config_gro()
2135 req->gro_en = hdev->gro_en ? 1 : 0; in hclgevf_config_gro()
2137 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); in hclgevf_config_gro()
2139 dev_err(&hdev->pdev->dev, in hclgevf_config_gro()
2145 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) in hclgevf_rss_init_hw() argument
2147 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclgevf_rss_init_hw()
2153 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclgevf_rss_init_hw()
2154 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, in hclgevf_rss_init_hw()
2160 ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw, in hclgevf_rss_init_hw()
2166 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, in hclgevf_rss_init_hw()
2171 hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map, in hclgevf_rss_init_hw()
2174 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, in hclgevf_rss_init_hw()
2178 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) in hclgevf_init_vlan_config() argument
2180 struct hnae3_handle *nic = &hdev->nic; in hclgevf_init_vlan_config()
2185 dev_err(&hdev->pdev->dev, in hclgevf_init_vlan_config()
2190 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, in hclgevf_init_vlan_config()
2194 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) in hclgevf_flush_link_update() argument
2198 unsigned long last = hdev->serv_processed_cnt; in hclgevf_flush_link_update()
2201 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && in hclgevf_flush_link_update()
2203 last == hdev->serv_processed_cnt) in hclgevf_flush_link_update()
2209 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_timer_task() local
2212 hclgevf_task_schedule(hdev, 0); in hclgevf_set_timer_task()
2214 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); in hclgevf_set_timer_task()
2218 hclgevf_flush_link_update(hdev); in hclgevf_set_timer_task()
2224 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_ae_start() local
2226 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); in hclgevf_ae_start()
2227 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); in hclgevf_ae_start()
2231 hclgevf_request_link_info(hdev); in hclgevf_ae_start()
2233 hclgevf_update_link_mode(hdev); in hclgevf_ae_start()
2240 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_ae_stop() local
2242 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); in hclgevf_ae_stop()
2244 if (hdev->reset_type != HNAE3_VF_RESET) in hclgevf_ae_stop()
2248 hclgevf_update_link_status(hdev, 0); in hclgevf_ae_stop()
2256 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_alive() local
2262 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_set_alive()
2272 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_client_stop() local
2277 dev_warn(&hdev->pdev->dev, in hclgevf_client_stop()
2281 static void hclgevf_state_init(struct hclgevf_dev *hdev) in hclgevf_state_init() argument
2283 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclgevf_state_init()
2284 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); in hclgevf_state_init()
2285 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); in hclgevf_state_init()
2287 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); in hclgevf_state_init()
2289 mutex_init(&hdev->mbx_resp.mbx_mutex); in hclgevf_state_init()
2290 sema_init(&hdev->reset_sem, 1); in hclgevf_state_init()
2292 spin_lock_init(&hdev->mac_table.mac_list_lock); in hclgevf_state_init()
2293 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); in hclgevf_state_init()
2294 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); in hclgevf_state_init()
2297 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); in hclgevf_state_init()
2300 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) in hclgevf_state_uninit() argument
2302 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); in hclgevf_state_uninit()
2303 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); in hclgevf_state_uninit()
2305 if (hdev->service_task.work.func) in hclgevf_state_uninit()
2306 cancel_delayed_work_sync(&hdev->service_task); in hclgevf_state_uninit()
2308 mutex_destroy(&hdev->mbx_resp.mbx_mutex); in hclgevf_state_uninit()
2311 static int hclgevf_init_msi(struct hclgevf_dev *hdev) in hclgevf_init_msi() argument
2313 struct pci_dev *pdev = hdev->pdev; in hclgevf_init_msi()
2317 if (hnae3_dev_roce_supported(hdev)) in hclgevf_init_msi()
2319 hdev->roce_base_msix_offset + 1, in hclgevf_init_msi()
2320 hdev->num_msi, in hclgevf_init_msi()
2324 hdev->num_msi, in hclgevf_init_msi()
2333 if (vectors < hdev->num_msi) in hclgevf_init_msi()
2334 dev_warn(&hdev->pdev->dev, in hclgevf_init_msi()
2336 hdev->num_msi, vectors); in hclgevf_init_msi()
2338 hdev->num_msi = vectors; in hclgevf_init_msi()
2339 hdev->num_msi_left = vectors; in hclgevf_init_msi()
2341 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclgevf_init_msi()
2343 if (!hdev->vector_status) { in hclgevf_init_msi()
2348 for (i = 0; i < hdev->num_msi; i++) in hclgevf_init_msi()
2349 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; in hclgevf_init_msi()
2351 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclgevf_init_msi()
2353 if (!hdev->vector_irq) { in hclgevf_init_msi()
2354 devm_kfree(&pdev->dev, hdev->vector_status); in hclgevf_init_msi()
2362 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) in hclgevf_uninit_msi() argument
2364 struct pci_dev *pdev = hdev->pdev; in hclgevf_uninit_msi()
2366 devm_kfree(&pdev->dev, hdev->vector_status); in hclgevf_uninit_msi()
2367 devm_kfree(&pdev->dev, hdev->vector_irq); in hclgevf_uninit_msi()
2371 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) in hclgevf_misc_irq_init() argument
2375 hclgevf_get_misc_vector(hdev); in hclgevf_misc_irq_init()
2377 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", in hclgevf_misc_irq_init()
2378 HCLGEVF_NAME, pci_name(hdev->pdev)); in hclgevf_misc_irq_init()
2379 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, in hclgevf_misc_irq_init()
2380 0, hdev->misc_vector.name, hdev); in hclgevf_misc_irq_init()
2382 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", in hclgevf_misc_irq_init()
2383 hdev->misc_vector.vector_irq); in hclgevf_misc_irq_init()
2387 hclgevf_clear_event_cause(hdev, 0); in hclgevf_misc_irq_init()
2390 hclgevf_enable_vector(&hdev->misc_vector, true); in hclgevf_misc_irq_init()
2395 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) in hclgevf_misc_irq_uninit() argument
2398 hclgevf_enable_vector(&hdev->misc_vector, false); in hclgevf_misc_irq_uninit()
2399 synchronize_irq(hdev->misc_vector.vector_irq); in hclgevf_misc_irq_uninit()
2400 free_irq(hdev->misc_vector.vector_irq, hdev); in hclgevf_misc_irq_uninit()
2401 hclgevf_free_vector(hdev, 0); in hclgevf_misc_irq_uninit()
2404 static void hclgevf_info_show(struct hclgevf_dev *hdev) in hclgevf_info_show() argument
2406 struct device *dev = &hdev->pdev->dev; in hclgevf_info_show()
2410 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); in hclgevf_info_show()
2411 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); in hclgevf_info_show()
2412 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); in hclgevf_info_show()
2413 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); in hclgevf_info_show()
2414 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); in hclgevf_info_show()
2416 hdev->hw.mac.media_type); in hclgevf_info_show()
2424 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_init_nic_client_instance() local
2425 int rst_cnt = hdev->rst_stats.rst_cnt; in hclgevf_init_nic_client_instance()
2428 ret = client->ops->init_instance(&hdev->nic); in hclgevf_init_nic_client_instance()
2432 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); in hclgevf_init_nic_client_instance()
2433 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || in hclgevf_init_nic_client_instance()
2434 rst_cnt != hdev->rst_stats.rst_cnt) { in hclgevf_init_nic_client_instance()
2435 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); in hclgevf_init_nic_client_instance()
2437 client->ops->uninit_instance(&hdev->nic, 0); in hclgevf_init_nic_client_instance()
2443 if (netif_msg_drv(&hdev->nic)) in hclgevf_init_nic_client_instance()
2444 hclgevf_info_show(hdev); in hclgevf_init_nic_client_instance()
2452 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_init_roce_client_instance() local
2455 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclgevf_init_roce_client_instance()
2456 !hdev->nic_client) in hclgevf_init_roce_client_instance()
2459 ret = hclgevf_init_roce_base_info(hdev); in hclgevf_init_roce_client_instance()
2463 ret = client->ops->init_instance(&hdev->roce); in hclgevf_init_roce_client_instance()
2467 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); in hclgevf_init_roce_client_instance()
2476 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_init_client_instance() local
2481 hdev->nic_client = client; in hclgevf_init_client_instance()
2482 hdev->nic.client = client; in hclgevf_init_client_instance()
2489 hdev->roce_client); in hclgevf_init_client_instance()
2495 if (hnae3_dev_roce_supported(hdev)) { in hclgevf_init_client_instance()
2496 hdev->roce_client = client; in hclgevf_init_client_instance()
2497 hdev->roce.client = client; in hclgevf_init_client_instance()
2512 hdev->nic_client = NULL; in hclgevf_init_client_instance()
2513 hdev->nic.client = NULL; in hclgevf_init_client_instance()
2516 hdev->roce_client = NULL; in hclgevf_init_client_instance()
2517 hdev->roce.client = NULL; in hclgevf_init_client_instance()
2524 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_uninit_client_instance() local
2527 if (hdev->roce_client) { in hclgevf_uninit_client_instance()
2528 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) in hclgevf_uninit_client_instance()
2530 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); in hclgevf_uninit_client_instance()
2532 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); in hclgevf_uninit_client_instance()
2533 hdev->roce_client = NULL; in hclgevf_uninit_client_instance()
2534 hdev->roce.client = NULL; in hclgevf_uninit_client_instance()
2538 if (client->ops->uninit_instance && hdev->nic_client && in hclgevf_uninit_client_instance()
2540 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) in hclgevf_uninit_client_instance()
2542 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); in hclgevf_uninit_client_instance()
2544 client->ops->uninit_instance(&hdev->nic, 0); in hclgevf_uninit_client_instance()
2545 hdev->nic_client = NULL; in hclgevf_uninit_client_instance()
2546 hdev->nic.client = NULL; in hclgevf_uninit_client_instance()
2550 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) in hclgevf_dev_mem_map() argument
2552 struct pci_dev *pdev = hdev->pdev; in hclgevf_dev_mem_map()
2553 struct hclgevf_hw *hw = &hdev->hw; in hclgevf_dev_mem_map()
2571 static int hclgevf_pci_init(struct hclgevf_dev *hdev) in hclgevf_pci_init() argument
2573 struct pci_dev *pdev = hdev->pdev; in hclgevf_pci_init()
2596 hw = &hdev->hw; in hclgevf_pci_init()
2604 ret = hclgevf_dev_mem_map(hdev); in hclgevf_pci_init()
2611 pci_iounmap(pdev, hdev->hw.hw.io_base); in hclgevf_pci_init()
2621 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) in hclgevf_pci_uninit() argument
2623 struct pci_dev *pdev = hdev->pdev; in hclgevf_pci_uninit()
2625 if (hdev->hw.hw.mem_base) in hclgevf_pci_uninit()
2626 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); in hclgevf_pci_uninit()
2628 pci_iounmap(pdev, hdev->hw.hw.io_base); in hclgevf_pci_uninit()
2634 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) in hclgevf_query_vf_resource() argument
2641 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); in hclgevf_query_vf_resource()
2643 dev_err(&hdev->pdev->dev, in hclgevf_query_vf_resource()
2650 if (hnae3_dev_roce_supported(hdev)) { in hclgevf_query_vf_resource()
2651 hdev->roce_base_msix_offset = in hclgevf_query_vf_resource()
2655 hdev->num_roce_msix = in hclgevf_query_vf_resource()
2660 hdev->num_nic_msix = hdev->num_roce_msix; in hclgevf_query_vf_resource()
2665 hdev->num_msi = hdev->num_roce_msix + in hclgevf_query_vf_resource()
2666 hdev->roce_base_msix_offset; in hclgevf_query_vf_resource()
2668 hdev->num_msi = in hclgevf_query_vf_resource()
2672 hdev->num_nic_msix = hdev->num_msi; in hclgevf_query_vf_resource()
2675 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { in hclgevf_query_vf_resource()
2676 dev_err(&hdev->pdev->dev, in hclgevf_query_vf_resource()
2678 hdev->num_nic_msix); in hclgevf_query_vf_resource()
2685 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) in hclgevf_set_default_dev_specs() argument
2689 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclgevf_set_default_dev_specs()
2699 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, in hclgevf_parse_dev_specs() argument
2702 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclgevf_parse_dev_specs()
2718 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) in hclgevf_check_dev_specs() argument
2720 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; in hclgevf_check_dev_specs()
2734 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) in hclgevf_query_dev_specs() argument
2743 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclgevf_query_dev_specs()
2744 hclgevf_set_default_dev_specs(hdev); in hclgevf_query_dev_specs()
2755 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); in hclgevf_query_dev_specs()
2759 hclgevf_parse_dev_specs(hdev, desc); in hclgevf_query_dev_specs()
2760 hclgevf_check_dev_specs(hdev); in hclgevf_query_dev_specs()
2765 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) in hclgevf_pci_reset() argument
2767 struct pci_dev *pdev = hdev->pdev; in hclgevf_pci_reset()
2770 if (hdev->reset_type == HNAE3_VF_FULL_RESET && in hclgevf_pci_reset()
2771 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { in hclgevf_pci_reset()
2772 hclgevf_misc_irq_uninit(hdev); in hclgevf_pci_reset()
2773 hclgevf_uninit_msi(hdev); in hclgevf_pci_reset()
2774 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); in hclgevf_pci_reset()
2777 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { in hclgevf_pci_reset()
2779 ret = hclgevf_init_msi(hdev); in hclgevf_pci_reset()
2786 ret = hclgevf_misc_irq_init(hdev); in hclgevf_pci_reset()
2788 hclgevf_uninit_msi(hdev); in hclgevf_pci_reset()
2794 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); in hclgevf_pci_reset()
2800 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) in hclgevf_clear_vport_list() argument
2806 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_clear_vport_list()
2809 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) in hclgevf_init_rxd_adv_layout() argument
2811 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclgevf_init_rxd_adv_layout()
2812 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); in hclgevf_init_rxd_adv_layout()
2815 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) in hclgevf_uninit_rxd_adv_layout() argument
2817 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclgevf_uninit_rxd_adv_layout()
2818 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); in hclgevf_uninit_rxd_adv_layout()
2821 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) in hclgevf_reset_hdev() argument
2823 struct pci_dev *pdev = hdev->pdev; in hclgevf_reset_hdev()
2826 ret = hclgevf_pci_reset(hdev); in hclgevf_reset_hdev()
2832 hclgevf_arq_init(hdev); in hclgevf_reset_hdev()
2833 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, in hclgevf_reset_hdev()
2834 &hdev->fw_version, false, in hclgevf_reset_hdev()
2835 hdev->reset_pending); in hclgevf_reset_hdev()
2841 ret = hclgevf_rss_init_hw(hdev); in hclgevf_reset_hdev()
2843 dev_err(&hdev->pdev->dev, in hclgevf_reset_hdev()
2848 ret = hclgevf_config_gro(hdev); in hclgevf_reset_hdev()
2852 ret = hclgevf_init_vlan_config(hdev); in hclgevf_reset_hdev()
2854 dev_err(&hdev->pdev->dev, in hclgevf_reset_hdev()
2860 ret = hclgevf_get_port_base_vlan_filter_state(hdev); in hclgevf_reset_hdev()
2864 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); in hclgevf_reset_hdev()
2866 hclgevf_init_rxd_adv_layout(hdev); in hclgevf_reset_hdev()
2868 dev_info(&hdev->pdev->dev, "Reset done\n"); in hclgevf_reset_hdev()
2873 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) in hclgevf_init_hdev() argument
2875 struct pci_dev *pdev = hdev->pdev; in hclgevf_init_hdev()
2878 ret = hclgevf_pci_init(hdev); in hclgevf_init_hdev()
2882 ret = hclgevf_devlink_init(hdev); in hclgevf_init_hdev()
2886 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); in hclgevf_init_hdev()
2890 hclgevf_arq_init(hdev); in hclgevf_init_hdev()
2891 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, in hclgevf_init_hdev()
2892 &hdev->fw_version, false, in hclgevf_init_hdev()
2893 hdev->reset_pending); in hclgevf_init_hdev()
2898 ret = hclgevf_query_vf_resource(hdev); in hclgevf_init_hdev()
2902 ret = hclgevf_query_dev_specs(hdev); in hclgevf_init_hdev()
2909 ret = hclgevf_init_msi(hdev); in hclgevf_init_hdev()
2915 hclgevf_state_init(hdev); in hclgevf_init_hdev()
2916 hdev->reset_level = HNAE3_VF_FUNC_RESET; in hclgevf_init_hdev()
2917 hdev->reset_type = HNAE3_NONE_RESET; in hclgevf_init_hdev()
2919 ret = hclgevf_misc_irq_init(hdev); in hclgevf_init_hdev()
2923 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); in hclgevf_init_hdev()
2925 ret = hclgevf_configure(hdev); in hclgevf_init_hdev()
2931 ret = hclgevf_alloc_tqps(hdev); in hclgevf_init_hdev()
2937 ret = hclgevf_set_handle_info(hdev); in hclgevf_init_hdev()
2941 ret = hclgevf_config_gro(hdev); in hclgevf_init_hdev()
2946 ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev, in hclgevf_init_hdev()
2947 &hdev->rss_cfg); in hclgevf_init_hdev()
2953 ret = hclgevf_rss_init_hw(hdev); in hclgevf_init_hdev()
2955 dev_err(&hdev->pdev->dev, in hclgevf_init_hdev()
2961 ret = hclgevf_clear_vport_list(hdev); in hclgevf_init_hdev()
2969 ret = hclgevf_init_vlan_config(hdev); in hclgevf_init_hdev()
2971 dev_err(&hdev->pdev->dev, in hclgevf_init_hdev()
2976 hclgevf_init_rxd_adv_layout(hdev); in hclgevf_init_hdev()
2978 set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); in hclgevf_init_hdev()
2980 hdev->last_reset_time = jiffies; in hclgevf_init_hdev()
2981 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", in hclgevf_init_hdev()
2984 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); in hclgevf_init_hdev()
2989 hclgevf_misc_irq_uninit(hdev); in hclgevf_init_hdev()
2991 hclgevf_state_uninit(hdev); in hclgevf_init_hdev()
2992 hclgevf_uninit_msi(hdev); in hclgevf_init_hdev()
2994 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclgevf_init_hdev()
2996 hclgevf_devlink_uninit(hdev); in hclgevf_init_hdev()
2998 hclgevf_pci_uninit(hdev); in hclgevf_init_hdev()
2999 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); in hclgevf_init_hdev()
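The tail of hclgevf_init_hdev() (misc_irq_uninit, state_uninit, uninit_msi, cmd_uninit, devlink_uninit, pci_uninit, then clearing IRQ_INITED) is the usual kernel "unwind in reverse order of acquisition" ladder. A minimal standalone illustration of that goto-ladder shape; the labels and helpers here are illustrative only, not the driver's:

    #include <stdio.h>

    /* Illustrative acquire/release pairs; not the driver's functions. */
    static int  pci_init(void)   { return 0; }
    static void pci_uninit(void) { }
    static int  cmd_init(void)   { return 0; }
    static void cmd_uninit(void) { }
    static int  msi_init(void)   { return 0; }
    static void msi_uninit(void) { }
    static int  irq_init(void)   { return 0; }

    static int init_hdev_sketch(void)
    {
        int ret;

        ret = pci_init();
        if (ret)
            return ret;

        ret = cmd_init();
        if (ret)
            goto err_cmd_init;

        ret = msi_init();
        if (ret)
            goto err_msi_init;

        ret = irq_init();
        if (ret)
            goto err_irq_init;

        return 0;

        /* Unwind strictly in reverse order of acquisition. */
    err_irq_init:
        msi_uninit();
    err_msi_init:
        cmd_uninit();
    err_cmd_init:
        pci_uninit();
        return ret;
    }

    int main(void)
    {
        printf("init_hdev_sketch() = %d\n", init_hdev_sketch());
        return 0;
    }

Each label releases only what was acquired before the failing step, then falls through to the earlier labels, so no resource is freed twice and none is leaked.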
3003 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) in hclgevf_uninit_hdev() argument
3007 hclgevf_state_uninit(hdev); in hclgevf_uninit_hdev()
3008 hclgevf_uninit_rxd_adv_layout(hdev); in hclgevf_uninit_hdev()
3011 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_uninit_hdev()
3013 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { in hclgevf_uninit_hdev()
3014 hclgevf_misc_irq_uninit(hdev); in hclgevf_uninit_hdev()
3015 hclgevf_uninit_msi(hdev); in hclgevf_uninit_hdev()
3018 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclgevf_uninit_hdev()
3019 hclgevf_devlink_uninit(hdev); in hclgevf_uninit_hdev()
3020 hclgevf_pci_uninit(hdev); in hclgevf_uninit_hdev()
3021 hclgevf_uninit_mac_list(hdev); in hclgevf_uninit_hdev()
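hclgevf_uninit_hdev() tears down in roughly the reverse order of init: stop the state machinery, disable the advanced RXD layout, tell the PF the VF is going away over the mailbox, release the misc IRQ and MSI only if HCLGEVF_STATE_IRQ_INITED is still set, then shut down the command queue, devlink, PCI resources, and the cached MAC lists. A small sketch of the guarded IRQ teardown; all names are stand-ins:

    #include <stdbool.h>

    static bool irq_inited = true;     /* stands in for HCLGEVF_STATE_IRQ_INITED */

    static void misc_irq_uninit(void)  { }
    static void msi_uninit(void)       { }
    static void cmd_uninit(void)       { }
    static void pci_uninit(void)       { }

    static void uninit_hdev_sketch(void)
    {
        /* ... stop state machinery, disable RXD layout, send the
         * VF-unregister mailbox message to the PF ... */

        if (irq_inited) {              /* release IRQs only if they were set up */
            misc_irq_uninit();
            msi_uninit();
        }

        cmd_uninit();
        pci_uninit();
        /* ... free the cached MAC address lists ... */
    }

    int main(void)
    {
        uninit_hdev_sketch();
        return 0;
    }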
3046 struct hclgevf_dev *hdev = ae_dev->priv; in hclgevf_uninit_ae_dev() local
3048 hclgevf_uninit_hdev(hdev); in hclgevf_uninit_ae_dev()
3052 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) in hclgevf_get_max_channels() argument
3054 struct hnae3_handle *nic = &hdev->nic; in hclgevf_get_max_channels()
3057 return min_t(u32, hdev->rss_size_max, in hclgevf_get_max_channels()
3058 hdev->num_tqps / kinfo->tc_info.num_tc); in hclgevf_get_max_channels()
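The max-channel computation in hclgevf_get_max_channels() is plain arithmetic: the VF cannot expose more combined channels than either its RSS limit or its TQP budget split across TCs. For example, with rss_size_max = 16, num_tqps = 8 and num_tc = 2, min(16, 8 / 2) = 4 combined channels. The same min() computation as a standalone sketch, without the kernel's min_t macro:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_channels(uint16_t rss_size_max, uint16_t num_tqps,
                                 uint8_t num_tc)
    {
        uint32_t per_tc = num_tqps / num_tc;   /* TQPs available per TC */

        return rss_size_max < per_tc ? rss_size_max : per_tc;
    }

    int main(void)
    {
        /* Hypothetical values: rss_size_max = 16, 8 TQPs, 2 TCs -> prints 4. */
        printf("%" PRIu32 "\n", max_channels(16, 8, 2));
        return 0;
    }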
3074 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_channels() local
3076 ch->max_combined = hclgevf_get_max_channels(hdev); in hclgevf_get_channels()
3085 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_tqps_and_rss_info() local
3087 *alloc_tqps = hdev->num_tqps; in hclgevf_get_tqps_and_rss_info()
3088 *max_rss_size = hdev->rss_size_max; in hclgevf_get_tqps_and_rss_info()
3095 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_update_rss_size() local
3100 max_rss_size = min_t(u16, hdev->rss_size_max, in hclgevf_update_rss_size()
3101 hdev->num_tqps / kinfo->tc_info.num_tc); in hclgevf_update_rss_size()
3119 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_set_channels() local
3132 hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map, in hclgevf_set_channels()
3134 ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, in hclgevf_set_channels()
3144 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, in hclgevf_set_channels()
3149 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) in hclgevf_set_channels()
3152 hdev->rss_cfg.rss_size = kinfo->rss_size; in hclgevf_set_channels()
3156 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclgevf_set_channels()
3163 dev_info(&hdev->pdev->dev, in hclgevf_set_channels()
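hclgevf_set_channels() first recomputes the per-TC offset/size/valid arrays from the new RSS size and hw_tc_map, programs the RSS TC mode, and then rebuilds the RSS indirection table so each of the rss_ind_tbl_size entries points at a queue inside the new rss_size. A sketch of that indirection-table refill; the round-robin fill and helper name are illustrative, not necessarily the driver's exact code:

    #include <stdint.h>
    #include <stdlib.h>

    /* Re-spread the RSS indirection table across the new queue count. */
    static uint16_t *rebuild_rss_indir(uint16_t ind_tbl_size, uint16_t rss_size)
    {
        uint16_t *rss_indir;
        unsigned int i;

        if (!rss_size)
            return NULL;

        rss_indir = calloc(ind_tbl_size, sizeof(*rss_indir));
        if (!rss_indir)
            return NULL;

        for (i = 0; i < ind_tbl_size; i++)
            rss_indir[i] = i % rss_size;   /* entry i -> queue (i mod rss_size) */

        return rss_indir;                  /* caller frees after programming HW */
    }

    int main(void)
    {
        uint16_t *tbl = rebuild_rss_indir(512, 8);   /* hypothetical sizes */

        free(tbl);
        return 0;
    }

If programming the table into hardware fails, the driver logs "set rss indir table fail" rather than reporting the new channel count as applied.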
3173 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_status() local
3175 return hdev->hw.mac.link; in hclgevf_get_status()
3182 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_ksettings_an_result() local
3185 *speed = hdev->hw.mac.speed; in hclgevf_get_ksettings_an_result()
3187 *duplex = hdev->hw.mac.duplex; in hclgevf_get_ksettings_an_result()
3192 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, in hclgevf_update_speed_duplex() argument
3195 hdev->hw.mac.speed = speed; in hclgevf_update_speed_duplex()
3196 hdev->hw.mac.duplex = duplex; in hclgevf_update_speed_duplex()
3201 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_gro_en() local
3202 bool gro_en_old = hdev->gro_en; in hclgevf_gro_en()
3205 hdev->gro_en = enable; in hclgevf_gro_en()
3206 ret = hclgevf_config_gro(hdev); in hclgevf_gro_en()
3208 hdev->gro_en = gro_en_old; in hclgevf_gro_en()
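hclgevf_gro_en() uses a save/rollback pattern: the previous gro_en value is captured, the new value is written into the device structure, and the hardware is reconfigured; if that fails, the cached value is restored so software state never disagrees with the device. A minimal sketch of the pattern; config_gro_hw() is a stand-in:

    #include <stdbool.h>

    static bool gro_en;

    /* Stand-in for pushing the GRO setting to hardware. */
    static int config_gro_hw(bool enable)
    {
        (void)enable;
        return 0;                  /* would return a negative errno on failure */
    }

    static int set_gro(bool enable)
    {
        bool old = gro_en;         /* remember the last known-good value */
        int ret;

        gro_en = enable;
        ret = config_gro_hw(enable);
        if (ret)
            gro_en = old;          /* roll back so cached state matches HW */

        return ret;
    }

    int main(void)
    {
        return set_gro(true);
    }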
3216 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_media_type() local
3219 *media_type = hdev->hw.mac.media_type; in hclgevf_get_media_type()
3222 *module_type = hdev->hw.mac.module_type; in hclgevf_get_media_type()
3227 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_hw_reset_stat() local
3229 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); in hclgevf_get_hw_reset_stat()
3234 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_cmdq_stat() local
3236 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclgevf_get_cmdq_stat()
3241 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_ae_dev_resetting() local
3243 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); in hclgevf_ae_dev_resetting()
3248 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_ae_dev_reset_cnt() local
3250 return hdev->rst_stats.hw_rst_done_cnt; in hclgevf_ae_dev_reset_cnt()
3257 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_link_mode() local
3259 *supported = hdev->hw.mac.supported; in hclgevf_get_link_mode()
3260 *advertising = hdev->hw.mac.advertising; in hclgevf_get_link_mode()
3271 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_regs_len() local
3278 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + in hclgevf_get_regs_len()
3279 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; in hclgevf_get_regs_len()
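The register-dump length in hclgevf_get_regs_len() is pure arithmetic: a fixed number of command-queue and common lines, plus one block of ring lines per TQP and one block of interrupt lines per TQP vector (num_msi_used - 1, since vector 0 is the misc interrupt), all multiplied by the size of a dumped line. The same formula with hypothetical counts; the constants below are examples, not the driver's values:

    #include <stdio.h>

    #define REG_LEN_PER_LINE 32    /* hypothetical bytes per dumped line */

    static int get_regs_len(int cmdq_lines, int common_lines, int ring_lines,
                            int tqp_intr_lines, int num_tqps, int num_msi_used)
    {
        return (cmdq_lines + common_lines + ring_lines * num_tqps +
                tqp_intr_lines * (num_msi_used - 1)) * REG_LEN_PER_LINE;
    }

    int main(void)
    {
        /* Example: 4 cmdq + 6 common lines, 5 ring lines x 8 TQPs,
         * 2 intr lines x (9 - 1) vectors -> 66 lines x 32 bytes = 2112. */
        printf("%d bytes\n", get_regs_len(4, 6, 5, 2, 8, 9));
        return 0;
    }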
3285 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); in hclgevf_get_regs() local
3289 *version = hdev->fw_version; in hclgevf_get_regs()
3295 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); in hclgevf_get_regs()
3302 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); in hclgevf_get_regs()
3308 for (j = 0; j < hdev->num_tqps; j++) { in hclgevf_get_regs()
3310 *reg++ = hclgevf_read_dev(&hdev->hw, in hclgevf_get_regs()
3319 for (j = 0; j < hdev->num_msi_used - 1; j++) { in hclgevf_get_regs()
3321 *reg++ = hclgevf_read_dev(&hdev->hw, in hclgevf_get_regs()
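hclgevf_get_regs() walks the same four groups in the same order as the length calculation: the cmdq register list, the common register list, the per-ring list repeated for every TQP, and the per-vector interrupt list repeated for each of the num_msi_used - 1 TQP vectors, reading each register and appending it to the output buffer. A sketch of that nested-walk shape; the register tables, strides, and read helper below are all made up for illustration:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in register address tables and MMIO read; offsets are invented. */
    static const uint32_t cmdq_regs[]   = { 0x00, 0x04 };
    static const uint32_t common_regs[] = { 0x10, 0x14, 0x18 };
    static const uint32_t ring_regs[]   = { 0x100, 0x104 };
    static const uint32_t intr_regs[]   = { 0x200 };

    #define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))
    #define TQP_REG_STRIDE 0x200       /* hypothetical per-queue stride */
    #define VEC_REG_STRIDE 0x4         /* hypothetical per-vector stride */

    static uint32_t read_reg(uint32_t addr) { return addr; /* fake read */ }

    static uint32_t *dump_regs(uint32_t *reg, int num_tqps, int num_msi_used)
    {
        size_t i;
        int j;

        for (i = 0; i < ARRAY_SIZE(cmdq_regs); i++)        /* command queue regs */
            *reg++ = read_reg(cmdq_regs[i]);

        for (i = 0; i < ARRAY_SIZE(common_regs); i++)      /* common regs */
            *reg++ = read_reg(common_regs[i]);

        for (j = 0; j < num_tqps; j++)                     /* one block per TQP */
            for (i = 0; i < ARRAY_SIZE(ring_regs); i++)
                *reg++ = read_reg(ring_regs[i] + TQP_REG_STRIDE * j);

        for (j = 0; j < num_msi_used - 1; j++)             /* skip misc vector 0 */
            for (i = 0; i < ARRAY_SIZE(intr_regs); i++)
                *reg++ = read_reg(intr_regs[i] + VEC_REG_STRIDE * j);

        return reg;                                        /* one past last word */
    }

    int main(void)
    {
        uint32_t buf[2 + 3 + 2 * 4 + 1 * 3];               /* matches counts below */

        dump_regs(buf, 4, 4);                              /* 4 TQPs, 4 vectors */
        return 0;
    }

Keeping the walk order identical to the length formula is what guarantees the buffer sized by get_regs_len() is filled exactly, with no overrun and no gap.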
3329 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, in hclgevf_update_port_base_vlan_info() argument
3332 struct hnae3_handle *nic = &hdev->nic; in hclgevf_update_port_base_vlan_info()
3338 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || in hclgevf_update_port_base_vlan_info()
3339 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { in hclgevf_update_port_base_vlan_info()
3340 dev_warn(&hdev->pdev->dev, in hclgevf_update_port_base_vlan_info()
3346 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclgevf_update_port_base_vlan_info()
3356 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); in hclgevf_update_port_base_vlan_info()
3364 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); in hclgevf_update_port_base_vlan_info()
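hclgevf_update_port_base_vlan_info() is a guarded down/update/up sequence: if a reset is in progress or has failed, the update is skipped with a warning; otherwise the client is brought down, the new port-based VLAN state is sent to the PF over the mailbox, the cached state is updated on success, and the client is brought back up in any case. A sketch of that sequence; the notify and mailbox helpers are stand-ins:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool resetting, reset_failed;

    /* Stand-ins for client notification and the PF mailbox message. */
    static int  notify_client_down(void)             { return 0; }
    static void notify_client_up(void)               { }
    static int  send_port_vlan_to_pf(uint16_t state) { (void)state; return 0; }

    static void update_port_base_vlan_sketch(uint16_t state)
    {
        if (resetting || reset_failed) {
            printf("skip: device is resetting or reset has failed\n");
            return;
        }

        if (notify_client_down())          /* quiesce the netdev first */
            return;

        if (!send_port_vlan_to_pf(state))  /* tell the PF via mailbox */
            printf("cached new port-based VLAN state %u\n", (unsigned int)state);

        notify_client_up();                /* bring the client back up regardless */
    }

    int main(void)
    {
        update_port_base_vlan_sketch(0);
        return 0;
    }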