Lines matching full:rf — every occurrence of the identifier rf, the struct irdma_pci_f "RDMA PCI function" context used throughout the Linux irdma driver. Each hit lists the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" notes that rf is a function parameter or a local variable at that hit.
7 * @rf: RDMA PCI function
13 int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4, in irdma_arp_table() argument
25 spin_lock_irqsave(&rf->arp_lock, flags); in irdma_arp_table()
26 for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) { in irdma_arp_table()
27 if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) in irdma_arp_table()
33 if (arp_index != rf->arp_table_size) { in irdma_arp_table()
39 if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size, in irdma_arp_table()
40 (u32 *)&arp_index, &rf->next_arp_index)) { in irdma_arp_table()
45 memcpy(rf->arp_table[arp_index].ip_addr, ip, in irdma_arp_table()
46 sizeof(rf->arp_table[arp_index].ip_addr)); in irdma_arp_table()
47 ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr); in irdma_arp_table()
50 if (arp_index == rf->arp_table_size) in irdma_arp_table()
54 if (arp_index == rf->arp_table_size) { in irdma_arp_table()
59 memset(rf->arp_table[arp_index].ip_addr, 0, in irdma_arp_table()
60 sizeof(rf->arp_table[arp_index].ip_addr)); in irdma_arp_table()
61 eth_zero_addr(rf->arp_table[arp_index].mac_addr); in irdma_arp_table()
62 irdma_free_rsrc(rf, rf->allocated_arps, arp_index); in irdma_arp_table()
69 spin_unlock_irqrestore(&rf->arp_lock, flags); in irdma_arp_table()
75 * @rf: RDMA function
80 int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac) in irdma_add_arp() argument
84 arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE); in irdma_add_arp()
86 if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac)) in irdma_add_arp()
89 irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip, in irdma_add_arp()
93 irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD); in irdma_add_arp()
95 return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE); in irdma_add_arp()
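The irdma_arp_table() and irdma_add_arp() fragments above show a find-or-add flow: scan the table for a matching IP under arp_lock, reuse the entry if it exists, otherwise claim a free slot and copy the IP and MAC into it. Below is a minimal userspace sketch of that pattern; the table size, entry layout, and helper names are illustrative assumptions, not the driver's definitions (which also handle locking, IPv6 addresses, and the delete path).

```c
/*
 * Simplified model of the find-or-add pattern suggested by
 * irdma_arp_table()/irdma_add_arp(). All names and sizes here are
 * illustrative, not taken from the driver.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TBL_SIZE 8                   /* illustrative; the driver sizes this at init */

struct arp_entry {
	uint32_t ip[4];              /* IPv4 uses ip[0]; IPv6 uses all four words */
	uint8_t  mac[6];
	bool     in_use;
};

static struct arp_entry tbl[TBL_SIZE];

/* Return the index of the entry matching @ip, or -1 if none exists. */
static int arp_find(const uint32_t ip[4])
{
	for (int i = 0; i < TBL_SIZE; i++)
		if (tbl[i].in_use && !memcmp(tbl[i].ip, ip, sizeof(tbl[i].ip)))
			return i;
	return -1;
}

/* Find-or-add: reuse an existing entry, otherwise claim a free slot. */
static int arp_add(const uint32_t ip[4], const uint8_t mac[6])
{
	int idx = arp_find(ip);

	if (idx >= 0) {
		memcpy(tbl[idx].mac, mac, 6);        /* refresh the MAC if it changed */
		return idx;
	}
	for (int i = 0; i < TBL_SIZE; i++) {
		if (!tbl[i].in_use) {
			tbl[i].in_use = true;
			memcpy(tbl[i].ip, ip, sizeof(tbl[i].ip));
			memcpy(tbl[i].mac, mac, 6);
			return i;
		}
	}
	return -1;                                   /* table full */
}

int main(void)
{
	uint32_t ip[4] = { 0xc0a80001, 0, 0, 0 };
	uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("added at index %d\n", arp_add(ip, mac));
	printf("resolved to index %d\n", arp_find(ip));
	return 0;
}
```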
169 irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr, in irdma_inetaddr_event()
176 irdma_add_arp(iwdev->rf, &local_ipaddr, true, netdev->dev_addr); in irdma_inetaddr_event()
215 irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr, in irdma_inet6addr_event()
222 irdma_add_arp(iwdev->rf, local_ipaddr6, false, in irdma_inet6addr_event()
275 irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha); in irdma_net_event()
278 irdma_manage_arp_cache(iwdev->rf, neigh->ha, in irdma_net_event()
357 irdma_manage_arp_cache(iwdev->rf, in irdma_add_ipv6_addr()
394 irdma_manage_arp_cache(iwdev->rf, dev->dev_addr, in irdma_add_ipv4_addr()
520 * @rf: RDMA PCI function
522 void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf) in irdma_cleanup_pending_cqp_op() argument
524 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_cleanup_pending_cqp_op()
525 struct irdma_cqp *cqp = &rf->cqp; in irdma_cleanup_pending_cqp_op()
551 * @rf: RDMA PCI function
554 static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf, in irdma_wait_event() argument
561 cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; in irdma_wait_event()
563 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); in irdma_wait_event()
569 irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev); in irdma_wait_event()
574 if (!rf->reset) { in irdma_wait_event()
575 rf->reset = true; in irdma_wait_event()
576 rf->gen_ops.request_reset(rf); in irdma_wait_event()
586 if (!rf->reset) { in irdma_wait_event()
587 rf->reset = true; in irdma_wait_event()
588 rf->gen_ops.request_reset(rf); in irdma_wait_event()
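irdma_wait_event() times commands out and, on a stall, asks for a device reset exactly once: both timeout branches above test rf->reset before setting it and calling rf->gen_ops.request_reset(rf). Below is a minimal sketch of that one-shot reset idiom, assuming a simplified polling loop and a fake callback in place of the driver's CQP progress checks.

```c
/*
 * Simplified model of the one-shot reset pattern in irdma_wait_event():
 * poll for completion, and on timeout request a reset only if one has
 * not already been requested. Structures and counters are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_rf {
	bool reset;                          /* set once; later waiters skip the reset */
	void (*request_reset)(struct fake_rf *rf);
};

static void request_reset(struct fake_rf *rf)
{
	(void)rf;
	printf("reset requested\n");
}

/* Poll @done up to @max_polls times; request a reset exactly once on timeout. */
static int wait_event_model(struct fake_rf *rf, const bool *done, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (*done)
			return 0;
	}

	/* Timed out: only the first caller to notice triggers the reset. */
	if (!rf->reset) {
		rf->reset = true;
		rf->request_reset(rf);
	}
	return -1;                           /* -ETIMEDOUT in spirit */
}

int main(void)
{
	struct fake_rf rf = { .reset = false, .request_reset = request_reset };
	bool done = false;

	wait_event_model(&rf, &done, 1000);  /* times out, requests reset */
	wait_event_model(&rf, &done, 1000);  /* times out again, no second reset */
	return 0;
}
```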
680 * @rf: RDMA PCI function
683 enum irdma_status_code irdma_handle_cqp_op(struct irdma_pci_f *rf, in irdma_handle_cqp_op() argument
686 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_handle_cqp_op()
691 if (rf->reset) in irdma_handle_cqp_op()
701 status = irdma_wait_event(rf, cqp_request); in irdma_handle_cqp_op()
712 ibdev_err(&rf->iwdev->ibdev, in irdma_handle_cqp_op()
719 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_handle_cqp_op()
738 spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); in irdma_qp_rem_ref()
740 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); in irdma_qp_rem_ref()
745 iwdev->rf->qp_table[qp_num] = NULL; in irdma_qp_rem_ref()
746 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); in irdma_qp_rem_ref()
764 if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp) in irdma_get_qp()
767 return &iwdev->rf->qp_table[qpn]->ibqp; in irdma_get_qp()
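irdma_qp_rem_ref() and irdma_get_qp() show the qp_table discipline: lookups are bounds-checked against IW_FIRST_QPN and rf->max_qp, and removal clears the slot under qptable_lock. The sketch below models that with a plain array and a pthread mutex; the constants and types are illustrative, not the driver's.

```c
/* Simplified model of the bounds-checked QP table lookup and removal. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define FIRST_QPN 1                  /* illustrative stand-in for IW_FIRST_QPN */
#define MAX_QP    64                 /* illustrative table size */

struct fake_qp { int qp_num; };

static struct fake_qp *qp_table[MAX_QP];
static pthread_mutex_t qptable_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look up a QP by number, rejecting out-of-range values. */
static struct fake_qp *get_qp(int qpn)
{
	if (qpn < FIRST_QPN || qpn >= MAX_QP)
		return NULL;
	return qp_table[qpn];
}

/* Drop a QP from the table; later lookups return NULL. */
static void rem_qp(struct fake_qp *qp)
{
	pthread_mutex_lock(&qptable_lock);
	qp_table[qp->qp_num] = NULL;
	pthread_mutex_unlock(&qptable_lock);
}

int main(void)
{
	struct fake_qp qp = { .qp_num = 5 };

	qp_table[qp.qp_num] = &qp;
	printf("lookup: %p\n", (void *)get_qp(5));
	rem_qp(&qp);
	printf("after removal: %p\n", (void *)get_qp(5));
	printf("out of range: %p\n", (void *)get_qp(MAX_QP));
	return 0;
}
```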
810 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_sds_cmd() local
813 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_cqp_sds_cmd()
825 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_sds_cmd()
826 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_sds_cmd()
843 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_qp_suspend_resume() local
846 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); in irdma_cqp_qp_suspend_resume()
856 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_qp_suspend_resume()
857 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_qp_suspend_resume()
960 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_query_fpm_val_cmd() local
963 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_cqp_query_fpm_val_cmd()
977 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_query_fpm_val_cmd()
978 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_query_fpm_val_cmd()
995 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_commit_fpm_val_cmd() local
998 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_cqp_commit_fpm_val_cmd()
1012 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_commit_fpm_val_cmd()
1013 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_commit_fpm_val_cmd()
1026 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_cq_create_cmd() local
1027 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_cqp_cq_create_cmd()
1042 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_cq_create_cmd()
1056 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_qp_create_cmd() local
1057 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_cqp_qp_create_cmd()
1077 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_qp_create_cmd()
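Nearly every wrapper in this listing repeats the same CQP request lifecycle: irdma_alloc_and_get_cqp_request() hands back a reference-counted request, the caller fills in cqp_info, irdma_handle_cqp_op() submits it, and irdma_put_cqp_request() drops the caller's reference. The sketch below models that lifecycle with a toy refcounted request; the structures and the point at which the completion reference is dropped are illustrative assumptions, not the driver's implementation.

```c
/*
 * Simplified model of the alloc-and-get / handle / put lifecycle seen
 * throughout the irdma_cqp_*_cmd wrappers. Everything here is a toy
 * stand-in for the driver's CQP machinery.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cqp_request_model {
	int refcount;                /* one ref for the caller, one for completion */
	int opcode;
	bool waiting;                /* true: caller blocks until the command completes */
};

static struct cqp_request_model *alloc_and_get_request(bool wait)
{
	struct cqp_request_model *req = calloc(1, sizeof(*req));

	if (!req)
		return NULL;
	req->refcount = 2;           /* caller + completion path each hold one ref */
	req->waiting = wait;
	return req;
}

static void put_request(struct cqp_request_model *req)
{
	if (--req->refcount == 0)
		free(req);
}

/* Stand-in for submitting the request and (optionally) waiting for it. */
static int handle_cqp_op(struct cqp_request_model *req)
{
	printf("submitting opcode %d (wait=%d)\n", req->opcode, req->waiting);
	put_request(req);            /* completion path drops its reference */
	return 0;
}

int main(void)
{
	struct cqp_request_model *req = alloc_and_get_request(true);

	if (!req)
		return 1;
	req->opcode = 42;            /* fill "cqp_info" for the desired command */
	handle_cqp_op(req);
	put_request(req);            /* caller drops its reference */
	return 0;
}
```

The wait flag passed to irdma_alloc_and_get_cqp_request() (true or false in the fragments above) appears to select whether the caller blocks for the completion; the separate reference held by the completion path is what keeps the request valid either way.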
1085 * @rf: RDMA PCI function
1088 static void irdma_dealloc_push_page(struct irdma_pci_f *rf, in irdma_dealloc_push_page() argument
1098 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); in irdma_dealloc_push_page()
1109 cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp; in irdma_dealloc_push_page()
1111 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_dealloc_push_page()
1114 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_dealloc_push_page()
1124 struct irdma_pci_f *rf = iwdev->rf; in irdma_free_qp_rsrc() local
1128 irdma_dealloc_push_page(rf, &iwqp->sc_qp); in irdma_free_qp_rsrc()
1136 irdma_free_rsrc(rf, rf->allocated_qps, qp_num); in irdma_free_qp_rsrc()
1137 dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size, in irdma_free_qp_rsrc()
1140 dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size, in irdma_free_qp_rsrc()
1149 * @rf: RDMA PCI function
1152 void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) in irdma_cq_wq_destroy() argument
1157 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_cq_wq_destroy()
1167 irdma_handle_cqp_op(rf, cqp_request); in irdma_cq_wq_destroy()
1168 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cq_wq_destroy()
1199 struct irdma_pci_f *rf = iwdev->rf; in irdma_hw_modify_qp() local
1204 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_hw_modify_qp()
1219 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_hw_modify_qp()
1220 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_modify_qp()
1239 irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false); in irdma_hw_modify_qp()
1241 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, in irdma_hw_modify_qp()
1255 irdma_handle_cqp_op(rf, cqp_request); in irdma_hw_modify_qp()
1256 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_modify_qp()
1275 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_cq_destroy_cmd() local
1277 irdma_cq_wq_destroy(rf, cq); in irdma_cqp_cq_destroy_cmd()
1287 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_qp_destroy_cmd() local
1288 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_cqp_qp_destroy_cmd()
1305 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_qp_destroy_cmd()
1306 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_qp_destroy_cmd()
1319 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_ieq_mpa_crc_ae() local
1321 ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n"); in irdma_ieq_mpa_crc_ae()
1324 irdma_gen_ae(rf, qp, &info, false); in irdma_ieq_mpa_crc_ae()
1880 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_gather_stats_cmd() local
1881 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_cqp_gather_stats_cmd()
1896 cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp; in irdma_cqp_gather_stats_cmd()
1900 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_gather_stats_cmd()
1903 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_gather_stats_cmd()
1918 struct irdma_pci_f *rf = dev_to_rf(vsi->dev); in irdma_cqp_stats_inst_cmd() local
1919 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_cqp_stats_inst_cmd()
1937 cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp; in irdma_cqp_stats_inst_cmd()
1938 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_stats_inst_cmd()
1957 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_ceq_cmd() local
1960 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_cqp_ceq_cmd()
1970 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_ceq_cmd()
1971 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_ceq_cmd()
1987 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_aeq_cmd() local
1990 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_cqp_aeq_cmd()
2000 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_aeq_cmd()
2001 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_aeq_cmd()
2016 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_ws_node_cmd() local
2017 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_cqp_ws_node_cmd()
2024 if (!rf->sc_dev.ceq_valid) in irdma_cqp_ws_node_cmd()
2040 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_ws_node_cmd()
2050 ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n", in irdma_cqp_ws_node_cmd()
2057 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_ws_node_cmd()
2071 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_cqp_up_map_cmd() local
2072 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_cqp_up_map_cmd()
2090 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_cqp_up_map_cmd()
2091 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_up_map_cmd()
2098 * @rf: RDMA PCI function
2107 int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd, in irdma_ah_cqp_op() argument
2119 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_ah_cqp_op()
2129 cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp; in irdma_ah_cqp_op()
2133 cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp; in irdma_ah_cqp_op()
2140 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_ah_cqp_op()
2141 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_ah_cqp_op()
2203 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_puda_create_ah() local
2211 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, in irdma_puda_create_ah()
2212 &ah_info->ah_idx, &rf->next_ah); in irdma_puda_create_ah()
2220 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, in irdma_puda_create_ah()
2223 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait, in irdma_puda_create_ah()
2231 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); in irdma_puda_create_ah()
2245 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_puda_free_ah() local
2251 irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL); in irdma_puda_free_ah()
2252 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx); in irdma_puda_free_ah()
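irdma_puda_create_ah() pairs index allocation with rollback: irdma_alloc_rsrc() claims a slot in rf->allocated_ahs, and if the IRDMA_OP_AH_CREATE command fails the slot is returned with irdma_free_rsrc(); the same pairing appears for QPs, ARP entries, and WS node IDs elsewhere in this listing. Below is a minimal sketch of that allocate-then-rollback pattern; the bitmap helpers and the deliberately failing create op are illustrative.

```c
/*
 * Simplified model of the allocate-then-rollback pattern in
 * irdma_puda_create_ah(): claim a free index, issue the create command,
 * and release the index again if the command fails.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_AH 32                    /* illustrative; the driver reads this from HW caps */

static bool allocated_ahs[MAX_AH];

/* Claim the first free index, or return -1 if the pool is exhausted. */
static int alloc_rsrc(bool *map, int max)
{
	for (int i = 0; i < max; i++) {
		if (!map[i]) {
			map[i] = true;
			return i;
		}
	}
	return -1;
}

static void free_rsrc(bool *map, int idx)
{
	map[idx] = false;
}

/* Stand-in for the AH create op; fails here to exercise the rollback path. */
static int ah_create_op(int idx)
{
	(void)idx;
	return -1;
}

static int create_ah(void)
{
	int idx = alloc_rsrc(allocated_ahs, MAX_AH);

	if (idx < 0)
		return -1;
	if (ah_create_op(idx)) {
		/* Creation failed: give the index back so it can be reused. */
		free_rsrc(allocated_ahs, idx);
		return -1;
	}
	return idx;
}

int main(void)
{
	printf("create_ah: %d\n", create_ah());
	printf("index 0 free again: %d\n", allocated_ahs[0] == false);
	return 0;
}
```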
2478 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_alloc_ws_node_id() local
2482 if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id, in irdma_alloc_ws_node_id()
2496 struct irdma_pci_f *rf = dev_to_rf(dev); in irdma_free_ws_node_id() local
2498 irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id); in irdma_free_ws_node_id()
2510 if (qp->iwdev->rf->reset) in irdma_modify_qp_to_err()