Lines matching refs: iwdev
193 struct i40iw_device *iwdev = from_tasklet(iwdev, t, dpc_tasklet); in i40iw_dpc() local
195 if (iwdev->msix_shared) in i40iw_dpc()
196 i40iw_process_ceq(iwdev, iwdev->ceqlist); in i40iw_dpc()
197 i40iw_process_aeq(iwdev); in i40iw_dpc()
198 i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx); in i40iw_dpc()
208 struct i40iw_device *iwdev = iwceq->iwdev; in i40iw_ceq_dpc() local
210 i40iw_process_ceq(iwdev, iwceq); in i40iw_ceq_dpc()
211 i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx); in i40iw_ceq_dpc()
221 struct i40iw_device *iwdev = (struct i40iw_device *)data; in i40iw_irq_handler() local
223 tasklet_schedule(&iwdev->dpc_tasklet); in i40iw_irq_handler()
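The three functions above form the usual top-half/bottom-half split: i40iw_irq_handler() only schedules the tasklet, and the tasklet callbacks recover their owning structure with from_tasklet() before processing the CEQ/AEQ and re-enabling the interrupt. A minimal, self-contained sketch of that pattern follows; everything outside the kernel APIs is a hypothetical sketch_* name.

#include <linux/interrupt.h>

/* Hypothetical device; only dpc_tasklet and irq matter for the pattern. */
struct sketch_dev {
        struct tasklet_struct dpc_tasklet;
        int irq;
};

/* Bottom half: recover the owning device from the tasklet pointer. */
static void sketch_dpc(struct tasklet_struct *t)
{
        struct sketch_dev *dev = from_tasklet(dev, t, dpc_tasklet);

        /* drain CEQ/AEQ-style work here, then re-enable the interrupt */
        (void)dev;
}

/* Top half: do nothing but kick the tasklet. */
static irqreturn_t sketch_irq_handler(int irq, void *data)
{
        struct sketch_dev *dev = data;

        tasklet_schedule(&dev->dpc_tasklet);
        return IRQ_HANDLED;
}

static int sketch_request_vector(struct sketch_dev *dev)
{
        tasklet_setup(&dev->dpc_tasklet, sketch_dpc);
        return request_irq(dev->irq, sketch_irq_handler, 0, "sketch", dev);
}

Sharing one vector between the AEQ and the first CEQ, as the msix_shared lines above do, only changes which queues the bottom half drains; the scheduling pattern stays the same.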
235 static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp) in i40iw_destroy_cqp() argument
237 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_destroy_cqp()
238 struct i40iw_cqp *cqp = &iwdev->cqp; in i40iw_destroy_cqp()
243 i40iw_cleanup_pending_cqp_op(iwdev); in i40iw_destroy_cqp()
247 iwdev->cqp.scratch_array = NULL; in i40iw_destroy_cqp()
281 static void i40iw_destroy_aeq(struct i40iw_device *iwdev) in i40iw_destroy_aeq() argument
284 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_destroy_aeq()
285 struct i40iw_aeq *aeq = &iwdev->aeq; in i40iw_destroy_aeq()
287 if (!iwdev->msix_shared) in i40iw_destroy_aeq()
288 i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev); in i40iw_destroy_aeq()
289 if (iwdev->reset) in i40iw_destroy_aeq()
309 static void i40iw_destroy_ceq(struct i40iw_device *iwdev, in i40iw_destroy_ceq() argument
313 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_destroy_ceq()
315 if (iwdev->reset) in i40iw_destroy_ceq()
338 static void i40iw_dele_ceqs(struct i40iw_device *iwdev) in i40iw_dele_ceqs() argument
341 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_dele_ceqs()
342 struct i40iw_ceq *iwceq = iwdev->ceqlist; in i40iw_dele_ceqs()
343 struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl; in i40iw_dele_ceqs()
345 if (iwdev->msix_shared) { in i40iw_dele_ceqs()
346 i40iw_disable_irq(dev, msix_vec, (void *)iwdev); in i40iw_dele_ceqs()
347 i40iw_destroy_ceq(iwdev, iwceq); in i40iw_dele_ceqs()
352 for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) { in i40iw_dele_ceqs()
354 i40iw_destroy_ceq(iwdev, iwceq); in i40iw_dele_ceqs()
357 iwdev->sc_dev.ceq_valid = false; in i40iw_dele_ceqs()
367 static void i40iw_destroy_ccq(struct i40iw_device *iwdev) in i40iw_destroy_ccq() argument
369 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_destroy_ccq()
370 struct i40iw_ccq *ccq = &iwdev->ccq; in i40iw_destroy_ccq()
373 if (!iwdev->reset) in i40iw_destroy_ccq()
471 static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev, in i40iw_create_hmc_objs() argument
474 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_create_hmc_objs()
482 info.entry_type = iwdev->sd_type; in i40iw_create_hmc_objs()
522 enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev, in i40iw_obj_aligned_mem() argument
530 va = (unsigned long)iwdev->obj_next.va; in i40iw_obj_aligned_mem()
536 memptr->pa = iwdev->obj_next.pa + extra; in i40iw_obj_aligned_mem()
538 if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size)) in i40iw_obj_aligned_mem()
541 iwdev->obj_next.va = memptr->va + size; in i40iw_obj_aligned_mem()
542 iwdev->obj_next.pa = memptr->pa + size; in i40iw_obj_aligned_mem()
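i40iw_obj_aligned_mem() hands out aligned sub-ranges of the single obj_mem DMA buffer by rounding the obj_next cursor up, checking the bound, and advancing the cursor. A small sketch of that bookkeeping, assuming a hypothetical sketch_mem descriptor with va/pa/size fields:

#include <linux/types.h>
#include <linux/kernel.h>   /* ALIGN() */
#include <linux/errno.h>

/* Hypothetical mirror of a DMA region descriptor. */
struct sketch_mem {
        unsigned long va;
        dma_addr_t pa;
        unsigned long size;
};

/*
 * Carve an aligned sub-range out of one pre-allocated buffer and advance
 * the cursor, the same bookkeeping done against obj_mem/obj_next above.
 */
static int sketch_aligned_mem(const struct sketch_mem *pool,
                              struct sketch_mem *cursor,
                              struct sketch_mem *out,
                              unsigned long size, unsigned long alignment)
{
        unsigned long va = ALIGN(cursor->va, alignment);
        unsigned long extra = va - cursor->va;

        out->va = va;
        out->pa = cursor->pa + extra;
        out->size = size;

        if (out->va + size > pool->va + pool->size)
                return -ENOMEM;

        cursor->va = out->va + size;
        cursor->pa = out->pa + size;
        return 0;
}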
553 static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev) in i40iw_create_cqp() argument
558 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_create_cqp()
560 struct i40iw_cqp *cqp = &iwdev->cqp; in i40iw_create_cqp()
580 status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx), in i40iw_create_cqp()
593 cqp_init_info.hmc_profile = iwdev->resource_profile; in i40iw_create_cqp()
594 cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs; in i40iw_create_cqp()
618 i40iw_destroy_cqp(iwdev, false); in i40iw_create_cqp()
629 static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev) in i40iw_create_ccq() argument
631 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_create_ccq()
635 struct i40iw_ccq *ccq = &iwdev->ccq; in i40iw_create_ccq()
647 status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size, in i40iw_create_ccq()
680 static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev, in i40iw_configure_ceq_vector() argument
687 if (iwdev->msix_shared && !ceq_id) { in i40iw_configure_ceq_vector()
688 tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc); in i40iw_configure_ceq_vector()
689 status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev); in i40iw_configure_ceq_vector()
717 static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev, in i40iw_create_ceq() argument
723 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_create_ceq()
728 iwceq->iwdev = iwdev; in i40iw_create_ceq()
730 iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; in i40iw_create_ceq()
739 info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; in i40iw_create_ceq()
742 scratch = (uintptr_t)&iwdev->cqp.sc_cqp; in i40iw_create_ceq()
753 void i40iw_request_reset(struct i40iw_device *iwdev) in i40iw_request_reset() argument
755 struct i40e_info *ldev = iwdev->ldev; in i40iw_request_reset()
757 ldev->ops->request_reset(ldev, iwdev->client, 1); in i40iw_request_reset()
769 static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev, in i40iw_setup_ceqs() argument
781 iwdev->iw_qvlist); in i40iw_setup_ceqs()
789 num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs); in i40iw_setup_ceqs()
790 iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL); in i40iw_setup_ceqs()
791 if (!iwdev->ceqlist) { in i40iw_setup_ceqs()
795 i = (iwdev->msix_shared) ? 0 : 1; in i40iw_setup_ceqs()
797 iwceq = &iwdev->ceqlist[ceq_id]; in i40iw_setup_ceqs()
798 status = i40iw_create_ceq(iwdev, iwceq, ceq_id); in i40iw_setup_ceqs()
804 msix_vec = &iwdev->iw_msixtbl[i]; in i40iw_setup_ceqs()
807 status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec); in i40iw_setup_ceqs()
809 i40iw_destroy_ceq(iwdev, iwceq); in i40iw_setup_ceqs()
812 i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx); in i40iw_setup_ceqs()
813 iwdev->ceqs_count++; in i40iw_setup_ceqs()
816 if (status && !iwdev->ceqs_count) { in i40iw_setup_ceqs()
817 kfree(iwdev->ceqlist); in i40iw_setup_ceqs()
818 iwdev->ceqlist = NULL; in i40iw_setup_ceqs()
821 iwdev->sc_dev.ceq_valid = true; in i40iw_setup_ceqs()
834 static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev) in i40iw_configure_aeq_vector() argument
836 struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl; in i40iw_configure_aeq_vector()
839 if (!iwdev->msix_shared) { in i40iw_configure_aeq_vector()
840 tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc); in i40iw_configure_aeq_vector()
841 ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev); in i40iw_configure_aeq_vector()
858 static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev) in i40iw_create_aeq() argument
862 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_create_aeq()
863 struct i40iw_aeq *aeq = &iwdev->aeq; in i40iw_create_aeq()
867 aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt + in i40iw_create_aeq()
868 iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; in i40iw_create_aeq()
899 static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev) in i40iw_setup_aeq() argument
901 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_setup_aeq()
904 status = i40iw_create_aeq(iwdev); in i40iw_setup_aeq()
908 status = i40iw_configure_aeq_vector(iwdev); in i40iw_setup_aeq()
910 i40iw_destroy_aeq(iwdev); in i40iw_setup_aeq()
914 if (!iwdev->msix_shared) in i40iw_setup_aeq()
915 i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx); in i40iw_setup_aeq()
925 static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev) in i40iw_initialize_ilq() argument
942 status = i40iw_puda_create_rsrc(&iwdev->vsi, &info); in i40iw_initialize_ilq()
954 static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev) in i40iw_initialize_ieq() argument
962 info.qp_id = iwdev->vsi.exception_lan_queue; in i40iw_initialize_ieq()
967 info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN; in i40iw_initialize_ieq()
969 status = i40iw_puda_create_rsrc(&iwdev->vsi, &info); in i40iw_initialize_ieq()
981 struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; in i40iw_reinitialize_ieq() local
983 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false); in i40iw_reinitialize_ieq()
984 if (i40iw_initialize_ieq(iwdev)) { in i40iw_reinitialize_ieq()
985 iwdev->reset = true; in i40iw_reinitialize_ieq()
986 i40iw_request_reset(iwdev); in i40iw_reinitialize_ieq()
998 static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev) in i40iw_hmc_setup() argument
1002 iwdev->sd_type = I40IW_SD_TYPE_DIRECT; in i40iw_hmc_setup()
1003 status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT); in i40iw_hmc_setup()
1006 status = i40iw_create_hmc_objs(iwdev, true); in i40iw_hmc_setup()
1009 iwdev->init_state = HMC_OBJS_CREATED; in i40iw_hmc_setup()
1018 static void i40iw_del_init_mem(struct i40iw_device *iwdev) in i40iw_del_init_mem() argument
1020 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_del_init_mem()
1022 i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem); in i40iw_del_init_mem()
1025 kfree(iwdev->mem_resources); in i40iw_del_init_mem()
1026 iwdev->mem_resources = NULL; in i40iw_del_init_mem()
1027 kfree(iwdev->ceqlist); in i40iw_del_init_mem()
1028 iwdev->ceqlist = NULL; in i40iw_del_init_mem()
1029 kfree(iwdev->iw_msixtbl); in i40iw_del_init_mem()
1030 iwdev->iw_msixtbl = NULL; in i40iw_del_init_mem()
1031 kfree(iwdev->hmc_info_mem); in i40iw_del_init_mem()
1032 iwdev->hmc_info_mem = NULL; in i40iw_del_init_mem()
1040 static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx) in i40iw_del_macip_entry() argument
1042 struct i40iw_cqp *iwcqp = &iwdev->cqp; in i40iw_del_macip_entry()
1059 status = i40iw_handle_cqp_op(iwdev, cqp_request); in i40iw_del_macip_entry()
1070 static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev, in i40iw_add_mac_ipaddr_entry() argument
1075 struct i40iw_cqp *iwcqp = &iwdev->cqp; in i40iw_add_mac_ipaddr_entry()
1096 status = i40iw_handle_cqp_op(iwdev, cqp_request); in i40iw_add_mac_ipaddr_entry()
1111 static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev, in i40iw_alloc_local_mac_ipaddr_entry() argument
1114 struct i40iw_cqp *iwcqp = &iwdev->cqp; in i40iw_alloc_local_mac_ipaddr_entry()
1133 status = i40iw_handle_cqp_op(iwdev, cqp_request); in i40iw_alloc_local_mac_ipaddr_entry()
1151 static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev, in i40iw_alloc_set_mac_ipaddr() argument
1156 status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx); in i40iw_alloc_set_mac_ipaddr()
1158 status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr, in i40iw_alloc_set_mac_ipaddr()
1159 (u8)iwdev->mac_ip_table_idx); in i40iw_alloc_set_mac_ipaddr()
1161 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); in i40iw_alloc_set_mac_ipaddr()
1170 static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev) in i40iw_add_ipv6_addr() argument
1180 (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) || in i40iw_add_ipv6_addr()
1181 (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) { in i40iw_add_ipv6_addr()
1192 i40iw_manage_arp_cache(iwdev, in i40iw_add_ipv6_addr()
1207 static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev) in i40iw_add_ipv4_addr() argument
1216 (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) || in i40iw_add_ipv4_addr()
1217 (dev == iwdev->netdev)) && (READ_ONCE(dev->flags) & IFF_UP)) { in i40iw_add_ipv4_addr()
1224 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, in i40iw_add_ipv4_addr()
1229 i40iw_manage_arp_cache(iwdev, in i40iw_add_ipv4_addr()
1248 static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev) in i40iw_add_mac_ip() argument
1250 struct net_device *netdev = iwdev->netdev; in i40iw_add_mac_ip()
1253 status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr); in i40iw_add_mac_ip()
1256 i40iw_add_ipv4_addr(iwdev); in i40iw_add_mac_ip()
1257 i40iw_add_ipv6_addr(iwdev); in i40iw_add_mac_ip()
1301 static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, in i40iw_initialize_dev() argument
1305 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_initialize_dev()
1320 iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL); in i40iw_initialize_dev()
1321 if (!iwdev->hmc_info_mem) in i40iw_initialize_dev()
1324 iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem; in i40iw_initialize_dev()
1325 dev->hmc_info = &iwdev->hw.hmc; in i40iw_initialize_dev()
1326 dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1); in i40iw_initialize_dev()
1327 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, in i40iw_initialize_dev()
1333 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, in i40iw_initialize_dev()
1342 info.hw = &iwdev->hw; in i40iw_initialize_dev()
1352 iwdev->dcb = true; in i40iw_initialize_dev()
1354 i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb); in i40iw_initialize_dev()
1356 status = i40iw_device_init(&iwdev->sc_dev, &info); in i40iw_initialize_dev()
1361 vsi_info.dev = &iwdev->sc_dev; in i40iw_initialize_dev()
1362 vsi_info.back_vsi = (void *)iwdev; in i40iw_initialize_dev()
1365 i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info); in i40iw_initialize_dev()
1377 i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); in i40iw_initialize_dev()
1381 kfree(iwdev->hmc_info_mem); in i40iw_initialize_dev()
1382 iwdev->hmc_info_mem = NULL; in i40iw_initialize_dev()
1417 static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, in i40iw_save_msix_info() argument
1431 iwdev->msix_count = ldev->msix_count; in i40iw_save_msix_info()
1433 size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; in i40iw_save_msix_info()
1435 size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1; in i40iw_save_msix_info()
1436 iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL); in i40iw_save_msix_info()
1438 if (!iwdev->iw_msixtbl) in i40iw_save_msix_info()
1440 iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]); in i40iw_save_msix_info()
1441 iw_qvlist = iwdev->iw_qvlist; in i40iw_save_msix_info()
1443 iw_qvlist->num_vectors = iwdev->msix_count; in i40iw_save_msix_info()
1444 if (iwdev->msix_count <= num_online_cpus()) in i40iw_save_msix_info()
1445 iwdev->msix_shared = true; in i40iw_save_msix_info()
1446 for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) { in i40iw_save_msix_info()
1447 iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry; in i40iw_save_msix_info()
1448 iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector; in i40iw_save_msix_info()
1449 iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx; in i40iw_save_msix_info()
1452 if (iwdev->msix_shared) in i40iw_save_msix_info()
1461 iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx; in i40iw_save_msix_info()
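i40iw_save_msix_info() sizes a single kzalloc() to hold both the MSI-X vector table and the qvlist handed back to the LAN driver, then points iw_qvlist just past the table. A generic sketch of that single-allocation layout follows; the struct names are hypothetical and the driver's exact size arithmetic is not reproduced.

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical per-vector entry and the list handed to the LAN driver. */
struct sketch_vec {
        u32 idx;
        u32 irq;
};

struct sketch_qv_info {
        u32 v_idx;
        u32 ceq_idx;
};

struct sketch_qvlist {
        u32 num_vectors;
        struct sketch_qv_info qv_info[];
};

/* One allocation: the vector table first, the qvlist right behind it. */
static struct sketch_vec *sketch_alloc_vectors(u32 count,
                                               struct sketch_qvlist **qvlist)
{
        size_t size = count * sizeof(struct sketch_vec) +
                      sizeof(struct sketch_qvlist) +
                      count * sizeof(struct sketch_qv_info);
        struct sketch_vec *tbl = kzalloc(size, GFP_KERNEL);

        if (!tbl)
                return NULL;

        *qvlist = (struct sketch_qvlist *)&tbl[count];
        (*qvlist)->num_vectors = count;
        return tbl;
}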
1473 static void i40iw_deinit_device(struct i40iw_device *iwdev) in i40iw_deinit_device() argument
1475 struct i40e_info *ldev = iwdev->ldev; in i40iw_deinit_device()
1477 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_deinit_device()
1479 i40iw_pr_info("state = %d\n", iwdev->init_state); in i40iw_deinit_device()
1480 if (iwdev->param_wq) in i40iw_deinit_device()
1481 destroy_workqueue(iwdev->param_wq); in i40iw_deinit_device()
1483 switch (iwdev->init_state) { in i40iw_deinit_device()
1485 iwdev->iw_status = 0; in i40iw_deinit_device()
1486 i40iw_port_ibevent(iwdev); in i40iw_deinit_device()
1487 i40iw_destroy_rdma_device(iwdev->iwibdev); in i40iw_deinit_device()
1490 if (!iwdev->reset) in i40iw_deinit_device()
1491 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); in i40iw_deinit_device()
1494 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); in i40iw_deinit_device()
1497 i40iw_dele_ceqs(iwdev); in i40iw_deinit_device()
1500 i40iw_destroy_aeq(iwdev); in i40iw_deinit_device()
1503 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); in i40iw_deinit_device()
1506 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); in i40iw_deinit_device()
1509 i40iw_destroy_ccq(iwdev); in i40iw_deinit_device()
1512 i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); in i40iw_deinit_device()
1515 i40iw_destroy_cqp(iwdev, true); in i40iw_deinit_device()
1518 i40iw_cleanup_cm_core(&iwdev->cm_core); in i40iw_deinit_device()
1519 if (iwdev->vsi.pestat) { in i40iw_deinit_device()
1520 i40iw_vsi_stats_free(&iwdev->vsi); in i40iw_deinit_device()
1521 kfree(iwdev->vsi.pestat); in i40iw_deinit_device()
1523 i40iw_del_init_mem(iwdev); in i40iw_deinit_device()
1527 i40iw_pr_err("bad init_state = %d\n", iwdev->init_state); in i40iw_deinit_device()
1532 kfree(iwdev->hdl); in i40iw_deinit_device()
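i40iw_deinit_device() tears down in reverse order by switching on init_state and falling through the earlier stages, so a partially initialized device unwinds exactly as far as it got. A reduced sketch of the idiom with a hypothetical four-state subset:

#include <linux/kernel.h>

enum sketch_state {
        SKETCH_INITIAL,
        SKETCH_CQP_CREATED,
        SKETCH_CCQ_CREATED,
        SKETCH_RDMA_DEV_REGISTERED,
};

struct sketch_dev {
        enum sketch_state init_state;
};

static void sketch_deinit(struct sketch_dev *dev)
{
        switch (dev->init_state) {
        case SKETCH_RDMA_DEV_REGISTERED:
                /* unregister the RDMA device */
                fallthrough;
        case SKETCH_CCQ_CREATED:
                /* destroy the control completion queue */
                fallthrough;
        case SKETCH_CQP_CREATED:
                /* destroy the control QP */
                fallthrough;
        case SKETCH_INITIAL:
                /* free init-time memory */
                break;
        }
}

Each case label corresponds to the farthest init step that completed; i40iw_open() advances init_state after every successful stage for exactly this reason.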
1549 struct i40iw_device *iwdev = &hdl->device; in i40iw_setup_init_state() local
1550 struct i40iw_sc_dev *dev = &iwdev->sc_dev; in i40iw_setup_init_state()
1555 iwdev->mpa_version = mpa_version; in i40iw_setup_init_state()
1556 iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ? in i40iw_setup_init_state()
1559 iwdev->max_rdma_vfs = in i40iw_setup_init_state()
1560 (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0; in i40iw_setup_init_state()
1561 iwdev->max_enabled_vfs = iwdev->max_rdma_vfs; in i40iw_setup_init_state()
1562 iwdev->netdev = ldev->netdev; in i40iw_setup_init_state()
1565 iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET; in i40iw_setup_init_state()
1567 iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET; in i40iw_setup_init_state()
1569 status = i40iw_save_msix_info(iwdev, ldev); in i40iw_setup_init_state()
1572 iwdev->hw.pcidev = ldev->pcidev; in i40iw_setup_init_state()
1573 iwdev->hw.hw_addr = ldev->hw_addr; in i40iw_setup_init_state()
1574 status = i40iw_allocate_dma_mem(&iwdev->hw, in i40iw_setup_init_state()
1575 &iwdev->obj_mem, 8192, 4096); in i40iw_setup_init_state()
1578 iwdev->obj_next = iwdev->obj_mem; in i40iw_setup_init_state()
1580 init_waitqueue_head(&iwdev->vchnl_waitq); in i40iw_setup_init_state()
1582 init_waitqueue_head(&iwdev->close_wq); in i40iw_setup_init_state()
1584 status = i40iw_initialize_dev(iwdev, ldev); in i40iw_setup_init_state()
1587 kfree(iwdev->iw_msixtbl); in i40iw_setup_init_state()
1588 i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem); in i40iw_setup_init_state()
1589 iwdev->iw_msixtbl = NULL; in i40iw_setup_init_state()
1600 static void i40iw_get_used_rsrc(struct i40iw_device *iwdev) in i40iw_get_used_rsrc() argument
1602 iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0); in i40iw_get_used_rsrc()
1603 iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0); in i40iw_get_used_rsrc()
1604 iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0); in i40iw_get_used_rsrc()
1605 iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0); in i40iw_get_used_rsrc()
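i40iw_get_used_rsrc() records, for each resource bitmap, the index of the first clear bit; assuming the in-use entries were set contiguously from index 0 (as they are right after hardware-resource init), that index doubles as the in-use count. A one-bitmap sketch of the idiom, with hypothetical names:

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#define SKETCH_MAX_QP 1024

static DECLARE_BITMAP(sketch_allocated_qps, SKETCH_MAX_QP);

/* First free index == number of QPs taken from the front of the bitmap. */
static unsigned long sketch_used_qps(void)
{
        return find_next_zero_bit(sketch_allocated_qps, SKETCH_MAX_QP, 0);
}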
1620 struct i40iw_device *iwdev; in i40iw_open() local
1632 iwdev = &hdl->device; in i40iw_open()
1633 iwdev->hdl = hdl; in i40iw_open()
1634 dev = &iwdev->sc_dev; in i40iw_open()
1635 if (i40iw_setup_cm_core(iwdev)) { in i40iw_open()
1636 kfree(iwdev->hdl); in i40iw_open()
1640 dev->back_dev = (void *)iwdev; in i40iw_open()
1641 iwdev->ldev = &hdl->ldev; in i40iw_open()
1642 iwdev->client = client; in i40iw_open()
1643 mutex_init(&iwdev->pbl_mutex); in i40iw_open()
1650 iwdev->init_state = INITIAL_STATE; in i40iw_open()
1653 status = i40iw_create_cqp(iwdev); in i40iw_open()
1656 iwdev->init_state = CQP_CREATED; in i40iw_open()
1657 status = i40iw_hmc_setup(iwdev); in i40iw_open()
1660 status = i40iw_create_ccq(iwdev); in i40iw_open()
1663 iwdev->init_state = CCQ_CREATED; in i40iw_open()
1664 status = i40iw_initialize_ilq(iwdev); in i40iw_open()
1667 iwdev->init_state = ILQ_CREATED; in i40iw_open()
1668 status = i40iw_initialize_ieq(iwdev); in i40iw_open()
1671 iwdev->init_state = IEQ_CREATED; in i40iw_open()
1672 status = i40iw_setup_aeq(iwdev); in i40iw_open()
1675 iwdev->init_state = AEQ_CREATED; in i40iw_open()
1676 status = i40iw_setup_ceqs(iwdev, ldev); in i40iw_open()
1685 iwdev->init_state = CEQ_CREATED; in i40iw_open()
1686 status = i40iw_initialize_hw_resources(iwdev); in i40iw_open()
1689 i40iw_get_used_rsrc(iwdev); in i40iw_open()
1691 status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc); in i40iw_open()
1694 iwdev->init_state = PBLE_CHUNK_MEM; in i40iw_open()
1695 iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); in i40iw_open()
1696 status = i40iw_add_mac_ip(iwdev); in i40iw_open()
1699 iwdev->init_state = IP_ADDR_REGISTERED; in i40iw_open()
1700 if (i40iw_register_rdma_device(iwdev)) { in i40iw_open()
1705 iwdev->init_state = RDMA_DEV_REGISTERED; in i40iw_open()
1706 iwdev->iw_status = 1; in i40iw_open()
1707 i40iw_port_ibevent(iwdev); in i40iw_open()
1708 iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM); in i40iw_open()
1709 if (iwdev->param_wq == NULL) in i40iw_open()
1715 i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state); in i40iw_open()
1716 i40iw_deinit_device(iwdev); in i40iw_open()
1728 struct i40iw_device *iwdev = dwork->iwdev; in i40iw_l2params_worker() local
1730 i40iw_change_l2params(&iwdev->vsi, &dwork->l2params); in i40iw_l2params_worker()
1731 atomic_dec(&iwdev->params_busy); in i40iw_l2params_worker()
1747 struct i40iw_device *iwdev; in i40iw_l2param_change() local
1754 iwdev = &hdl->device; in i40iw_l2param_change()
1756 if (atomic_read(&iwdev->params_busy)) in i40iw_l2param_change()
1764 atomic_inc(&iwdev->params_busy); in i40iw_l2param_change()
1766 work->iwdev = iwdev; in i40iw_l2param_change()
1771 l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu; in i40iw_l2param_change()
1774 queue_work(iwdev->param_wq, &work->work); in i40iw_l2param_change()
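i40iw_l2param_change() defers the parameter update to the ordered param_wq and uses the atomic params_busy flag to drop overlapping requests; i40iw_l2params_worker() applies the copied parameters and clears the flag. A minimal sketch of the same deferral, with hypothetical sketch_* names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/errno.h>

struct sketch_dev {
        struct workqueue_struct *param_wq;
        atomic_t params_busy;
};

struct sketch_param_work {
        struct work_struct work;
        struct sketch_dev *dev;
        /* a copy of the new parameters would live here */
};

static void sketch_params_worker(struct work_struct *work)
{
        struct sketch_param_work *pw =
                container_of(work, struct sketch_param_work, work);

        /* apply the copied parameters to the device here */
        atomic_dec(&pw->dev->params_busy);
        kfree(pw);
}

static int sketch_queue_param_change(struct sketch_dev *dev)
{
        struct sketch_param_work *pw;

        if (atomic_read(&dev->params_busy))
                return -EBUSY;          /* an update is already in flight */

        pw = kzalloc(sizeof(*pw), GFP_KERNEL); /* GFP_ATOMIC if the caller cannot sleep */
        if (!pw)
                return -ENOMEM;

        atomic_inc(&dev->params_busy);
        pw->dev = dev;
        INIT_WORK(&pw->work, sketch_params_worker);
        queue_work(dev->param_wq, &pw->work);
        return 0;
}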
1787 struct i40iw_device *iwdev; in i40iw_close() local
1794 iwdev = &hdl->device; in i40iw_close()
1795 iwdev->closing = true; in i40iw_close()
1798 iwdev->reset = true; in i40iw_close()
1800 i40iw_cm_teardown_connections(iwdev, NULL, NULL, true); in i40iw_close()
1801 destroy_workqueue(iwdev->virtchnl_wq); in i40iw_close()
1802 i40iw_deinit_device(iwdev); in i40iw_close()
1823 struct i40iw_device *iwdev; in i40iw_vf_reset() local
1830 iwdev = (struct i40iw_device *)dev->back_dev; in i40iw_vf_reset()
1837 spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags); in i40iw_vf_reset()
1839 spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags); in i40iw_vf_reset()
1930 struct i40iw_device *iwdev; in i40iw_virtchnl_receive() local
1941 iwdev = dev->back_dev; in i40iw_virtchnl_receive()
1946 atomic_dec(&iwdev->vchnl_msgs); in i40iw_virtchnl_receive()
1947 wake_up(&iwdev->vchnl_waitq); in i40iw_virtchnl_receive()
1964 struct i40iw_device *iwdev; in i40iw_vf_clear_to_send() local
1967 iwdev = dev->back_dev; in i40iw_vf_clear_to_send()
1970 (atomic_read(&iwdev->vchnl_msgs) == 0)) in i40iw_vf_clear_to_send()
1977 (atomic_read(&iwdev->vchnl_msgs) == 0), in i40iw_vf_clear_to_send()
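i40iw_virtchnl_receive() decrements vchnl_msgs and wakes vchnl_waitq, while i40iw_vf_clear_to_send() waits, with a timeout, for the counter to reach zero. A minimal sketch of that drain handshake (sketch_* names are hypothetical):

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

struct sketch_chan {
        wait_queue_head_t waitq;
        atomic_t msgs;
};

/* Receive side: account for the completed message and wake any waiter. */
static void sketch_msg_done(struct sketch_chan *ch)
{
        atomic_dec(&ch->msgs);
        wake_up(&ch->waitq);
}

/* Send side: wait (bounded) until nothing is in flight.
 * Returns true if the channel drained before the timeout. */
static bool sketch_drain(struct sketch_chan *ch, unsigned int timeout_ms)
{
        return wait_event_timeout(ch->waitq,
                                  atomic_read(&ch->msgs) == 0,
                                  msecs_to_jiffies(timeout_ms)) != 0;
}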
2001 struct i40iw_device *iwdev; in i40iw_virtchnl_send() local
2007 iwdev = dev->back_dev; in i40iw_virtchnl_send()
2008 ldev = iwdev->ldev; in i40iw_virtchnl_send()