/Linux-v5.4/drivers/infiniband/hw/i40iw/ |
D | i40iw_ctrl.c |
    75  static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,  in i40iw_get_cqp_reg_info() argument
    80  if (cqp->dev->is_pf) {  in i40iw_get_cqp_reg_info()
    81  *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);  in i40iw_get_cqp_reg_info()
    85  *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);  in i40iw_get_cqp_reg_info()
    98  struct i40iw_sc_cqp *cqp,  in i40iw_cqp_poll_registers() argument
   107  i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);  in i40iw_cqp_poll_registers()
   109  error = (cqp->dev->is_pf) ?  in i40iw_cqp_poll_registers()
   110  i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :  in i40iw_cqp_poll_registers()
   111  i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);  in i40iw_cqp_poll_registers()
   116  I40IW_RING_MOVE_TAIL(cqp->sq_ring);  in i40iw_cqp_poll_registers()
   [all …]
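The i40iw_ctrl.c matches show the PF/VF register split and the poll loop that watches the CQP tail and error-code registers. Below is a minimal sketch of that pattern, assuming only the i40iw_rd32() accessor and the register defines visible above; EXAMPLE_CQPTAIL_ERR_BIT, the loop bound, and the delay are illustrative placeholders, not the driver's actual decode or timing.

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Hypothetical placeholder: the bit used to flag a CQP error inside the
 * tail register is not visible in the listing above. */
#define EXAMPLE_CQPTAIL_ERR_BIT		BIT(31)

/* Read the CQP tail/error state through the PF or VF register block. */
static void example_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
				     u32 *tail, u32 *error)
{
	u32 val;

	if (cqp->dev->is_pf)
		val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
	else
		val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);

	*error = !!(val & EXAMPLE_CQPTAIL_ERR_BIT);	/* illustrative decode */
	*tail = val & ~EXAMPLE_CQPTAIL_ERR_BIT;
}

/* Busy-poll until the hardware tail reaches the expected position. */
static int example_cqp_poll_tail(struct i40iw_sc_cqp *cqp, u32 expected_tail)
{
	u32 tail, error, codes;
	unsigned int i;

	for (i = 0; i < 1000; i++) {			/* illustrative bound */
		example_get_cqp_reg_info(cqp, &tail, &error);
		if (error) {
			/* The CQPERRCODES register holds the failure details. */
			codes = cqp->dev->is_pf ?
				i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
				i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
			pr_debug("example: CQP error codes 0x%08x\n", codes);
			return -EIO;
		}
		if (tail == expected_tail)
			return 0;
		udelay(10);				/* illustrative back-off */
	}
	return -ETIMEDOUT;
}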
|
D | i40iw_utils.c |
   362  struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)  in i40iw_get_cqp_request() argument
   367  spin_lock_irqsave(&cqp->req_lock, flags);  in i40iw_get_cqp_request()
   368  if (!list_empty(&cqp->cqp_avail_reqs)) {  in i40iw_get_cqp_request()
   369  cqp_request = list_entry(cqp->cqp_avail_reqs.next,  in i40iw_get_cqp_request()
   373  spin_unlock_irqrestore(&cqp->req_lock, flags);  in i40iw_get_cqp_request()
   401  void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)  in i40iw_free_cqp_request() argument
   403  struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);  in i40iw_free_cqp_request()
   413  spin_lock_irqsave(&cqp->req_lock, flags);  in i40iw_free_cqp_request()
   414  list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);  in i40iw_free_cqp_request()
   415  spin_unlock_irqrestore(&cqp->req_lock, flags);  in i40iw_free_cqp_request()
   [all …]
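i40iw_utils.c implements the request pool behind these calls: a spinlock-protected free list of pre-allocated i40iw_cqp_request structures. A hedged sketch of the get/free pair follows, using only the fields visible above (req_lock, cqp_avail_reqs, and the request's list member); the wait path of the real i40iw_get_cqp_request() and the exact list-removal call are truncated in the listing, so they are simplified here.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Borrow a request from the free list, or return NULL if the pool is dry.
 * (The real driver can also wait for one; that path is not shown above.) */
static struct i40iw_cqp_request *example_get_cqp_request(struct i40iw_cqp *cqp)
{
	struct i40iw_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
					 struct i40iw_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);

	return cqp_request;
}

/* Return a request to the tail of the free list under the same lock. */
static void example_free_cqp_request(struct i40iw_cqp *cqp,
				     struct i40iw_cqp_request *cqp_request)
{
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
	spin_unlock_irqrestore(&cqp->req_lock, flags);
}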
|
D | i40iw_vf.c |
    51  enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,  in i40iw_manage_vf_pble_bp() argument
    59  wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);  in i40iw_manage_vf_pble_bp()
    70  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);  in i40iw_manage_vf_pble_bp()
    76  i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);  in i40iw_manage_vf_pble_bp()
    79  i40iw_sc_cqp_post_sq(cqp);  in i40iw_manage_vf_pble_bp()
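i40iw_manage_vf_pble_bp() above follows the standard CQP command shape: reserve the next send-queue WQE, fill it, write a header whose valid bit carries the ring's current polarity, dump the WQE for debugging, and ring the doorbell with i40iw_sc_cqp_post_sq(). A sketch of that skeleton is below; set_64bit_val() and the I40IW_ERR_RING_FULL status are assumed to exist elsewhere in the driver (they are not part of this listing), and the header offset is hypothetical.

/* Illustrative CQP command skeleton; the payload words and the header
 * offset (24) are placeholders for whatever the specific command needs. */
static enum i40iw_status_code example_post_cqp_command(struct i40iw_sc_cqp *cqp,
						       u64 scratch)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;	/* assumed status code */

	/* Command-specific payload words would be written here with
	 * set_64bit_val(wqe, offset, value). */

	header = LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	wmb();	/* ensure the payload lands before the valid bit is visible */
	set_64bit_val(wqe, 24, header);		/* hypothetical header offset */

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "EXAMPLE CQP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	i40iw_sc_cqp_post_sq(cqp);

	return 0;	/* success */
}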
|
D | i40iw_main.c |
   242  struct i40iw_cqp *cqp = &iwdev->cqp;  in i40iw_destroy_cqp() local
   245  dev->cqp_ops->cqp_destroy(dev->cqp);  in i40iw_destroy_cqp()
   249  i40iw_free_dma_mem(dev->hw, &cqp->sq);  in i40iw_destroy_cqp()
   250  kfree(cqp->scratch_array);  in i40iw_destroy_cqp()
   251  iwdev->cqp.scratch_array = NULL;  in i40iw_destroy_cqp()
   253  kfree(cqp->cqp_requests);  in i40iw_destroy_cqp()
   254  cqp->cqp_requests = NULL;  in i40iw_destroy_cqp()
   499  return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,  in i40iw_create_hmc_objs()
   564  struct i40iw_cqp *cqp = &iwdev->cqp;  in i40iw_create_cqp() local
   568  cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);  in i40iw_create_cqp()
   [all …]
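i40iw_create_cqp()/i40iw_destroy_cqp() above allocate and tear down the backing storage: a DMA region for the send queue plus kcalloc'd scratch_array and cqp_requests arrays. The kcalloc at line 568 is what feeds the free list used by i40iw_get_cqp_request(); a minimal sketch of that wiring follows, assuming the lock and list initialization that the listing truncates.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Pre-allocate one request per SQ slot and park them all on the free
 * list.  The spinlock/list initialization is assumed; the listing above
 * only shows the kcalloc itself. */
static int example_init_cqp_request_pool(struct i40iw_cqp *cqp, u32 sqsize)
{
	u32 i;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests),
				    GFP_KERNEL);
	if (!cqp->cqp_requests)
		return -ENOMEM;

	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	for (i = 0; i < sqsize; i++)
		list_add_tail(&cqp->cqp_requests[i].list,
			      &cqp->cqp_avail_reqs);

	return 0;
}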
|
D | i40iw_hw.c |
   148  i40iw_put_cqp_request(&iwdev->cqp, cqp_request);  in i40iw_cqp_ce_handler()
   152  i40iw_put_cqp_request(&iwdev->cqp, cqp_request);  in i40iw_cqp_ce_handler()
   453  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);  in i40iw_cqp_manage_abvpt_cmd()
   466  cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;  in i40iw_cqp_manage_abvpt_cmd()
   539  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);  in i40iw_manage_arp_cache()
   552  cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;  in i40iw_manage_arp_cache()
   556  cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;  in i40iw_manage_arp_cache()
   560  cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;  in i40iw_manage_arp_cache()
   598  struct i40iw_cqp *iwcqp = &iwdev->cqp;  in i40iw_manage_qhash()
   652  cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;  in i40iw_manage_qhash()
   [all …]
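Every i40iw_hw.c caller above follows the same lifecycle: take a request from the pool (optionally willing to wait), point the command union's .cqp field at &iwdev->cqp.sc_cqp, fill the command-specific fields, and submit; the completion handler i40iw_cqp_ce_handler() then drops references with i40iw_put_cqp_request(). A sketch of one such caller follows, where cqp_request->info, struct cqp_commands_info, and i40iw_handle_cqp_op() are assumptions about the surrounding driver rather than items shown in this listing.

/* Illustrative ARP-cache command modeled on the i40iw_manage_arp_cache()
 * matches above.  Only the get_cqp_request() call and the
 * .cqp = &iwdev->cqp.sc_cqp assignment come from the listing; the info
 * layout and the submit helper are assumed. */
static void example_add_arp_entry(struct i40iw_device *iwdev, bool wait)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;			/* assumed layout */
	cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	/* ...MAC/IP fields for the entry would be filled in here... */

	if (i40iw_handle_cqp_op(iwdev, cqp_request))	/* assumed helper */
		pr_debug("example: CQP ARP command failed\n");
	/* The completion path (i40iw_cqp_ce_handler() above) drops its
	 * reference with i40iw_put_cqp_request(). */
}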
|
D | i40iw_p.h |
    52  void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
    54  u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
    96  enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
    99  enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
   102  enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,
|
D | i40iw_type.h |
    486  struct i40iw_sc_cqp *cqp;  member
    559  struct i40iw_sc_cqp *cqp;  member
   1219  struct i40iw_sc_cqp *cqp;  member
   1225  struct i40iw_sc_cqp *cqp;  member
   1231  struct i40iw_sc_cqp *cqp;  member
   1238  struct i40iw_sc_cqp *cqp;  member
   1244  struct i40iw_sc_cqp *cqp;  member
   1250  struct i40iw_sc_cqp *cqp;  member
   1262  struct i40iw_sc_cqp *cqp;  member
   1305  struct i40iw_sc_cqp *cqp;  member
   [all …]
|
D | i40iw_puda.c |
   518  struct i40iw_sc_cqp *cqp;  in i40iw_puda_qp_wqe() local
   524  cqp = dev->cqp;  in i40iw_puda_qp_wqe()
   525  wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);  in i40iw_puda_qp_wqe()
   536  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);  in i40iw_puda_qp_wqe()
   540  i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);  in i40iw_puda_qp_wqe()
   541  i40iw_sc_cqp_post_sq(cqp);  in i40iw_puda_qp_wqe()
   542  status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,  in i40iw_puda_qp_wqe()
   631  struct i40iw_sc_cqp *cqp;  in i40iw_puda_cq_wqe() local
   636  cqp = dev->cqp;  in i40iw_puda_cq_wqe()
   637  wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);  in i40iw_puda_cq_wqe()
   [all …]
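The PUDA helpers take the synchronous route instead of the request pool: they build the WQE directly on dev->cqp with a zero scratch value, post it, and then block in the poll_for_cqp_op_done() callback. A hedged sketch of that flavor is below; the listing truncates the poll call after its first argument, so the opcode and completion-info parameters used here are assumptions, as is the I40IW_ERR_RING_FULL status.

/* Illustrative synchronous CQP command in the PUDA style above. */
static enum i40iw_status_code example_sync_cqp_cmd(struct i40iw_sc_dev *dev,
						   u8 opcode)
{
	struct i40iw_sc_cqp *cqp = dev->cqp;
	struct i40iw_ccq_cqe_info compl_info;	/* assumed completion record */
	u64 *wqe;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;	/* assumed status code */

	/* ...payload and the polarity/valid header as in the earlier sketch... */

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "EXAMPLE PUDA WQE", wqe, 32);
	i40iw_sc_cqp_post_sq(cqp);

	/* Block until the CQP reports completion of this opcode (the extra
	 * arguments are an assumed signature). */
	return dev->cqp_ops->poll_for_cqp_op_done(cqp, opcode, &compl_info);
}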
|
D | i40iw.h |
   256  struct i40iw_cqp cqp;  member
   418  struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
   419  void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
   420  void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
|
D | i40iw_vf.h |
    55  enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
|
D | i40iw_verbs.c |
    222  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);  in i40iw_alloc_push_page()
    234  cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;  in i40iw_alloc_push_page()
    242  i40iw_put_cqp_request(&iwdev->cqp, cqp_request);  in i40iw_alloc_push_page()
    259  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);  in i40iw_dealloc_push_page()
    270  cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;  in i40iw_dealloc_push_page()
    523  struct i40iw_cqp *iwcqp = &iwdev->cqp;  in i40iw_create_qp()
    798  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);  in i40iw_hw_modify_qp()
   1038  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);  in i40iw_cq_wq_destroy()
   1183  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);  in i40iw_create_cq()
   1518  cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);  in i40iw_hw_alloc_stag()
   [all …]
|
D | i40iw_hmc.c |
   164  return dev->cqp->process_cqp_sds(dev, &sdinfo);  in i40iw_hmc_sd_one()
   207  ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);  in i40iw_hmc_sd_grp()
   218  ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);  in i40iw_hmc_sd_grp()
|
/Linux-v5.4/drivers/scsi/lpfc/ |
D | lpfc_sli4.h |
   1014  int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
   1024  struct lpfc_queue **drqp, struct lpfc_queue **cqp,
|
D | lpfc_sli.c |
   14863  lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,  in lpfc_cq_create_set() argument
   14880  if (!cqp || !hdwq || !numcq)  in lpfc_cq_create_set()
   14888  length += ((numcq * cqp[0]->page_count) *  in lpfc_cq_create_set()
   14906  cq = cqp[idx];  in lpfc_cq_create_set()
   15080  cq = cqp[idx];  in lpfc_cq_create_set()
   15884  struct lpfc_queue **drqp, struct lpfc_queue **cqp,  in lpfc_mrq_create() argument
   15899  if (!hrqp || !drqp || !cqp || !numrq)  in lpfc_mrq_create()
   15935  cq = cqp[idx];  in lpfc_mrq_create()
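In the lpfc driver the cqp parameter is unrelated to the i40iw control QP: it is simply an array of completion-queue pointers handed to the batch-create helpers, which validate every array argument up front and then walk the set by index. A small caller-side sketch of that validation/iteration pattern follows; the per-element check is an assumption, since the listing only shows the top-level NULL tests and the cq = cqp[idx] accesses.

#include <linux/errno.h>

/* Validate a CQ pointer array before handing it to a batch-create call
 * such as lpfc_cq_create_set().  Mirrors the "if (!cqp || ...)" guards
 * and "cq = cqp[idx]" walks above; the per-slot check is illustrative. */
static int example_check_cq_set(struct lpfc_queue **cqp, int numcq)
{
	int idx;

	if (!cqp || !numcq)
		return -EINVAL;

	for (idx = 0; idx < numcq; idx++) {
		struct lpfc_queue *cq = cqp[idx];

		if (!cq)		/* every slot must be populated */
			return -EINVAL;
	}

	return 0;
}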
|