Searched refs:qidx (Results 1 – 25 of 36) sorted by relevance

/Linux-v5.4/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
31 static int nicvf_poll_reg(struct nicvf *nic, int qidx, in nicvf_poll_reg() argument
42 reg_val = nicvf_queue_reg_read(nic, reg, qidx); in nicvf_poll_reg()
504 struct snd_queue *sq, int q_len, int qidx) in nicvf_init_snd_queue() argument
524 qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS); in nicvf_init_snd_queue()
525 if (qidx < nic->pnicvf->xdp_tx_queues) { in nicvf_init_snd_queue()
627 struct queue_set *qs, int qidx) in nicvf_reclaim_snd_queue() argument
630 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); in nicvf_reclaim_snd_queue()
632 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) in nicvf_reclaim_snd_queue()
635 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); in nicvf_reclaim_snd_queue()
639 struct queue_set *qs, int qidx) in nicvf_reclaim_rcv_queue() argument
[all …]
nicvf_main.c
75 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) in nicvf_netdev_qidx() argument
78 return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS); in nicvf_netdev_qidx()
80 return qidx; in nicvf_netdev_qidx()
104 u64 qidx, u64 val) in nicvf_queue_reg_write() argument
108 writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT)); in nicvf_queue_reg_write()
111 u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) in nicvf_queue_reg_read() argument
115 return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT)); in nicvf_queue_reg_read()
992 int qidx; in nicvf_handle_qs_err() local
998 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { in nicvf_handle_qs_err()
1000 qidx); in nicvf_handle_qs_err()
[all …]
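
The nicvf hits above show two recurring uses of qidx: the per-queue register window (nicvf_queue_reg_write()/read() add qidx << NIC_Q_NUM_SHIFT to the register address) and the secondary-Qset remap in nicvf_netdev_qidx(). A minimal userspace sketch of that arithmetic follows; the shift value, queue count, BAR base and offset are placeholders, not the driver's real constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the real NIC_Q_NUM_SHIFT and MAX_CMP_QUEUES_PER_QS
 * come from the nicvf headers and may differ. */
#define MOCK_Q_NUM_SHIFT        18
#define MOCK_CMP_QUEUES_PER_QS  8

/* Each queue owns its own copy of a register at base + offset + (qidx << shift),
 * which is what nicvf_queue_reg_write()/read() compute before writeq/readq. */
static uint64_t queue_reg_addr(uint64_t bar_base, uint64_t offset, uint64_t qidx)
{
	return bar_base + offset + (qidx << MOCK_Q_NUM_SHIFT);
}

/* nicvf_netdev_qidx() pattern: queues of a secondary Qset are shifted up so
 * every queue in the VF group gets a unique netdev index. */
static unsigned int netdev_qidx(int on_secondary_qset, unsigned int sqs_id,
				unsigned int qidx)
{
	if (on_secondary_qset)
		return qidx + (sqs_id + 1) * MOCK_CMP_QUEUES_PER_QS;
	return qidx;
}

int main(void)
{
	/* arbitrary BAR base and register offset, just to show the arithmetic */
	printf("queue 3 register at %#llx\n",
	       (unsigned long long)queue_reg_addr(0x20000000ULL, 0x400, 3));
	printf("secondary Qset 0, queue 2 -> netdev queue %u\n",
	       netdev_qidx(1, 0, 2));
	return 0;
}
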
nicvf_ethtool.c
215 int stats, qidx; in nicvf_get_qset_strings() local
218 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { in nicvf_get_qset_strings()
220 sprintf(*data, "rxq%d: %s", qidx + start_qidx, in nicvf_get_qset_strings()
226 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { in nicvf_get_qset_strings()
228 sprintf(*data, "txq%d: %s", qidx + start_qidx, in nicvf_get_qset_strings()
302 int stat, qidx; in nicvf_get_qset_stats() local
307 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { in nicvf_get_qset_stats()
308 nicvf_update_rq_stats(nic, qidx); in nicvf_get_qset_stats()
310 *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats) in nicvf_get_qset_stats()
314 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { in nicvf_get_qset_stats()
[all …]
nicvf_queues.h
336 int qidx, bool enable);
338 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
339 void nicvf_sq_disable(struct nicvf *nic, int qidx);
342 struct snd_queue *sq, int qidx);
365 u64 qidx, u64 val);
367 u64 offset, u64 qidx);
/Linux-v5.4/drivers/crypto/chelsio/
chcr_ipsec.c
387 u32 qidx; in copy_esn_pktxt() local
395 qidx = skb->queue_mapping; in copy_esn_pktxt()
396 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in copy_esn_pktxt()
437 u32 ctrl0, qidx; in copy_cpltx_pktxt() local
443 qidx = skb->queue_mapping; in copy_cpltx_pktxt()
444 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in copy_cpltx_pktxt()
482 unsigned int qidx; in copy_key_cpltx_pktxt() local
486 qidx = skb->queue_mapping; in copy_key_cpltx_pktxt()
487 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in copy_key_cpltx_pktxt()
542 int qidx = skb_get_queue_mapping(skb); in chcr_crypto_wreq() local
[all …]
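
The chcr_ipsec.c hits (and the cxgb3/cxgb4 xmit paths further down) all resolve qidx the same way: skb->queue_mapping is a per-port index, offset by the port's first_qset into the adapter-wide ethtxq array. A hedged userspace sketch of that lookup, with stand-in types and sizes:

#include <assert.h>
#include <stdio.h>

struct mock_txq { int queue_id; };

struct mock_adapter {
	struct mock_txq ethtxq[32];     /* adapter-wide TX queue array */
};

struct mock_port {
	int first_qset;                 /* this port's first slot in ethtxq[] */
	int nqsets;                     /* number of queues owned by the port */
};

static struct mock_txq *select_txq(struct mock_adapter *adap,
				   const struct mock_port *pi,
				   unsigned int queue_mapping)
{
	/* mirrors: WARN_ON(qidx >= pi->nqsets); &adap->sge.ethtxq[pi->first_qset + qidx] */
	assert(queue_mapping < (unsigned int)pi->nqsets);
	return &adap->ethtxq[pi->first_qset + queue_mapping];
}

int main(void)
{
	struct mock_adapter adap = { 0 };
	struct mock_port port1 = { .first_qset = 8, .nqsets = 8 };

	for (int i = 0; i < 32; i++)
		adap.ethtxq[i].queue_id = 1000 + i;

	printf("port1, queue_mapping 3 -> txq id %d\n",
	       select_txq(&adap, &port1, 3)->queue_id);
	return 0;
}
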
/Linux-v5.4/drivers/net/ethernet/broadcom/bnxt/
bnxt_dcb.c
51 u8 qidx; in bnxt_hwrm_queue_pri2cos_cfg() local
56 qidx = bp->tc_to_qidx[ets->prio_tc[i]]; in bnxt_hwrm_queue_pri2cos_cfg()
57 pri2cos[i] = bp->q_info[qidx].queue_id; in bnxt_hwrm_queue_pri2cos_cfg()
101 u8 qidx = bp->tc_to_qidx[i]; in bnxt_hwrm_queue_cos2bw_cfg() local
105 qidx); in bnxt_hwrm_queue_cos2bw_cfg()
108 cos2bw.queue_id = bp->q_info[qidx].queue_id; in bnxt_hwrm_queue_cos2bw_cfg()
124 data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4); in bnxt_hwrm_queue_cos2bw_cfg()
126 if (qidx == 0) { in bnxt_hwrm_queue_cos2bw_cfg()
260 u8 qidx = bp->tc_to_qidx[i]; in bnxt_hwrm_queue_pfc_cfg() local
262 if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) { in bnxt_hwrm_queue_pfc_cfg()
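
The bnxt_dcb.c hits chain three tables: an IEEE priority maps to a traffic class (ets->prio_tc), the traffic class to an internal queue index (bp->tc_to_qidx), and that qidx to the firmware queue id (bp->q_info[qidx].queue_id). A small sketch of that chain with made-up table contents:

#include <stdint.h>
#include <stdio.h>

#define MOCK_MAX_TC   8
#define MOCK_MAX_PRIO 8

struct mock_q_info { uint8_t queue_id; };

int main(void)
{
	uint8_t prio_tc[MOCK_MAX_PRIO]  = { 0, 0, 1, 1, 2, 2, 3, 3 }; /* from ETS config */
	uint8_t tc_to_qidx[MOCK_MAX_TC] = { 0, 1, 2, 3, 0, 0, 0, 0 };
	struct mock_q_info q_info[MOCK_MAX_TC] = {
		{ .queue_id = 10 }, { .queue_id = 11 },
		{ .queue_id = 12 }, { .queue_id = 13 },
	};
	uint8_t pri2cos[MOCK_MAX_PRIO];

	for (int i = 0; i < MOCK_MAX_PRIO; i++) {
		uint8_t qidx = tc_to_qidx[prio_tc[i]];  /* mirrors bnxt_hwrm_queue_pri2cos_cfg() */

		pri2cos[i] = q_info[qidx].queue_id;
		printf("prio %d -> tc %d -> qidx %d -> hw queue %d\n",
		       i, prio_tc[i], qidx, pri2cos[i]);
	}
	return 0;
}
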
/Linux-v5.4/drivers/scsi/qla2xxx/
qla_nvme.c
81 unsigned int qidx, u16 qsize, void **handle) in qla_nvme_alloc_queue() argument
87 if (!qidx) in qla_nvme_alloc_queue()
88 qidx++; in qla_nvme_alloc_queue()
95 __func__, handle, qidx, qsize); in qla_nvme_alloc_queue()
97 if (qidx > qla_nvme_fc_transport.max_hw_queues) { in qla_nvme_alloc_queue()
100 __func__, qidx, qla_nvme_fc_transport.max_hw_queues); in qla_nvme_alloc_queue()
104 if (ha->queue_pair_map[qidx]) { in qla_nvme_alloc_queue()
105 *handle = ha->queue_pair_map[qidx]; in qla_nvme_alloc_queue()
108 *handle, qidx); in qla_nvme_alloc_queue()
/Linux-v5.4/drivers/scsi/csiostor/
csio_wr.c
745 csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx) in csio_wr_cleanup_eq_stpg() argument
747 struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx]; in csio_wr_cleanup_eq_stpg()
762 csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx) in csio_wr_cleanup_iq_ftr() argument
765 struct csio_q *q = wrm->q_arr[qidx]; in csio_wr_cleanup_iq_ftr()
862 csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size, in csio_wr_get() argument
866 struct csio_q *q = wrm->q_arr[qidx]; in csio_wr_get()
877 CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); in csio_wr_get()
981 csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) in csio_wr_issue() argument
984 struct csio_q *q = wrm->q_arr[qidx]; in csio_wr_issue()
986 CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); in csio_wr_issue()
[all …]
/Linux-v5.4/drivers/net/ethernet/marvell/octeontx2/af/
rvu_nix.c
487 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) in rvu_nix_aq_enq_inst()
491 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) in rvu_nix_aq_enq_inst()
495 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) in rvu_nix_aq_enq_inst()
502 (req->qidx >= (256UL << (cfg & 0xF)))) in rvu_nix_aq_enq_inst()
509 (req->qidx >= (256UL << (cfg & 0xF)))) in rvu_nix_aq_enq_inst()
537 inst.cindex = req->qidx; in rvu_nix_aq_enq_inst()
604 __set_bit(req->qidx, pfvf->rq_bmap); in rvu_nix_aq_enq_inst()
606 __set_bit(req->qidx, pfvf->sq_bmap); in rvu_nix_aq_enq_inst()
608 __set_bit(req->qidx, pfvf->cq_bmap); in rvu_nix_aq_enq_inst()
614 (test_bit(req->qidx, pfvf->rq_bmap) & in rvu_nix_aq_enq_inst()
[all …]
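
The rvu_nix.c hits show the admin-function side: a qidx arriving in a mailbox request is validated against the relevant context's qsize before the queue's bit is set in a per-VF bitmap (rq_bmap/sq_bmap/cq_bmap). A sketch of that validate-then-track pattern, with placeholder sizes and error code:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MOCK_QSIZE   64
#define MOCK_EINVAL  (-22)
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct mock_pfvf {
	unsigned long rq_bmap[(MOCK_QSIZE + BITS_PER_LONG - 1) / BITS_PER_LONG];
	unsigned int rq_qsize;
	bool rq_ctx_valid;
};

static int mock_rq_enable(struct mock_pfvf *pfvf, unsigned int qidx)
{
	/* mirrors: if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) return err; */
	if (!pfvf->rq_ctx_valid || qidx >= pfvf->rq_qsize)
		return MOCK_EINVAL;

	/* mirrors: __set_bit(req->qidx, pfvf->rq_bmap); */
	pfvf->rq_bmap[qidx / BITS_PER_LONG] |= 1UL << (qidx % BITS_PER_LONG);
	return 0;
}

int main(void)
{
	struct mock_pfvf pfvf = { .rq_qsize = MOCK_QSIZE, .rq_ctx_valid = true };

	printf("enable RQ 5:  %d\n", mock_rq_enable(&pfvf, 5));
	printf("enable RQ 99: %d (out of range)\n", mock_rq_enable(&pfvf, 99));
	return 0;
}
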
/Linux-v5.4/include/linux/
nvme-fc-driver.h
390 unsigned int qidx, u16 qsize,
393 unsigned int qidx, void *handle);
/Linux-v5.4/drivers/scsi/lpfc/
lpfc_nvme.h
42 uint32_t qidx; /* queue index passed to create */ member
lpfc_debugfs.h
514 lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx) in lpfc_debug_dump_hba_eq() argument
518 qp = phba->sli4_hba.hdwq[qidx].hba_eq; in lpfc_debug_dump_hba_eq()
520 pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id); in lpfc_debug_dump_hba_eq()
lpfc_nvme.c
215 unsigned int qidx, u16 qsize, in lpfc_nvme_create_queue() argument
233 qhandle->qidx = qidx; in lpfc_nvme_create_queue()
239 if (qidx) { in lpfc_nvme_create_queue()
241 qhandle->index = ((qidx - 1) % in lpfc_nvme_create_queue()
245 qhandle->index = qidx; in lpfc_nvme_create_queue()
251 qidx, qhandle->cpu_id, qhandle->index, qhandle); in lpfc_nvme_create_queue()
272 unsigned int qidx, in lpfc_nvme_delete_queue() argument
286 lport, qidx, handle); in lpfc_nvme_delete_queue()
1598 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { in lpfc_nvme_fcp_io_submit()
1659 lpfc_ncmd->qidx = lpfc_queue_info->qidx; in lpfc_nvme_fcp_io_submit()
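
The lpfc_nvme.c hits (like the qla_nvme.c ones above) show how an NVMe-FC qidx handed in by the transport is mapped onto driver hardware queues: qidx 0 is the admin queue, and I/O queues are folded round-robin via (qidx - 1) modulo the hardware queue count. A sketch with a placeholder queue count (the real divisor is elided in the hit above):

#include <stdio.h>

#define MOCK_HDW_QUEUES 4   /* assumed hardware queue count */

static unsigned int hw_queue_for_qidx(unsigned int qidx)
{
	if (!qidx)
		return 0;                       /* qidx 0: admin queue */
	return (qidx - 1) % MOCK_HDW_QUEUES;    /* mirrors ((qidx - 1) % ...) above */
}

int main(void)
{
	for (unsigned int qidx = 0; qidx <= 6; qidx++)
		printf("fc qidx %u -> hardware queue %u\n", qidx, hw_queue_for_qidx(qidx));
	return 0;
}
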
lpfc_init.c
9230 int qidx, uint32_t qtype) in lpfc_create_wq_cq() argument
9238 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); in lpfc_create_wq_cq()
9248 qidx, (uint32_t)rc); in lpfc_create_wq_cq()
9259 qidx, cq->queue_id, qidx, eq->queue_id); in lpfc_create_wq_cq()
9266 qidx, (uint32_t)rc); in lpfc_create_wq_cq()
9278 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); in lpfc_create_wq_cq()
9309 int qidx; in lpfc_setup_cq_lookup() local
9314 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { in lpfc_setup_cq_lookup()
9316 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; in lpfc_setup_cq_lookup()
9350 int qidx, cpu; in lpfc_sli4_queue_setup() local
[all …]
lpfc_debugfs.c
3688 int qidx; in lpfc_idiag_wqs_for_cq() local
3690 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_idiag_wqs_for_cq()
3691 qp = phba->sli4_hba.hdwq[qidx].io_wq; in lpfc_idiag_wqs_for_cq()
4135 uint32_t qidx, quetp, queid, index, count, offset, value; in lpfc_idiag_queacc_write() local
4173 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_idiag_queacc_write()
4174 qp = phba->sli4_hba.hdwq[qidx].hba_eq; in lpfc_idiag_queacc_write()
4224 for (qidx = 0; qidx < phba->cfg_hdw_queue; in lpfc_idiag_queacc_write()
4225 qidx++) { in lpfc_idiag_queacc_write()
4226 qp = phba->sli4_hba.hdwq[qidx].io_cq; in lpfc_idiag_queacc_write()
4280 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { in lpfc_idiag_queacc_write()
[all …]
lpfc_sli.h
442 uint16_t qidx; member
/Linux-v5.4/drivers/net/ethernet/chelsio/cxgb3/
cxgb3_main.c
403 int i, j, err, qidx = 0; in request_msix_data_irqs() local
409 err = request_irq(adap->msix_info[qidx + 1].vec, in request_msix_data_irqs()
411 adap->sge.qs[qidx]. in request_msix_data_irqs()
413 adap->msix_info[qidx + 1].desc, in request_msix_data_irqs()
414 &adap->sge.qs[qidx]); in request_msix_data_irqs()
416 while (--qidx >= 0) in request_msix_data_irqs()
417 free_irq(adap->msix_info[qidx + 1].vec, in request_msix_data_irqs()
418 &adap->sge.qs[qidx]); in request_msix_data_irqs()
421 qidx++; in request_msix_data_irqs()
942 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, in send_pktsched_cmd() argument
[all …]
sge.c
167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) in fl_to_qset() argument
169 return container_of(q, struct sge_qset, fl[qidx]); in fl_to_qset()
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
1265 int qidx; in t3_eth_xmit() local
1283 qidx = skb_get_queue_mapping(skb); in t3_eth_xmit()
1284 qs = &pi->qs[qidx]; in t3_eth_xmit()
1286 txq = netdev_get_tx_queue(dev, qidx); in t3_eth_xmit()
/Linux-v5.4/drivers/crypto/ccp/
Dccp-dev-v5.c216 n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; in ccp5_get_free_slots()
238 mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; in ccp5_do_cmd()
243 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp5_do_cmd()
249 tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); in ccp5_do_cmd()
828 cmd_q->qidx = 0; in ccp5_init()
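
In the ccp-dev-v5.c hits, qidx is the software tail of a fixed-size descriptor ring: each submission computes the descriptor address as base + qidx * Q_DESC_SIZE and then advances qidx modulo COMMANDS_PER_QUEUE, while the free-slot count is derived from the hardware head index. A sketch of that ring arithmetic with assumed ring and descriptor sizes:

#include <stdint.h>
#include <stdio.h>

#define MOCK_COMMANDS_PER_QUEUE 16
#define MOCK_Q_DESC_SIZE        32

struct mock_cmd_q {
	unsigned int qidx;        /* software tail (next free descriptor) */
	uint64_t qdma_base;       /* DMA address of descriptor 0 */
};

/* mirrors the free-slot line above; the trailing wrap to ring size is an assumption */
static unsigned int mock_free_slots(const struct mock_cmd_q *q, unsigned int head_idx)
{
	unsigned int n = head_idx + MOCK_COMMANDS_PER_QUEUE - q->qidx - 1;

	return n % MOCK_COMMANDS_PER_QUEUE;
}

static uint64_t mock_post_cmd(struct mock_cmd_q *q)
{
	/* descriptor address handed to hardware: base + qidx * desc size */
	uint64_t tail = q->qdma_base + q->qidx * MOCK_Q_DESC_SIZE;

	/* mirrors: cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; */
	q->qidx = (q->qidx + 1) % MOCK_COMMANDS_PER_QUEUE;
	return tail;
}

int main(void)
{
	struct mock_cmd_q q = { .qidx = 0, .qdma_base = 0x1000 };

	for (int i = 0; i < 3; i++) {
		uint64_t desc = mock_post_cmd(&q);

		printf("posted descriptor at %#llx, free slots now %u\n",
		       (unsigned long long)desc, mock_free_slots(&q, 0));
	}
	return 0;
}
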
ccp-dev.h
274 unsigned int qidx; member
/Linux-v5.4/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_sriov.c
1745 int qidx = 0, abs_vfid; in bnx2x_iov_eq_sp_event() local
1794 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); in bnx2x_iov_eq_sp_event()
1808 vf->abs_vfid, qidx); in bnx2x_iov_eq_sp_event()
1809 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, in bnx2x_iov_eq_sp_event()
1811 qidx)->sp_obj, in bnx2x_iov_eq_sp_event()
1816 vf->abs_vfid, qidx); in bnx2x_iov_eq_sp_event()
1817 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); in bnx2x_iov_eq_sp_event()
1821 vf->abs_vfid, qidx); in bnx2x_iov_eq_sp_event()
1826 vf->abs_vfid, qidx); in bnx2x_iov_eq_sp_event()
1831 vf->abs_vfid, qidx); in bnx2x_iov_eq_sp_event()
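
In the bnx2x_sriov.c hits, the VF queue index is recovered from a completion's connection id by masking off the low BNX2X_VF_CID_WND bits. A one-liner sketch with an assumed window width:

#include <stdio.h>

#define MOCK_VF_CID_WND 4   /* assumed width of the per-VF queue window, in bits */

static int vf_qidx_from_cid(unsigned int cid)
{
	/* mirrors: qidx = cid & ((1 << BNX2X_VF_CID_WND) - 1); */
	return cid & ((1 << MOCK_VF_CID_WND) - 1);
}

int main(void)
{
	printf("cid 0x35 -> vf qidx %d\n", vf_qidx_from_cid(0x35));
	return 0;
}
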
bnx2x_vfpf.c
26 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
684 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) in bnx2x_vfpf_teardown_queue() argument
694 req->vf_qid = qidx; in bnx2x_vfpf_teardown_queue()
706 BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx, in bnx2x_vfpf_teardown_queue()
713 BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx, in bnx2x_vfpf_teardown_queue()
/Linux-v5.4/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
1161 int qidx, credits, max_pkt_len; in t4vf_eth_xmit() local
1196 qidx = skb_get_queue_mapping(skb); in t4vf_eth_xmit()
1197 BUG_ON(qidx >= pi->nqsets); in t4vf_eth_xmit()
1198 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1229 dev->name, qidx); in t4vf_eth_xmit()
/Linux-v5.4/drivers/net/ethernet/chelsio/cxgb4/
sge.c
1361 int qidx, credits; in cxgb4_eth_xmit() local
1404 qidx = skb_get_queue_mapping(skb); in cxgb4_eth_xmit()
1416 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in cxgb4_eth_xmit()
1441 dev->name, qidx); in cxgb4_eth_xmit()
1713 int qidx, credits, max_pkt_len; in cxgb4_vf_eth_xmit() local
1744 qidx = skb_get_queue_mapping(skb); in cxgb4_vf_eth_xmit()
1745 WARN_ON(qidx >= pi->nqsets); in cxgb4_vf_eth_xmit()
1746 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1770 dev->name, qidx); in cxgb4_vf_eth_xmit()
/Linux-v5.4/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
fwsignal.c
1999 int qidx, hslot; in brcmf_fws_rollback_toq() local
2004 qidx = 2 * fifo; in brcmf_fws_rollback_toq()
2006 qidx++; in brcmf_fws_rollback_toq()
2008 pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb); in brcmf_fws_rollback_toq()
2010 bphy_err(drvr, "%s queue %d full\n", entry->name, qidx); in brcmf_fws_rollback_toq()
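
The fwsignal.c hits compute a packet-queue slot from the firmware FIFO number: each FIFO owns two precedence slots, so qidx = 2 * fifo, incremented when the (elided) condition selects the second slot. A trivial sketch of that encoding:

#include <stdbool.h>
#include <stdio.h>

static int fws_psq_index(int fifo, bool second_slot)
{
	int qidx = 2 * fifo;

	if (second_slot)
		qidx++;
	return qidx;
}

int main(void)
{
	for (int fifo = 0; fifo < 4; fifo++)
		printf("fifo %d -> psq slots %d/%d\n", fifo,
		       fws_psq_index(fifo, false), fws_psq_index(fifo, true));
	return 0;
}
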
