
Searched refs: q_no (Results 1 – 20 of 20), sorted by relevance

/Linux-v4.19/drivers/net/ethernet/cavium/liquidio/
cn23xx_vf_device.c
54 u32 q_no; in cn23xx_vf_reset_io_queues() local
57 for (q_no = 0; q_no < num_queues; q_no++) { in cn23xx_vf_reset_io_queues()
60 CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_vf_reset_io_queues()
62 octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_vf_reset_io_queues()
67 for (q_no = 0; q_no < num_queues; q_no++) { in cn23xx_vf_reset_io_queues()
69 CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_vf_reset_io_queues()
74 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_vf_reset_io_queues()
80 q_no); in cn23xx_vf_reset_io_queues()
85 octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_vf_reset_io_queues()
89 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_vf_reset_io_queues()
[all …]
cn23xx_pf_device.c
349 u32 q_no, srn, ern; in cn23xx_reset_io_queues() local
359 for (q_no = srn; q_no < ern; q_no++) { in cn23xx_reset_io_queues()
361 d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_reset_io_queues()
363 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64); in cn23xx_reset_io_queues()
367 for (q_no = srn; q_no < ern; q_no++) { in cn23xx_reset_io_queues()
369 CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_reset_io_queues()
374 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_reset_io_queues()
379 q_no); in cn23xx_reset_io_queues()
384 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_reset_io_queues()
388 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_reset_io_queues()
[all …]
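
Both reset routines above (cn23xx_vf_reset_io_queues() and cn23xx_reset_io_queues()) use q_no the same way: loop over the queue numbers, set a reset request in each queue's SLI_IQ_PKT_CONTROL64 register, then loop again and poll each queue until the hardware acknowledges, bailing out with an error if it never does. The following user-space sketch only illustrates that two-phase pattern; the register layout, the bit positions (PKT_CTL_RST, PKT_CTL_QUIET) and the simulated register file are invented stand-ins, not the driver's actual definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins: a tiny simulated register file indexed by queue
     * number, an assumed reset-request bit and an assumed quiesced bit.  The
     * real driver goes through octeon_read_csr64()/octeon_write_csr64() and
     * the CN23XX_*_SLI_IQ_PKT_CONTROL64(q_no) macros instead. */
    #define MAX_QUEUES    8
    #define PKT_CTL_RST   (1ull << 62)
    #define PKT_CTL_QUIET (1ull << 28)
    #define POLL_BUDGET   1000

    static uint64_t pkt_control[MAX_QUEUES];

    static uint64_t csr_read64(uint32_t q_no)
    {
        /* Pretend the hardware quiesces as soon as reset is requested. */
        if (pkt_control[q_no] & PKT_CTL_RST)
            pkt_control[q_no] |= PKT_CTL_QUIET;
        return pkt_control[q_no];
    }

    static void csr_write64(uint32_t q_no, uint64_t val)
    {
        pkt_control[q_no] = val;
    }

    /* Phase 1: request reset on every queue.  Phase 2: poll each queue until
     * it reports quiescence, then drop the reset request again. */
    static int reset_io_queues(uint32_t num_queues)
    {
        uint32_t q_no;

        for (q_no = 0; q_no < num_queues; q_no++)
            csr_write64(q_no, csr_read64(q_no) | PKT_CTL_RST);

        for (q_no = 0; q_no < num_queues; q_no++) {
            int budget = POLL_BUDGET;
            uint64_t ctl = csr_read64(q_no);

            while (!(ctl & PKT_CTL_QUIET) && --budget)
                ctl = csr_read64(q_no);
            if (!budget) {
                fprintf(stderr, "queue %u did not quiesce\n", q_no);
                return -1;
            }
            csr_write64(q_no, ctl & ~PKT_CTL_RST);
        }
        return 0;
    }

    int main(void)
    {
        return reset_io_queues(MAX_QUEUES) ? 1 : 0;
    }
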
octeon_droq.c
198 int octeon_delete_droq(struct octeon_device *oct, u32 q_no) in octeon_delete_droq() argument
200 struct octeon_droq *droq = oct->droq[q_no]; in octeon_delete_droq()
202 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); in octeon_delete_droq()
212 oct->io_qmask.oq &= ~(1ULL << q_no); in octeon_delete_droq()
213 vfree(oct->droq[q_no]); in octeon_delete_droq()
214 oct->droq[q_no] = NULL; in octeon_delete_droq()
221 u32 q_no, in octeon_init_droq() argument
231 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); in octeon_init_droq()
233 droq = oct->droq[q_no]; in octeon_init_droq()
237 droq->q_no = q_no; in octeon_init_droq()
[all …]
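
The octeon_droq.c hits show the bookkeeping around q_no for output queues: each DROQ records its own q_no, lives in an array indexed by q_no, and has a matching bit in a 64-bit active-queue mask (io_qmask.oq) that is cleared when the queue is deleted. A minimal sketch of that bookkeeping, with invented names (droq_table, oq_mask) standing in for the real oct->droq[] and oct->io_qmask.oq:

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_OQS 64

    struct droq {                     /* trimmed stand-in for struct octeon_droq */
        uint32_t q_no;
        uint32_t max_count;
    };

    static struct droq *droq_table[MAX_OQS];  /* analogous to oct->droq[] */
    static uint64_t oq_mask;                  /* analogous to oct->io_qmask.oq */

    static int create_droq(uint32_t q_no, uint32_t num_descs)
    {
        struct droq *d = calloc(1, sizeof(*d));

        if (!d)
            return -1;
        d->q_no = q_no;               /* queue remembers its own index */
        d->max_count = num_descs;
        droq_table[q_no] = d;
        oq_mask |= 1ull << q_no;      /* mark the slot as live */
        return 0;
    }

    static void delete_droq(uint32_t q_no)
    {
        oq_mask &= ~(1ull << q_no);   /* slot no longer counted as active */
        free(droq_table[q_no]);
        droq_table[q_no] = NULL;
    }

    int main(void)
    {
        if (create_droq(3, 128))
            return 1;
        delete_droq(3);
        return 0;
    }
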
octeon_droq.h
251 u32 q_no; member
341 u32 q_no,
353 int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
368 u32 q_no,
379 int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
405 int octeon_create_droq(struct octeon_device *oct, u32 q_no,
415 int octeon_enable_irq(struct octeon_device *oct, u32 q_no);
octeon_mailbox.c
65 mbox->mbox_req.q_no = mbox->q_no; in octeon_mbox_read()
77 mbox->mbox_resp.q_no = mbox->q_no; in octeon_mbox_read()
134 struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; in octeon_mbox_write()
262 mbox->q_no); in octeon_mbox_process_cmd()
264 oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no], in octeon_mbox_process_cmd()
356 int octeon_mbox_cancel(struct octeon_device *oct, int q_no) in octeon_mbox_cancel() argument
358 struct octeon_mbox *mbox = oct->mbox[q_no]; in octeon_mbox_cancel()
octeon_device.c
894 txpciq.s.q_no = iq_no; in octeon_setup_instr_queues()
953 u32 q_no; in octeon_set_io_queues_off() local
959 for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { in octeon_set_io_queues_off()
961 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); in octeon_set_io_queues_off()
967 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in octeon_set_io_queues_off()
973 q_no); in octeon_set_io_queues_off()
979 CN23XX_SLI_IQ_PKT_CONTROL64(q_no), in octeon_set_io_queues_off()
983 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in octeon_set_io_queues_off()
986 "unable to reset qno %u\n", q_no); in octeon_set_io_queues_off()
995 u32 q_no, in octeon_set_droq_pkt_op() argument
[all …]
octeon_mailbox.h
65 u32 q_no; member
88 u32 q_no; member
120 int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
lio_core.c
200 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_feature()
473 int q, q_no = 0; in octnet_poll_check_rxq_oom_status() local
477 q_no = lio->linfo.rxpciq[q].s.q_no; in octnet_poll_check_rxq_oom_status()
478 droq = oct->droq[q_no]; in octnet_poll_check_rxq_oom_status()
553 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, in octeon_setup_droq() argument
558 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); in octeon_setup_droq()
560 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); in octeon_setup_droq()
565 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); in octeon_setup_droq()
570 octeon_set_droq_pkt_op(oct, q_no, 1); in octeon_setup_droq()
575 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); in octeon_setup_droq()
[all …]
lio_ethtool.c
474 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in lio_send_queue_count_update()
710 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_gpio_access()
736 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_id_active()
806 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_mdio45_access()
1097 lio->txq = lio->linfo.txpciq[0].s.q_no; in lio_23xx_reconfigure_queue_count()
1098 lio->rxq = lio->linfo.rxpciq[0].s.q_no; in lio_23xx_reconfigure_queue_count()
1414 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in lio_set_pauseparam()
1778 j = lio->linfo.txpciq[vj].s.q_no; in lio_vf_get_ethtool_stats()
1820 j = lio->linfo.rxpciq[vj].s.q_no; in lio_vf_get_ethtool_stats()
2067 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_get_intrmod_cfg()
[all …]
octeon_nic.h
92 u32 q_no; member
119 static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) in octnet_iq_is_full() argument
121 return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) in octnet_iq_is_full()
122 >= (oct->instr_queue[q_no]->max_count - 2)); in octnet_iq_is_full()
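
octnet_iq_is_full() above declares an instruction queue full once the pending count reaches max_count - 2, keeping two descriptors in reserve. A stand-alone sketch of that high-water check, with a trimmed stand-in for struct octeon_instr_queue:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct octeon_instr_queue: only the two fields
     * the fullness check needs. */
    struct instr_queue {
        atomic_int instr_pending;   /* commands posted but not yet completed */
        uint32_t   max_count;       /* descriptor ring size */
    };

    /* Full when the pending count reaches max_count - 2, i.e. two descriptors
     * are always kept in reserve (mirrors the check in the hit above). */
    static bool iq_is_full(struct instr_queue *iq)
    {
        return (uint32_t)atomic_load(&iq->instr_pending) >= iq->max_count - 2;
    }

    int main(void)
    {
        struct instr_queue iq = { .max_count = 64 };

        atomic_store(&iq.instr_pending, 62);
        printf("full: %d\n", iq_is_full(&iq));   /* prints "full: 1" */
        return 0;
    }
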
cn66xx_regs.h
473 #define CN6XXX_DPI_DMA_ENG_ENB(q_no) \ argument
474 (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
478 #define CN6XXX_DPI_DMA_ENG_BUF(q_no) \ argument
479 (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
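
The two cn66xx_regs.h macros compute a per-engine register address as the engine-0 base plus q_no * 8, i.e. one 64-bit register per DMA engine at an 8-byte stride. A tiny illustration of that addressing scheme with made-up base addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up base addresses; only the base + (q_no * 8) stride pattern is
     * the point, mirroring CN6XXX_DPI_DMA_ENG_ENB()/CN6XXX_DPI_DMA_ENG_BUF(). */
    #define DMA_ENG0_ENB  0x0001df0000000080ull
    #define DMA_ENG0_BUF  0x0001df0000000880ull

    #define DMA_ENG_ENB(q_no) (DMA_ENG0_ENB + ((uint64_t)(q_no) * 8))
    #define DMA_ENG_BUF(q_no) (DMA_ENG0_BUF + ((uint64_t)(q_no) * 8))

    int main(void)
    {
        for (uint32_t q_no = 0; q_no < 4; q_no++)
            printf("eng %u: enb @ %#llx, buf @ %#llx\n", q_no,
                   (unsigned long long)DMA_ENG_ENB(q_no),
                   (unsigned long long)DMA_ENG_BUF(q_no));
        return 0;
    }
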
lio_main.c
176 int q_no; in octeon_droq_bh() local
182 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { in octeon_droq_bh()
183 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) in octeon_droq_bh()
185 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], in octeon_droq_bh()
187 lio_enable_irq(oct->droq[q_no], NULL); in octeon_droq_bh()
193 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; in octeon_droq_bh()
510 lio->oct_dev->num_iqs].s.q_no; in check_txq_status()
695 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in lio_sync_octeon_time()
1242 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in send_rx_ctrl_cmd()
1349 lio->linfo.rxpciq[j].s.q_no); in liquidio_stop_nic_module()
[all …]
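
The octeon_droq_bh() hits show the usual service loop: walk every possible output queue number, skip the ones whose bit is clear in io_qmask.oq, process the rest, and re-enable their interrupts. A user-space sketch of that skip-by-mask loop; process_queue() is a placeholder for the real packet processing:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_OUTPUT_QUEUES 64
    #define BIT_ULL(n) (1ull << (n))

    /* Placeholder for the real work; returns true if more work remains and
     * the handler should be rescheduled. */
    static bool process_queue(uint32_t q_no)
    {
        printf("servicing queue %u\n", q_no);
        return false;
    }

    static bool service_active_queues(uint64_t oq_mask)
    {
        bool reschedule = false;

        for (uint32_t q_no = 0; q_no < MAX_OUTPUT_QUEUES; q_no++) {
            if (!(oq_mask & BIT_ULL(q_no)))  /* queue not instantiated */
                continue;
            reschedule |= process_queue(q_no);
            /* the real handler re-enables the queue's interrupt here */
        }
        return reschedule;
    }

    int main(void)
    {
        /* queues 0, 1 and 5 active */
        return service_active_queues(BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(5)) ? 1 : 0;
    }
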
lio_vf_main.c
635 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in send_rx_ctrl_cmd()
738 lio->linfo.rxpciq[j].s.q_no); in liquidio_stop_nic_module()
1042 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_uc_list()
1091 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_mcast_list()
1134 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_mac()
1171 iq_no = lio->linfo.txpciq[i].s.q_no; in liquidio_get_stats64()
1187 oq_no = lio->linfo.rxpciq[i].s.q_no; in liquidio_get_stats64()
1367 sc->iq_no = ndata->q_no; in send_nic_timestamp_pkt()
1415 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
1442 ndata.q_no = iq_no; in liquidio_xmit()
[all …]
liquidio_common.h
715 u64 q_no:8; member
731 u64 q_no:8;
743 u64 q_no:8; member
747 u64 q_no:8;
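
liquidio_common.h packs q_no into an 8-bit field of several 64-bit host/firmware structures, and the driver hits throughout this listing read the hardware queue number back out of a per-interface table (txpciq[i].s.q_no, rxpciq[j].s.q_no). A rough sketch of that packing and lookup, using a simplified union rather than the driver's real txpciq definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the driver's txpciq/rxpciq descriptors: the
     * hardware queue number occupies an 8-bit field inside a 64-bit word. */
    union pciq {
        uint64_t u64;
        struct {
            uint64_t q_no : 8;     /* hardware queue number */
            uint64_t rsvd : 56;    /* remaining fields omitted */
        } s;
    };

    #define NUM_RINGS 4

    int main(void)
    {
        union pciq txpciq[NUM_RINGS];

        /* Pretend firmware assigned hardware queues 32..35 to this interface. */
        for (int i = 0; i < NUM_RINGS; i++) {
            txpciq[i].u64 = 0;
            txpciq[i].s.q_no = 32 + i;
        }

        /* Logical ring index -> hardware queue number, as in txpciq[i].s.q_no. */
        for (int i = 0; i < NUM_RINGS; i++)
            printf("ring %d uses hardware queue %u\n", i,
                   (unsigned)txpciq[i].s.q_no);
        return 0;
    }
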
octeon_device.h
863 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
865 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
877 void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
cn23xx_pf_regs.h
558 #define CN23XX_DPI_DMA_REQQ_CTL(q_no) \ argument
559 (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))
octeon_nic.c
90 return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd, in octnet_send_nic_data_pkt()
request_manager.c
62 u32 iq_no = (u32)txpciq.s.q_no; in octeon_init_instr_queue()
209 u32 iq_no = (u32)txpciq.s.q_no; in octeon_setup_iq()
octeon_network.h
580 qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; in wake_txqs()
/Linux-v4.19/drivers/scsi/
advansys.c
257 #define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6)) argument
261 uchar q_no; member
305 uchar q_no; member
357 uchar q_no; member
6710 scsiq->q_no = (uchar)(_val >> 8); in _AscCopyLramScsiDoneQ()
7963 static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) in AscPutReadyQueue() argument
7985 q_addr = ASC_QNO_TO_QADDR(q_no); in AscPutReadyQueue()
8001 q_no << 8) | (ushort)QS_READY)); in AscPutReadyQueue()
8006 AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) in AscPutReadySgListQueue() argument
8036 q_addr = ASC_QNO_TO_QADDR(q_no); in AscPutReadySgListQueue()
[all …]
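
In advansys.c, q_no is a queue slot number: ASC_QNO_TO_QADDR() converts it to a queue-memory address at a 64-byte stride (q_no << 6), and AscPutReadyQueue() also packs it into the upper byte of a 16-bit status word ((q_no << 8) | QS_READY). A small illustration of those two manipulations; the base address and the QS_READY value are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented base; the real driver uses ASC_QADR_BEG.  Each queue slot is
     * 64 bytes, hence the << 6 in ASC_QNO_TO_QADDR(). */
    #define QADDR_BEG           0x4000
    #define QNO_TO_QADDR(q_no)  (QADDR_BEG + ((int)(q_no) << 6))

    #define QS_READY            0x01   /* assumed status code */

    int main(void)
    {
        unsigned char q_no = 5;

        /* 64-byte-strided slot address, as in ASC_QNO_TO_QADDR(q_no). */
        int q_addr = QNO_TO_QADDR(q_no);

        /* Queue number packed into the high byte of a 16-bit status word,
         * mirroring the (q_no << 8) | QS_READY hit above. */
        uint16_t status = (uint16_t)((q_no << 8) | QS_READY);

        printf("q_no %u -> addr 0x%x, status word 0x%04x\n",
               (unsigned)q_no, (unsigned)q_addr, (unsigned)status);
        return 0;
    }
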