/Linux-v5.4/drivers/net/ethernet/cavium/liquidio/

cn23xx_vf_device.c
    54  u32 q_no;    in cn23xx_vf_reset_io_queues() [local]
    57  for (q_no = 0; q_no < num_queues; q_no++) {    in cn23xx_vf_reset_io_queues()
    60  CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));    in cn23xx_vf_reset_io_queues()
    62  octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),    in cn23xx_vf_reset_io_queues()
    67  for (q_no = 0; q_no < num_queues; q_no++) {    in cn23xx_vf_reset_io_queues()
    69  CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));    in cn23xx_vf_reset_io_queues()
    74  oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));    in cn23xx_vf_reset_io_queues()
    80  q_no);    in cn23xx_vf_reset_io_queues()
    85  octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),    in cn23xx_vf_reset_io_queues()
    89  oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));    in cn23xx_vf_reset_io_queues()
  [additional matches not shown]

cn23xx_pf_device.c
   349  u32 q_no, srn, ern;    in cn23xx_reset_io_queues() [local]
   359  for (q_no = srn; q_no < ern; q_no++) {    in cn23xx_reset_io_queues()
   361  d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));    in cn23xx_reset_io_queues()
   363  octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);    in cn23xx_reset_io_queues()
   367  for (q_no = srn; q_no < ern; q_no++) {    in cn23xx_reset_io_queues()
   369  CN23XX_SLI_IQ_PKT_CONTROL64(q_no));    in cn23xx_reset_io_queues()
   374  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));    in cn23xx_reset_io_queues()
   379  q_no);    in cn23xx_reset_io_queues()
   384  octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),    in cn23xx_reset_io_queues()
   388  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));    in cn23xx_reset_io_queues()
  [additional matches not shown]

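Both reset routines above follow the same shape: loop over the queue numbers, read each queue's IQ_PKT_CONTROL64 CSR, set the reset request, write it back, then loop again polling the register until the hardware reports the queue as quiesced (or a retry budget runs out, at which point the q_no is logged). Below is a minimal user-space sketch of that request-then-poll pattern; the register layout, bit positions, CSR accessors, and the instant-completion "hardware" are all invented stand-ins, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES          64
#define IQ_PKT_CONTROL64(q) (0x10000ull + (uint64_t)(q) * 0x20000ull) /* hypothetical layout */
#define IQ_RESET_BIT        (1ULL << 0)   /* assumed "request reset" bit  */
#define IQ_QUIET_BIT        (1ULL << 28)  /* assumed "reset complete" bit */
#define RESET_POLL_LIMIT    1000

/* Fake CSR space so the sketch runs in user space; real code would do MMIO. */
static uint64_t csr_space[MAX_QUEUES];

static uint64_t read_csr64(uint64_t addr)
{
    return csr_space[(addr - 0x10000ull) / 0x20000ull];
}

static void write_csr64(uint64_t addr, uint64_t val)
{
    /* Pretend the hardware finishes the reset instantly. */
    if (val & IQ_RESET_BIT)
        val |= IQ_QUIET_BIT;
    csr_space[(addr - 0x10000ull) / 0x20000ull] = val;
}

/* Reset every IO queue in [srn, ern): first request the reset on each queue,
 * then poll each queue's control register until it reports completion. */
static int reset_io_queues(uint32_t srn, uint32_t ern)
{
    uint32_t q_no;

    for (q_no = srn; q_no < ern; q_no++) {
        uint64_t d64 = read_csr64(IQ_PKT_CONTROL64(q_no));

        d64 |= IQ_RESET_BIT;
        write_csr64(IQ_PKT_CONTROL64(q_no), d64);
    }

    for (q_no = srn; q_no < ern; q_no++) {
        int loops = RESET_POLL_LIMIT;

        while (!(read_csr64(IQ_PKT_CONTROL64(q_no)) & IQ_QUIET_BIT) && --loops)
            ;
        if (!loops) {
            fprintf(stderr, "queue %u failed to reset\n", q_no);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    return reset_io_queues(0, 8) ? 1 : 0;
}
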
octeon_droq.c
   198  int octeon_delete_droq(struct octeon_device *oct, u32 q_no)    in octeon_delete_droq() [argument]
   200  struct octeon_droq *droq = oct->droq[q_no];    in octeon_delete_droq()
   202  dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);    in octeon_delete_droq()
   212  oct->io_qmask.oq &= ~(1ULL << q_no);    in octeon_delete_droq()
   213  vfree(oct->droq[q_no]);    in octeon_delete_droq()
   214  oct->droq[q_no] = NULL;    in octeon_delete_droq()
   221  u32 q_no,    in octeon_init_droq() [argument]
   231  dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);    in octeon_init_droq()
   233  droq = oct->droq[q_no];    in octeon_init_droq()
   237  droq->q_no = q_no;    in octeon_init_droq()
  [additional matches not shown]

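The octeon_droq.c hits show the bookkeeping around an output queue: the device keeps an array of droq pointers plus a 64-bit in-use mask, and octeon_delete_droq() clears the queue's mask bit, frees the structure, and NULLs the slot. A small sketch of that array-plus-bitmask pattern follows; the struct names and fields are hypothetical simplifications, not the driver's real octeon_droq/octeon_device.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_OUTPUT_QUEUES 64

/* Hypothetical, pared-down stand-ins for struct octeon_droq / octeon_device. */
struct droq {
    uint32_t q_no;
    /* ... ring buffers, counters, etc. ... */
};

struct device_state {
    struct droq *droq[MAX_OUTPUT_QUEUES];
    uint64_t oq_mask;            /* bit q_no set => queue q_no exists */
};

/* Create a queue: allocate it, record its number, mark it in the mask. */
static int create_droq(struct device_state *dev, uint32_t q_no)
{
    struct droq *d;

    if (q_no >= MAX_OUTPUT_QUEUES || dev->droq[q_no])
        return -1;
    d = calloc(1, sizeof(*d));
    if (!d)
        return -1;
    d->q_no = q_no;
    dev->droq[q_no] = d;
    dev->oq_mask |= 1ULL << q_no;
    return 0;
}

/* Delete a queue: clear its mask bit, free it, and NULL the slot,
 * mirroring the teardown order seen in octeon_delete_droq(). */
static int delete_droq(struct device_state *dev, uint32_t q_no)
{
    if (q_no >= MAX_OUTPUT_QUEUES || !dev->droq[q_no])
        return -1;
    dev->oq_mask &= ~(1ULL << q_no);
    free(dev->droq[q_no]);
    dev->droq[q_no] = NULL;
    return 0;
}

int main(void)
{
    struct device_state dev = { 0 };

    create_droq(&dev, 3);
    printf("mask after create: 0x%llx\n", (unsigned long long)dev.oq_mask);
    delete_droq(&dev, 3);
    printf("mask after delete: 0x%llx\n", (unsigned long long)dev.oq_mask);
    return 0;
}
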
octeon_droq.h
   248  u32 q_no;    [member]
   338  u32 q_no,
   350  int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
   365  u32 q_no,
   376  int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
   402  int octeon_create_droq(struct octeon_device *oct, u32 q_no,
   412  int octeon_enable_irq(struct octeon_device *oct, u32 q_no);

lio_core.c
   168  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in liquidio_set_feature()
   433  struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];    in octeon_schedule_rxq_oom_work()
   444  int q_no = wk->ctxul;    in octnet_poll_check_rxq_oom_status() [local]
   445  struct octeon_droq *droq = oct->droq[q_no];    in octnet_poll_check_rxq_oom_status()
   459  int q, q_no;    in setup_rx_oom_poll_fn() [local]
   462  q_no = lio->linfo.rxpciq[q].s.q_no;    in setup_rx_oom_poll_fn()
   463  wq = &lio->rxq_status_wq[q_no];    in setup_rx_oom_poll_fn()
   474  wq->wk.ctxul = q_no;    in setup_rx_oom_poll_fn()
   485  int q_no;    in cleanup_rx_oom_poll_fn() [local]
   487  for (q_no = 0; q_no < oct->num_oqs; q_no++) {    in cleanup_rx_oom_poll_fn()
  [additional matches not shown]

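The lio_core.c matches outline a per-RX-queue out-of-memory poller: each queue gets its own work item, the queue number is stashed in the work context (wk.ctxul = q_no), and the deferred handler reads it back to find the right queue. The sketch below models only that "carry q_no in the deferred-work context" idea; the work struct, dispatcher, and refill logic are invented for illustration and do not use the kernel workqueue API.

#include <stdint.h>
#include <stdio.h>

#define NUM_RX_QUEUES 4

/* Hypothetical stand-in for the driver's work item: a callback plus the
 * context it needs, here just the queue number it is responsible for. */
struct deferred_work {
    void (*fn)(struct deferred_work *wk);
    uint64_t ctxul;              /* queue number stashed as context */
};

/* Hypothetical per-queue receive state. */
struct rx_queue {
    uint32_t q_no;
    int buffers_missing;
};

static struct rx_queue rxq[NUM_RX_QUEUES];
static struct deferred_work rxq_oom_work[NUM_RX_QUEUES];

/* Deferred handler: recover the queue number from the context, then poll
 * that queue, in the spirit of octnet_poll_check_rxq_oom_status(). */
static void check_rxq_oom(struct deferred_work *wk)
{
    uint32_t q_no = (uint32_t)wk->ctxul;
    struct rx_queue *q = &rxq[q_no];

    if (q->buffers_missing)
        printf("queue %u: refilling %d buffers\n", q_no, q->buffers_missing);
    q->buffers_missing = 0;
}

int main(void)
{
    uint32_t q_no;

    /* Setup: one work item per queue, each remembering its q_no. */
    for (q_no = 0; q_no < NUM_RX_QUEUES; q_no++) {
        rxq[q_no].q_no = q_no;
        rxq_oom_work[q_no].fn = check_rxq_oom;
        rxq_oom_work[q_no].ctxul = q_no;
    }

    rxq[2].buffers_missing = 7;

    /* "Schedule" the work: here we simply run every handler once. */
    for (q_no = 0; q_no < NUM_RX_QUEUES; q_no++)
        rxq_oom_work[q_no].fn(&rxq_oom_work[q_no]);
    return 0;
}
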
octeon_mailbox.c
    65  mbox->mbox_req.q_no = mbox->q_no;    in octeon_mbox_read()
    77  mbox->mbox_resp.q_no = mbox->q_no;    in octeon_mbox_read()
   134  struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];    in octeon_mbox_write()
   262  mbox->q_no);    in octeon_mbox_process_cmd()
   263  pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);    in octeon_mbox_process_cmd()
   354  int octeon_mbox_cancel(struct octeon_device *oct, int q_no)    in octeon_mbox_cancel() [argument]
   356  struct octeon_mbox *mbox = oct->mbox[q_no];    in octeon_mbox_cancel()

octeon_device.c
   894  txpciq.s.q_no = iq_no;    in octeon_setup_instr_queues()
   953  u32 q_no;    in octeon_set_io_queues_off() [local]
   959  for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {    in octeon_set_io_queues_off()
   961  oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));    in octeon_set_io_queues_off()
   967  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));    in octeon_set_io_queues_off()
   973  q_no);    in octeon_set_io_queues_off()
   979  CN23XX_SLI_IQ_PKT_CONTROL64(q_no),    in octeon_set_io_queues_off()
   983  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));    in octeon_set_io_queues_off()
   986  "unable to reset qno %u\n", q_no);    in octeon_set_io_queues_off()
   995  u32 q_no,    in octeon_set_droq_pkt_op() [argument]
  [additional matches not shown]

octeon_mailbox.h
    65  u32 q_no;    [member]
    88  u32 q_no;    [member]
   120  int octeon_mbox_cancel(struct octeon_device *oct, int q_no);

lio_ethtool.c
   482  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in lio_send_queue_count_update()
   717  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in octnet_gpio_access()
   743  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in octnet_id_active()
   786  sc->iq_no = lio->linfo.txpciq[0].s.q_no;    in octnet_mdio45_access()
  1071  lio->txq = lio->linfo.txpciq[0].s.q_no;    in lio_23xx_reconfigure_queue_count()
  1072  lio->rxq = lio->linfo.rxpciq[0].s.q_no;    in lio_23xx_reconfigure_queue_count()
  1391  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in lio_set_pauseparam()
  1756  j = lio->linfo.txpciq[vj].s.q_no;    in lio_vf_get_ethtool_stats()
  1798  j = lio->linfo.rxpciq[vj].s.q_no;    in lio_vf_get_ethtool_stats()
  2015  sc->iq_no = lio->linfo.txpciq[0].s.q_no;    in octnet_get_intrmod_cfg()
  [additional matches not shown]

octeon_nic.h
    85  u32 q_no;    [member]
   112  static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)    in octnet_iq_is_full() [argument]
   114  return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)    in octnet_iq_is_full()
   115  >= (oct->instr_queue[q_no]->max_count - 2));    in octnet_iq_is_full()

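The inline helper above reports an instruction queue as full once the pending-command count reaches max_count - 2, i.e. two descriptors are always held in reserve. Below is a user-space sketch of the same threshold check, with a hypothetical cut-down queue struct standing in for the driver's octeon_instr_queue.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical instruction-queue state; only the two fields the fullness
 * test actually reads. */
struct instr_queue {
    atomic_int instr_pending;    /* commands posted but not yet retired */
    uint32_t max_count;          /* ring size in descriptors            */
};

/* Mirrors the check in octnet_iq_is_full(): the ring is reported full two
 * entries early so the producer always has a little slack left. */
static int iq_is_full(struct instr_queue *iq)
{
    return (uint32_t)atomic_load(&iq->instr_pending) >= iq->max_count - 2;
}

int main(void)
{
    struct instr_queue iq = { .max_count = 64 };

    atomic_store(&iq.instr_pending, 61);
    printf("pending=61 full=%d\n", iq_is_full(&iq));  /* 0: 61 < 62  */
    atomic_store(&iq.instr_pending, 62);
    printf("pending=62 full=%d\n", iq_is_full(&iq));  /* 1: 62 >= 62 */
    return 0;
}

Keeping a couple of descriptors free is a common ring-buffer convention so the producer never completely wraps onto unconsumed entries.
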
cn66xx_regs.h
   473  #define CN6XXX_DPI_DMA_ENG_ENB(q_no) \    [argument]
   474  (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
   478  #define CN6XXX_DPI_DMA_ENG_BUF(q_no) \    [argument]
   479  (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))

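These macros compute a per-engine register address as "engine 0's register plus q_no * 8": each engine owns one 64-bit register, packed back to back. The sketch below shows that base-plus-stride addressing with made-up base offsets; only the shape of the macro matches the header.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical base offsets; only the "base + q_no * stride" shape matters. */
#define DPI_DMA_ENG0_ENB 0x0080ull
#define DPI_DMA_ENG0_BUF 0x0880ull

/* Per-engine registers are 64-bit and laid out consecutively, so engine
 * q_no's copy lives 8 * q_no bytes past engine 0's register. */
#define DPI_DMA_ENG_ENB(q_no) (DPI_DMA_ENG0_ENB + ((q_no) * 8))
#define DPI_DMA_ENG_BUF(q_no) (DPI_DMA_ENG0_BUF + ((q_no) * 8))

int main(void)
{
    for (unsigned int q_no = 0; q_no < 4; q_no++)
        printf("eng %u: ENB @ 0x%04llx, BUF @ 0x%04llx\n", q_no,
               (unsigned long long)DPI_DMA_ENG_ENB(q_no),
               (unsigned long long)DPI_DMA_ENG_BUF(q_no));
    return 0;
}
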
lio_main.c
   167  int q_no;    in octeon_droq_bh() [local]
   173  for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {    in octeon_droq_bh()
   174  if (!(oct->io_qmask.oq & BIT_ULL(q_no)))    in octeon_droq_bh()
   176  reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],    in octeon_droq_bh()
   178  lio_enable_irq(oct->droq[q_no], NULL);    in octeon_droq_bh()
   184  int adjusted_q_no = q_no + oct->sriov_info.pf_srn;    in octeon_droq_bh()
   501  lio->oct_dev->num_iqs].s.q_no;    in check_txq_status()
   666  sc->iq_no = lio->linfo.txpciq[0].s.q_no;    in lio_sync_octeon_time()
  1209  sc->iq_no = lio->linfo.txpciq[0].s.q_no;    in send_rx_ctrl_cmd()
  1322  lio->linfo.rxpciq[j].s.q_no);    in liquidio_stop_nic_module()
  [additional matches not shown]

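octeon_droq_bh() walks every possible output queue number but skips any queue whose bit is clear in io_qmask.oq, processes packets on the live queues, and re-enables their interrupts. A minimal sketch of that mask-gated iteration follows; the per-queue work function and budget handling are placeholders, not the driver's octeon_droq_process_packets().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_OUTPUT_QUEUES 64
#define BIT_ULL(n) (1ULL << (n))

/* Hypothetical per-queue work function: returns true if more work remains
 * and the poll should be rescheduled. */
static bool process_queue(uint32_t q_no, int budget)
{
    printf("processing queue %u (budget %d)\n", q_no, budget);
    return false;
}

/* Walk every possible queue number but only touch the ones whose bit is set
 * in the mask, mirroring the io_qmask.oq test in octeon_droq_bh(). */
static bool poll_output_queues(uint64_t oq_mask, int budget)
{
    bool reschedule = false;
    uint32_t q_no;

    for (q_no = 0; q_no < MAX_OUTPUT_QUEUES; q_no++) {
        if (!(oq_mask & BIT_ULL(q_no)))
            continue;
        reschedule |= process_queue(q_no, budget);
        /* the real driver re-enables the queue's interrupt here */
    }
    return reschedule;
}

int main(void)
{
    /* queues 0, 1 and 5 exist */
    poll_output_queues(BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(5), 64);
    return 0;
}
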
lio_vf_main.c
   623  sc->iq_no = lio->linfo.txpciq[0].s.q_no;    in send_rx_ctrl_cmd()
   731  lio->linfo.rxpciq[j].s.q_no);    in liquidio_stop_nic_module()
  1050  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in liquidio_set_uc_list()
  1099  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in liquidio_set_mcast_list()
  1141  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;    in liquidio_set_mac()
  1183  iq_no = lio->linfo.txpciq[i].s.q_no;    in liquidio_get_stats64()
  1199  oq_no = lio->linfo.rxpciq[i].s.q_no;    in liquidio_get_stats64()
  1378  sc->iq_no = ndata->q_no;    in send_nic_timestamp_pkt()
  1426  iq_no = lio->linfo.txpciq[q_idx].s.q_no;    in liquidio_xmit()
  1453  ndata.q_no = iq_no;    in liquidio_xmit()
  [additional matches not shown]

liquidio_common.h
   734  u64 q_no:8;    [member]
   750  u64 q_no:8;
   762  u64 q_no:8;    [member]
   766  u64 q_no:8;

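All four hits are 8-bit q_no bit-fields packed into 64-bit words exchanged with the firmware, so a queue number always fits in one byte of the descriptor. The sketch below packs a queue number into a 64-bit command word the same way; the field names, widths, and ordering are invented, and bit-field layout is implementation-defined, which is why headers of this kind typically guard the layout with endianness ifdefs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: an 8-bit queue number packed into a 64-bit command
 * word alongside other fields (names and widths invented; 16+8+16+24 = 64). */
union cmd_word {
    uint64_t u64;
    struct {
        uint64_t opcode   : 16;
        uint64_t q_no     : 8;   /* queue number fits in one byte */
        uint64_t length   : 16;
        uint64_t reserved : 24;
    } s;
};

int main(void)
{
    union cmd_word w = { .u64 = 0 };

    w.s.opcode = 0x1234;
    w.s.q_no   = 37;
    w.s.length = 512;
    printf("raw word: 0x%016llx, q_no read back: %u\n",
           (unsigned long long)w.u64, (unsigned int)w.s.q_no);
    return 0;
}
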
octeon_device.h
   870  int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
   872  int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
   884  void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);

cn23xx_pf_regs.h
   558  #define CN23XX_DPI_DMA_REQQ_CTL(q_no) \    [argument]
   559  (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))

octeon_nic.c
    89  return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,    in octnet_send_nic_data_pkt()

octeon_network.h
   577  qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;    in wake_txqs()

request_manager.c
    62  u32 iq_no = (u32)txpciq.s.q_no;    in octeon_init_instr_queue()
   210  u32 iq_no = (u32)txpciq.s.q_no;    in octeon_setup_iq()

/Linux-v5.4/drivers/scsi/

advansys.c
   253  #define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6))    [argument]
   257  uchar q_no;    [member]
   301  uchar q_no;    [member]
   353  uchar q_no;    [member]
  6704  scsiq->q_no = (uchar)(_val >> 8);    in _AscCopyLramScsiDoneQ()
  7957  static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)    in AscPutReadyQueue() [argument]
  7979  q_addr = ASC_QNO_TO_QADDR(q_no);    in AscPutReadyQueue()
  7995  q_no << 8) | (ushort)QS_READY));    in AscPutReadyQueue()
  8000  AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)    in AscPutReadySgListQueue() [argument]
  8030  q_addr = ASC_QNO_TO_QADDR(q_no);    in AscPutReadySgListQueue()
  [additional matches not shown]

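In advansys.c the queue number is an index into board LRAM: ASC_QNO_TO_QADDR() shifts q_no left by 6, so each queue occupies a 64-byte slot starting at ASC_QADR_BEG. Below is a sketch of that number-to-address mapping with an assumed base value; the real ASC_QADR_BEG is defined elsewhere in advansys.c.

#include <stdio.h>

/* Hypothetical base of the queue area in board LRAM; the driver's actual
 * value is ASC_QADR_BEG in advansys.c. */
#define QADR_BEG      0x4000
#define QUEUE_SLOT_SZ 64        /* 1 << 6 bytes per queue */

/* Same shape as ASC_QNO_TO_QADDR(): queue n starts 64*n bytes past the
 * beginning of the queue area. */
#define QNO_TO_QADDR(q_no) ((QADR_BEG) + ((int)(q_no) << 6))

int main(void)
{
    for (unsigned char q_no = 0; q_no < 4; q_no++)
        printf("queue %u -> LRAM address 0x%04x\n",
               (unsigned int)q_no, QNO_TO_QADDR(q_no));
    return 0;
}
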