Searched refs:iq (Results 1 – 25 of 38) sorted by relevance


/Linux-v4.19/drivers/crypto/cavium/zip/
zip_device.c
59 return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) * in zip_cmd_queue_consumed()
98 spin_lock(&zip_dev->iq[queue].lock); in zip_load_instr()
109 zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head); in zip_load_instr()
110 zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail); in zip_load_instr()
117 memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, in zip_load_instr()
119 zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ in zip_load_instr()
122 ncb_ptr = zip_dev->iq[queue].sw_head; in zip_load_instr()
125 ncb_ptr, zip_dev->iq[queue].sw_head - 16); in zip_load_instr()
128 zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail; in zip_load_instr()
130 zip_dev->iq[queue].free_flag = 1; in zip_load_instr()
[all …]
zip_mem.c
59 zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA), in zip_cmd_qbuf_alloc()
62 if (!zip->iq[q].sw_head) in zip_cmd_qbuf_alloc()
65 memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE); in zip_cmd_qbuf_alloc()
67 zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head); in zip_cmd_qbuf_alloc()
78 zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail); in zip_cmd_qbuf_free()
80 free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE)); in zip_cmd_qbuf_free()
zip_main.c
167 memset(&zip->iq[q], 0x0, sizeof(struct zip_iq)); in zip_init_hw()
169 spin_lock_init(&zip->iq[q].lock); in zip_init_hw()
180 zip->iq[q].sw_tail = zip->iq[q].sw_head; in zip_init_hw()
181 zip->iq[q].hw_tail = zip->iq[q].sw_head; in zip_init_hw()
185 que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >> in zip_init_hw()
198 zip->iq[q].sw_head, zip->iq[q].sw_tail, in zip_init_hw()
199 zip->iq[q].hw_tail); in zip_init_hw()
zip_main.h
108 struct zip_iq iq[ZIP_MAX_NUM_QUEUES]; member
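
Taken together, the zip driver hits sketch a software-managed command ring: zip_load_instr() copies each 128-byte instruction to sw_head, advances the head by 16 64-bit words, wraps back to sw_tail when the buffer ends (setting free_flag), and zip_cmd_queue_consumed() measures the head/tail distance. A minimal user-space sketch of that discipline, with invented names (ring_t, QBUF_WORDS) and sizes:

#include <stdint.h>
#include <string.h>

#define QBUF_WORDS 1024       /* hypothetical ring capacity, in 64-bit words */
#define CMD_WORDS  16         /* 16 x 64-bit words = one 128-byte command */

typedef struct {
    uint64_t buf[QBUF_WORDS];
    uint64_t *sw_head;        /* next free slot */
    uint64_t *sw_tail;        /* start of buffer, the wrap target */
} ring_t;

static void ring_init(ring_t *r)
{
    memset(r->buf, 0, sizeof(r->buf));
    r->sw_head = r->sw_tail = r->buf;
}

/* Mirrors zip_load_instr(): copy at sw_head, advance 16 words, wrap. */
static void ring_push(ring_t *r, const uint64_t cmd[CMD_WORDS])
{
    memcpy(r->sw_head, cmd, CMD_WORDS * sizeof(uint64_t));
    r->sw_head += CMD_WORDS;
    if (r->sw_head - r->buf >= QBUF_WORDS)
        r->sw_head = r->sw_tail;    /* wrap, as the driver does */
}

/* Rough analogue of zip_cmd_queue_consumed(): head/tail distance in bytes. */
static long ring_consumed(const ring_t *r)
{
    return (r->sw_head - r->sw_tail) * (long)sizeof(uint64_t);
}

int main(void)
{
    ring_t r;
    uint64_t cmd[CMD_WORDS] = { 0 };

    ring_init(&r);
    ring_push(&r, cmd);
    return ring_consumed(&r) == 128 ? 0 : 1;   /* one command = 128 bytes */
}
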
/Linux-v4.19/drivers/net/ethernet/cavium/liquidio/
request_manager.c
45 struct octeon_instr_queue *iq = in IQ_INSTR_MODE_64B() local
47 return iq->iqcmd_64B; in IQ_INSTR_MODE_64B()
60 struct octeon_instr_queue *iq; in octeon_init_instr_queue() local
82 iq = oct->instr_queue[iq_no]; in octeon_init_instr_queue()
84 iq->oct_dev = oct; in octeon_init_instr_queue()
86 iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma); in octeon_init_instr_queue()
87 if (!iq->base_addr) { in octeon_init_instr_queue()
93 iq->max_count = num_descs; in octeon_init_instr_queue()
98 iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs), in octeon_init_instr_queue()
100 if (!iq->request_list) in octeon_init_instr_queue()
[all …]
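
octeon_init_instr_queue() above pairs a DMA-coherent descriptor ring (lio_dma_alloc) with an equally sized, host-only request_list (vmalloc_node), both dimensioned by num_descs. A hedged user-space sketch of that paired allocation, with calloc standing in for both allocators and an invented struct request:

#include <stdlib.h>

#define DESC_SIZE 64                 /* hypothetical descriptor size */

struct request { void *buf; int reqtype; };  /* invented bookkeeping entry */

struct iq {
    void *base_addr;                 /* descriptor ring (DMA memory in-kernel) */
    unsigned max_count;
    struct request *request_list;    /* one host-side entry per descriptor */
};

/* Mirrors the octeon_init_instr_queue() shape: ring first, bookkeeping
 * second, and unwind the ring if the second allocation fails. */
static int init_iq(struct iq *q, unsigned num_descs)
{
    q->base_addr = calloc(num_descs, DESC_SIZE);   /* lio_dma_alloc() stand-in */
    if (!q->base_addr)
        return -1;
    q->max_count = num_descs;

    q->request_list = calloc(num_descs, sizeof(*q->request_list)); /* vmalloc_node() stand-in */
    if (!q->request_list) {
        free(q->base_addr);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct iq q;
    return init_iq(&q, 128);
}
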
cn23xx_vf_regs.h
70 #define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \ argument
71 (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
73 #define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \ argument
74 (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
76 #define CN23XX_VF_SLI_IQ_SIZE(iq) \ argument
77 (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
79 #define CN23XX_VF_SLI_IQ_DOORBELL(iq) \ argument
80 (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
82 #define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \ argument
83 (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
cn66xx_regs.h
143 #define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \ argument
144 (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
146 #define CN6XXX_SLI_IQ_SIZE(iq) \ argument
147 (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
149 #define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \ argument
150 (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
152 #define CN6XXX_SLI_IQ_DOORBELL(iq) \ argument
153 (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
155 #define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \ argument
156 (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
[all …]
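
Both register headers follow one convention: the CSR address for queue n is a fixed start address plus n times a per-queue stride. A sketch of the pattern with made-up base and stride values (the real ones live in cn23xx_vf_regs.h / cn66xx_regs.h):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical bases and stride; not the real CN23XX/CN6XXX offsets. */
#define SLI_IQ_DOORBELL_START  0x10000ULL
#define SLI_IQ_SIZE_START      0x20000ULL
#define IQ_OFFSET              0x20000ULL

/* Same shape as CN23XX_SLI_IQ_DOORBELL(iq) et al.: start + queue * stride. */
#define SLI_IQ_DOORBELL(iq) (SLI_IQ_DOORBELL_START + ((iq) * IQ_OFFSET))
#define SLI_IQ_SIZE(iq)     (SLI_IQ_SIZE_START + ((iq) * IQ_OFFSET))

int main(void)
{
    for (uint64_t iq = 0; iq < 4; iq++)
        printf("iq %llu: doorbell @ 0x%llx, size @ 0x%llx\n",
               (unsigned long long)iq,
               (unsigned long long)SLI_IQ_DOORBELL(iq),
               (unsigned long long)SLI_IQ_SIZE(iq));
    return 0;
}
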
cn23xx_vf_device.c
104 struct octeon_instr_queue *iq; in cn23xx_vf_setup_global_input_regs() local
116 iq = oct->instr_queue[q_no]; in cn23xx_vf_setup_global_input_regs()
118 if (iq) in cn23xx_vf_setup_global_input_regs()
119 inst_cnt_reg = iq->inst_cnt_reg; in cn23xx_vf_setup_global_input_regs()
214 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in cn23xx_setup_vf_iq_regs() local
219 iq->base_addr_dma); in cn23xx_setup_vf_iq_regs()
220 octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count); in cn23xx_setup_vf_iq_regs()
225 iq->doorbell_reg = in cn23xx_setup_vf_iq_regs()
227 iq->inst_cnt_reg = in cn23xx_setup_vf_iq_regs()
230 iq_no, iq->doorbell_reg, iq->inst_cnt_reg); in cn23xx_setup_vf_iq_regs()
[all …]
cn23xx_pf_regs.h
170 #define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \ argument
171 (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
173 #define CN23XX_SLI_IQ_BASE_ADDR64(iq) \ argument
174 (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
176 #define CN23XX_SLI_IQ_SIZE(iq) \ argument
177 (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
179 #define CN23XX_SLI_IQ_DOORBELL(iq) \ argument
180 (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
182 #define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \ argument
183 (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
octeon_config.h
121 #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
122 #define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
123 #define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
124 #define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
125 #define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
126 #define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
128 #define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
129 #define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
410 struct octeon_iq_config iq; member
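
The CFG_GET_IQ_*/CFG_SET_IQ_* hits are thin accessor macros over the struct octeon_iq_config iq member embedded in the device config. A minimal sketch of the same style, with an invented config layout:

#include <stdio.h>

/* Invented stand-ins for struct octeon_iq_config / octeon_config. */
struct iq_config {
    int max_iqs;
    int pending_list_size;
    int db_min;
    int db_timeout;
};

struct dev_config {
    struct iq_config iq;         /* embedded, as in octeon_config.h */
};

/* Accessors in the CFG_GET_IQ_* / CFG_SET_IQ_* style. */
#define CFG_GET_IQ_MAX_Q(cfg)      ((cfg)->iq.max_iqs)
#define CFG_GET_IQ_DB_MIN(cfg)     ((cfg)->iq.db_min)
#define CFG_SET_IQ_DB_MIN(cfg, v)  ((cfg)->iq.db_min = (v))

int main(void)
{
    struct dev_config cfg = { .iq = { .max_iqs = 8, .db_min = 1 } };

    CFG_SET_IQ_DB_MIN(&cfg, 4);
    printf("max_iqs=%d db_min=%d\n",
           CFG_GET_IQ_MAX_Q(&cfg), CFG_GET_IQ_DB_MIN(&cfg));
    return 0;
}
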
cn66xx_device.c
269 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in lio_cn6xxx_setup_iq_regs() local
275 iq->base_addr_dma); in lio_cn6xxx_setup_iq_regs()
276 octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count); in lio_cn6xxx_setup_iq_regs()
281 iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no); in lio_cn6xxx_setup_iq_regs()
282 iq->inst_cnt_reg = oct->mmio[0].hw_addr in lio_cn6xxx_setup_iq_regs()
285 iq_no, iq->doorbell_reg, iq->inst_cnt_reg); in lio_cn6xxx_setup_iq_regs()
290 iq->reset_instr_cnt = readl(iq->inst_cnt_reg); in lio_cn6xxx_setup_iq_regs()
342 mask |= oct->io_qmask.iq; in lio_cn6xxx_enable_io_queues()
360 mask ^= oct->io_qmask.iq; in lio_cn6xxx_disable_io_queues()
364 mask = (u32)oct->io_qmask.iq; in lio_cn6xxx_disable_io_queues()
[all …]
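
The enable/disable hits drive a per-device bitmask of active instruction queues (io_qmask.iq): the enable path ORs it into the queue-enable register, and the cn6xxx disable path XORs it back out, which clears the bits only because they are known to be set. A hedged sketch (the register variable is a stand-in, not a CSR):

#include <stdio.h>
#include <stdint.h>

static uint64_t enable_reg;    /* stand-in for the queue-enable CSR */

/* Like lio_cn6xxx_enable_io_queues(): OR the queue mask in. */
static void enable_io_queues(uint64_t qmask)
{
    enable_reg |= qmask;
}

/* Like the cn6xxx disable path: XOR the mask out.  This clears the
 * bits only because enable set every one of them beforehand. */
static void disable_io_queues(uint64_t qmask)
{
    enable_reg ^= qmask;
}

int main(void)
{
    uint64_t iq_mask = (1ULL << 0) | (1ULL << 2);   /* queues 0 and 2 active */

    enable_io_queues(iq_mask);
    printf("after enable:  0x%llx\n", (unsigned long long)enable_reg);
    disable_io_queues(iq_mask);
    printf("after disable: 0x%llx\n", (unsigned long long)enable_reg);
    return 0;
}
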
cn68xx_regs.h
32 #define CN68XX_SLI_IQ_PORT_PKIND(iq) \ argument
33 (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
octeon_device.c
41 .iq = {
150 .iq = {
316 .iq = {
419 .iq = {
655 if (oct->io_qmask.iq & BIT_ULL(i)) in octeon_free_device_mem()
1275 (oct->io_qmask.iq & BIT_ULL(q_no))) in octeon_get_tx_qsize()
1434 void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) in lio_enable_irq() argument
1451 if (iq) { in lio_enable_irq()
1452 spin_lock_bh(&iq->lock); in lio_enable_irq()
1453 writel(iq->pkt_in_done, iq->inst_cnt_reg); in lio_enable_irq()
[all …]
cn23xx_pf_device.c
403 struct octeon_instr_queue *iq; in cn23xx_pf_setup_global_input_regs() local
446 iq = oct->instr_queue[q_no]; in cn23xx_pf_setup_global_input_regs()
447 if (iq) in cn23xx_pf_setup_global_input_regs()
448 inst_cnt_reg = iq->inst_cnt_reg; in cn23xx_pf_setup_global_input_regs()
589 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in cn23xx_setup_iq_regs() local
596 iq->base_addr_dma); in cn23xx_setup_iq_regs()
597 octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count); in cn23xx_setup_iq_regs()
602 iq->doorbell_reg = in cn23xx_setup_iq_regs()
604 iq->inst_cnt_reg = in cn23xx_setup_iq_regs()
607 iq_no, iq->doorbell_reg, iq->inst_cnt_reg); in cn23xx_setup_iq_regs()
[all …]
lio_vf_main.c
131 struct octeon_instr_queue *iq; in pcierror_quiesce_device() local
133 if (!(oct->io_qmask.iq & BIT_ULL(i))) in pcierror_quiesce_device()
135 iq = oct->instr_queue[i]; in pcierror_quiesce_device()
137 if (atomic_read(&iq->instr_pending)) { in pcierror_quiesce_device()
138 spin_lock_bh(&iq->lock); in pcierror_quiesce_device()
139 iq->fill_cnt = 0; in pcierror_quiesce_device()
140 iq->octeon_read_index = iq->host_write_index; in pcierror_quiesce_device()
141 iq->stats.instr_processed += in pcierror_quiesce_device()
142 atomic_read(&iq->instr_pending); in pcierror_quiesce_device()
143 lio_process_iq_request_list(oct, iq, 0); in pcierror_quiesce_device()
[all …]
octeon_iq.h
365 struct octeon_instr_queue *iq, u32 napi_budget);
384 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
lio_main.c
272 struct octeon_instr_queue *iq; in pcierror_quiesce_device() local
274 if (!(oct->io_qmask.iq & BIT_ULL(i))) in pcierror_quiesce_device()
276 iq = oct->instr_queue[i]; in pcierror_quiesce_device()
278 if (atomic_read(&iq->instr_pending)) { in pcierror_quiesce_device()
279 spin_lock_bh(&iq->lock); in pcierror_quiesce_device()
280 iq->fill_cnt = 0; in pcierror_quiesce_device()
281 iq->octeon_read_index = iq->host_write_index; in pcierror_quiesce_device()
282 iq->stats.instr_processed += in pcierror_quiesce_device()
283 atomic_read(&iq->instr_pending); in pcierror_quiesce_device()
284 lio_process_iq_request_list(oct, iq, 0); in pcierror_quiesce_device()
[all …]
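
pcierror_quiesce_device() appears almost verbatim in lio_vf_main.c and lio_main.c: for each queue with pending instructions it takes the lock, zeroes fill_cnt, fast-forwards the read index to the host write index, and counts the pending instructions as processed before handing off to lio_process_iq_request_list(). A miniature of that drain step, with an invented struct (the real one is struct octeon_instr_queue):

#include <stdio.h>

/* Invented miniature of struct octeon_instr_queue. */
struct iq {
    int fill_cnt;
    unsigned read_index;
    unsigned host_write_index;
    int instr_pending;
    long instr_processed;            /* stats counter */
};

/* Mirrors the quiesce pattern: drop partial fills, catch the read
 * index up to the write index, account pending work as processed. */
static void quiesce_iq(struct iq *q)
{
    if (q->instr_pending) {
        q->fill_cnt = 0;
        q->read_index = q->host_write_index;
        q->instr_processed += q->instr_pending;
        q->instr_pending = 0;        /* the request-list walk does this in-kernel */
    }
}

int main(void)
{
    struct iq q = { .fill_cnt = 3, .read_index = 10,
                    .host_write_index = 14, .instr_pending = 4 };

    quiesce_iq(&q);
    printf("read=%u pending=%d processed=%ld\n",
           q.read_index, q.instr_pending, q.instr_processed);
    return 0;
}
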
lio_core.c
523 struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; in lio_update_txq_status() local
527 netdev = oct->props[iq->ifidx].netdev; in lio_update_txq_status()
536 if (__netif_subqueue_stopped(netdev, iq->q_index) && in lio_update_txq_status()
539 netif_wake_subqueue(netdev, iq->q_index); in lio_update_txq_status()
767 struct octeon_instr_queue *iq; in liquidio_napi_poll() local
781 iq = oct->instr_queue[iq_no]; in liquidio_napi_poll()
782 if (iq) { in liquidio_napi_poll()
786 if (atomic_read(&iq->instr_pending)) in liquidio_napi_poll()
788 tx_done = octeon_flush_iq(oct, iq, budget); in liquidio_napi_poll()
804 (iq && iq->pkt_in_done >= MAX_REG_CNT) || in liquidio_napi_poll()
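
liquidio_napi_poll() couples RX work with TX completion: when the paired instruction queue has pending entries it calls octeon_flush_iq() under the same NAPI budget, and it only finishes polling once RX is under budget, TX is drained, and pkt_in_done has not grown past MAX_REG_CNT. A sketch of that control flow; every function here is an invented stub, not driver API:

#include <stdio.h>

#define MAX_REG_CNT 0x7fffffff       /* assumed threshold, as in the hits */

struct iq { int instr_pending; int pkt_in_done; };

static int rx_work(int budget) { (void)budget; return 3; }           /* stub */
static int flush_iq(struct iq *q, int budget)                        /* stub */
{ (void)budget; q->instr_pending = 0; return 1; }
static void reenable_irq(void) { printf("irq re-enabled\n"); }       /* stub */

static int napi_poll(struct iq *iq, int budget)
{
    int work_done = rx_work(budget);     /* RX side */
    int tx_done = 1;

    if (iq && iq->instr_pending)         /* TX completions share the budget */
        tx_done = flush_iq(iq, budget);

    /* Complete polling only when RX is under budget, TX is drained,
     * and the inst_cnt register hasn't run up to its limit. */
    if (work_done < budget && tx_done &&
        !(iq && iq->pkt_in_done >= MAX_REG_CNT)) {
        reenable_irq();
        return work_done;
    }
    return budget;                       /* stay scheduled */
}

int main(void)
{
    struct iq q = { .instr_pending = 2, .pkt_in_done = 0 };

    return napi_poll(&q, 64) < 64 ? 0 : 1;
}
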
/Linux-v4.19/drivers/scsi/csiostor/
csio_isr.c
212 csio_scsi_isr_handler(struct csio_q *iq) in csio_scsi_isr_handler() argument
214 struct csio_hw *hw = (struct csio_hw *)iq->owner; in csio_scsi_isr_handler()
223 if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl, in csio_scsi_isr_handler()
258 struct csio_q *iq = (struct csio_q *) dev_id; in csio_scsi_isr() local
261 if (unlikely(!iq)) in csio_scsi_isr()
264 hw = (struct csio_hw *)iq->owner; in csio_scsi_isr()
271 csio_scsi_isr_handler(iq); in csio_scsi_isr()
288 struct csio_q *iq = priv; in csio_scsi_intx_handler() local
290 csio_scsi_isr_handler(iq); in csio_scsi_intx_handler()
csio_wr.h
410 struct csio_iq iq; member
463 #define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
465 ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
467 ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
473 #define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
476 csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)
csio_wr.c
254 q->un.iq.genbit = 1; in csio_wr_alloc_q()
277 q->un.iq.flq_idx = flq_idx; in csio_wr_alloc_q()
279 flq = wrm->q_arr[q->un.iq.flq_idx]; in csio_wr_alloc_q()
305 q->un.iq.flq_idx = -1; in csio_wr_alloc_q()
309 q->un.iq.iq_intx_handler = iq_intx_handler; in csio_wr_alloc_q()
770 q->un.iq.genbit = 1; in csio_wr_cleanup_iq_ftr()
1052 struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; in csio_wr_process_fl()
1111 return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT)); in csio_is_new_iqwr()
1138 wrm->q_arr[q->un.iq.flq_idx] : NULL; in csio_wr_process_iq()
1182 q_completed->un.iq.iq_intx_handler); in csio_wr_process_iq()
[all …]
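
The csiostor hits show a generation-bit handshake: queues are created with genbit = 1 (csio_wr_alloc_q, csio_wr_cleanup_iq_ftr), and csio_is_new_iqwr() accepts an entry only while the gen flag in its footer matches the queue's current genbit; the bit flips each time the queue wraps. A minimal sketch of that ownership test, with invented struct layouts and an assumed bit position:

#include <stdio.h>
#include <stdint.h>

#define IQWRF_GEN_SHIFT 7              /* assumed position of the gen flag */

struct iq_footer { uint8_t type_gen; };    /* invented footer layout */
struct iq_state  { uint8_t genbit; };

/* Same shape as csio_is_new_iqwr(): the entry is new while its gen
 * bit matches the queue's current generation. */
static int is_new_iqwr(const struct iq_state *q, const struct iq_footer *ftr)
{
    return q->genbit == (ftr->type_gen >> IQWRF_GEN_SHIFT);
}

int main(void)
{
    struct iq_state q = { .genbit = 1 };                /* queues start at 1 */
    struct iq_footer ftr = { .type_gen = 1 << IQWRF_GEN_SHIFT };

    printf("new? %d\n", is_new_iqwr(&q, &ftr));
    q.genbit ^= 1;                                       /* flipped on wrap */
    printf("new after wrap? %d\n", is_new_iqwr(&q, &ftr));
    return 0;
}
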
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4/
sge.c
3376 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, in t4_sge_alloc_rxq() argument
3388 iq->size = roundup(iq->size, 16); in t4_sge_alloc_rxq()
3390 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
3391 &iq->phys_addr, NULL, 0, in t4_sge_alloc_rxq()
3393 if (!iq->desc) in t4_sge_alloc_rxq()
3410 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | in t4_sge_alloc_rxq()
3411 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); in t4_sge_alloc_rxq()
3412 c.iqsize = htons(iq->size); in t4_sge_alloc_rxq()
3413 c.iqaddr = cpu_to_be64(iq->phys_addr); in t4_sge_alloc_rxq()
3472 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); in t4_sge_alloc_rxq()
[all …]
cxgb4_filter.c
325 int iq; in get_filter_steerq() local
333 if (fs->iq) in get_filter_steerq()
335 iq = 0; in get_filter_steerq()
342 if (fs->iq < pi->nqsets) in get_filter_steerq()
343 iq = adapter->sge.ethrxq[pi->first_qset + in get_filter_steerq()
344 fs->iq].rspq.abs_id; in get_filter_steerq()
346 iq = fs->iq; in get_filter_steerq()
349 return iq; in get_filter_steerq()
602 FW_FILTER_WR_IQ_V(f->fs.iq)); in set_filter_wr()
1018 RSS_QUEUE_V(f->fs.iq) | in mk_act_open_req6()
[all …]
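
As the fragmentary get_filter_steerq() hits suggest, the user-supplied fs->iq is resolved into an absolute response-queue id: zero means no steering, a value below the port's nqsets indexes the port's own RX queue sets, and anything larger is taken as an absolute id. A condensed, hedged sketch (structs and the id table are invented):

#include <stdio.h>

/* Invented miniatures, not the cxgb4 types. */
struct port { int first_qset; int nqsets; };

static int rspq_abs_id[8] = { 100, 101, 102, 103, 104, 105, 106, 107 };

static int filter_steerq(const struct port *pi, int fs_iq)
{
    if (!fs_iq)
        return 0;                   /* 0: no steering requested */
    if (fs_iq < pi->nqsets)         /* small values: index into the port's qsets */
        return rspq_abs_id[pi->first_qset + fs_iq];
    return fs_iq;                   /* otherwise: already an absolute queue id */
}

int main(void)
{
    struct port pi = { .first_qset = 2, .nqsets = 4 };

    printf("%d %d %d\n", filter_steerq(&pi, 0),
           filter_steerq(&pi, 1), filter_steerq(&pi, 42));
    return 0;
}
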
/Linux-v4.19/drivers/media/tuners/
r820t.c
1598 static void r820t_compre_cor(struct r820t_sect_type iq[3]) in r820t_compre_cor()
1603 if (iq[0].value > iq[i - 1].value) in r820t_compre_cor()
1604 swap(iq[0], iq[i - 1]); in r820t_compre_cor()
1609 struct r820t_sect_type iq[3], u8 reg) in r820t_compre_step()
1622 tmp.phase_y = iq[0].phase_y; in r820t_compre_step()
1623 tmp.gain_x = iq[0].gain_x; in r820t_compre_step()
1645 if (tmp.value <= iq[0].value) { in r820t_compre_step()
1646 iq[0].gain_x = tmp.gain_x; in r820t_compre_step()
1647 iq[0].phase_y = tmp.phase_y; in r820t_compre_step()
1648 iq[0].value = tmp.value; in r820t_compre_step()
[all …]
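
In the r820t tuner, iq[3] holds three IQ-calibration candidates: r820t_compre_cor() bubbles the lowest .value to iq[0], and r820t_compre_step() overwrites iq[0] whenever a probed gain/phase setting scores no worse. A small sketch of the keep-the-minimum part (field names from the hits; the helper name is invented):

#include <stdio.h>

struct sect { int gain_x, phase_y, value; };  /* fields as in r820t_sect_type */

/* Like r820t_compre_cor(): bubble the smallest .value down to iq[0]. */
static void keep_best(struct sect iq[3])
{
    for (int i = 3; i > 0; i--)
        if (iq[0].value > iq[i - 1].value) {
            struct sect t = iq[0];
            iq[0] = iq[i - 1];
            iq[i - 1] = t;
        }
}

int main(void)
{
    struct sect iq[3] = { {1, 1, 30}, {2, 2, 10}, {3, 3, 20} };

    keep_best(iq);
    printf("best: value=%d gain_x=%d phase_y=%d\n",
           iq[0].value, iq[0].gain_x, iq[0].phase_y);
    return 0;
}
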
/Linux-v4.19/scripts/
tags.sh
281 if $1 --version 2>&1 | grep -iq exuberant; then
283 elif $1 --version 2>&1 | grep -iq emacs; then
