/Linux-v4.19/drivers/net/ethernet/amazon/ena/ |
D | ena_eth_com.c |
    42 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_get_next_rx_cdesc()
    67 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) in ena_com_cq_inc_head()
    76 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in get_sq_desc()
    85 u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in ena_com_copy_curr_sq_desc_to_dev()
    102 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_sq_update_tail()
    109 u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in ena_com_write_header()
    129 idx &= (io_cq->q_depth - 1); in ena_com_rx_cdesc_idx_to_ptr()
    157 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_cdesc_rx_pkt_get()
    486 masked_head = io_cq->head & (io_cq->q_depth - 1); in ena_com_tx_comp_req_id_get()
    502 if (unlikely(cdesc->req_id >= io_cq->q_depth)) { in ena_com_tx_comp_req_id_get()
|
D | ena_com.c |
    113 u16 size = ADMIN_SQ_SIZE(queue->q_depth); in ena_com_admin_init_sq()
    135 u16 size = ADMIN_CQ_SIZE(queue->q_depth); in ena_com_admin_init_cq()
    158 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; in ena_com_admin_init_aenq()
    168 aenq->head = aenq->q_depth; in ena_com_admin_init_aenq()
    178 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; in ena_com_admin_init_aenq()
    204 if (unlikely(command_id >= queue->q_depth)) { in get_comp_ctxt()
    206 command_id, queue->q_depth); in get_comp_ctxt()
    234 queue_size_mask = admin_queue->q_depth - 1; in __ena_com_submit_admin_cmd()
    240 if (cnt >= admin_queue->q_depth) { in __ena_com_submit_admin_cmd()
    283 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); in ena_com_init_comp_ctxt()
    [all …]
|
D | ena_eth_com.h |
    107 return io_sq->q_depth - 1 - cnt; in ena_com_sq_empty_space()
    131 need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); in ena_com_update_dev_comp_head()
|
D | ena_com.h |
    169 u16 q_depth; member
    194 u16 q_depth; member
    240 u16 q_depth; member
    267 u16 q_depth; member
|
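Every ENA hit above leans on the same idiom: q_depth is a power of two, so (q_depth - 1) works as a wrap mask for free-running head/tail counters, and a completion's req_id can be bounds-checked against q_depth. A minimal user-space sketch of that idiom, assuming a power-of-two depth (toy_ring and its helpers are illustrative names, not ENA code):

    #include <stdint.h>
    #include <assert.h>

    /* Toy ring: q_depth must be a power of two so that (q_depth - 1)
     * works as a wrap mask, like io_cq->head & (io_cq->q_depth - 1)
     * in the hits above.  The head counter runs free; only the masked
     * value ever indexes the descriptor array. */
    struct toy_ring {
        uint16_t head;     /* free-running counter */
        uint16_t q_depth;  /* power of two */
    };

    static uint16_t toy_ring_masked_head(const struct toy_ring *r)
    {
        return r->head & (r->q_depth - 1);
    }

    static void toy_ring_inc_head(struct toy_ring *r)
    {
        r->head++;
        /* masked value returning to 0 means one full lap was completed */
        if ((r->head & (r->q_depth - 1)) == 0) {
            /* e.g. flip a phase bit here */
        }
    }

    int main(void)
    {
        struct toy_ring r = { .head = 0, .q_depth = 8 };
        assert((r.q_depth & (r.q_depth - 1)) == 0);  /* power-of-two check */
        for (int i = 0; i < 10; i++)
            toy_ring_inc_head(&r);
        assert(toy_ring_masked_head(&r) == 10 % 8);
        return 0;
    }

Keeping the counter free-running and masking only at access time is what lets a helper like ena_com_cq_inc_head() detect a wrap simply by seeing the masked value come back to zero.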
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_wq.c |
    43 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
    510 u16 wqebb_size, u16 wq_page_size, u16 q_depth, in hinic_wq_allocate() argument
    528 if (q_depth & (q_depth - 1)) { in hinic_wq_allocate()
    550 wq->q_depth = q_depth; in hinic_wq_allocate()
    566 atomic_set(&wq->delta, q_depth); in hinic_wq_allocate()
    567 wq->mask = q_depth - 1; in hinic_wq_allocate()
    604 u16 q_depth, u16 max_wqe_size) in hinic_wqs_cmdq_alloc() argument
    620 if (q_depth & (q_depth - 1)) { in hinic_wqs_cmdq_alloc()
    647 wq[i].q_depth = q_depth; in hinic_wqs_cmdq_alloc()
    664 atomic_set(&wq[i].delta, q_depth); in hinic_wqs_cmdq_alloc()
    [all …]
|
D | hinic_hw_wq.h |
    39 u16 q_depth; member
    88 u16 q_depth, u16 max_wqe_size);
    99 u16 wqebb_size, u16 wq_page_size, u16 q_depth,
|
D | hinic_hw_qp.c |
    230 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
    258 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); in alloc_rq_skb_arr()
    330 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
    335 cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); in alloc_rq_cqe()
    340 for (i = 0; i < wq->q_depth; i++) { in alloc_rq_cqe()
    373 for (i = 0; i < wq->q_depth; i++) in free_rq_cqe()
|
D | hinic_hw_cmdq.c |
    377 if (next_prod_idx >= wq->q_depth) { in cmdq_sync_cmd_direct_resp()
    379 next_prod_idx -= wq->q_depth; in cmdq_sync_cmd_direct_resp()
    454 if (next_prod_idx >= wq->q_depth) { in cmdq_set_arm_bit()
    456 next_prod_idx -= wq->q_depth; in cmdq_set_arm_bit()
    756 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); in init_cmdq()
    761 wq->q_depth)); in init_cmdq()
|
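The hinic allocation paths above reject any q_depth that is not a power of two (the q_depth & (q_depth - 1) test), then derive both the index mask (q_depth - 1) and the queue's byte size (q_depth * wqebb_size, per WQ_SIZE) from it. A rough stand-alone sketch of those checks, using illustrative names (toy_wq, toy_wq_alloc) rather than the driver's:

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>
    #include <stdlib.h>

    /* Sketch of the allocation-time checks the hinic hits show: refuse a
     * depth that is not a power of two, then derive the index mask and
     * the work-queue byte size from it. */
    struct toy_wq {
        uint16_t q_depth;    /* number of WQEBBs in the queue */
        uint16_t wqebb_size; /* bytes per work queue element basic block */
        uint16_t mask;       /* q_depth - 1, valid because depth is 2^n */
        void *buf;
    };

    static int toy_wq_alloc(struct toy_wq *wq, uint16_t q_depth, uint16_t wqebb_size)
    {
        if (q_depth & (q_depth - 1)) {
            fprintf(stderr, "wq depth must be a power of 2\n");
            return -EINVAL;
        }
        wq->q_depth = q_depth;
        wq->wqebb_size = wqebb_size;
        wq->mask = q_depth - 1;
        wq->buf = calloc(q_depth, wqebb_size);  /* WQ_SIZE = q_depth * wqebb_size */
        return wq->buf ? 0 : -ENOMEM;
    }

    int main(void)
    {
        struct toy_wq wq;
        return toy_wq_alloc(&wq, 256, 64) ? 1 : 0;
    }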
/Linux-v4.19/drivers/mmc/core/ |
D | queue.c |
    381 static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth, in mmc_mq_init_queue() argument
    388 mq->tag_set.queue_depth = q_depth; in mmc_mq_init_queue()
    424 int q_depth; in mmc_mq_init() local
    432 q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth); in mmc_mq_init()
    434 q_depth = MMC_QUEUE_DEPTH; in mmc_mq_init()
    436 ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock); in mmc_mq_init()
|
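mmc_mq_init() picks the blk-mq queue depth as the smaller of the card's advertised command-queue depth and the host CQE's depth, falling back to a fixed default when no CQE is in use, and then feeds that value into the tag set. A hedged sketch of that selection, with an illustrative default constant standing in for MMC_QUEUE_DEPTH:

    #include <stdio.h>

    /* Illustrative default; the real driver uses MMC_QUEUE_DEPTH. */
    #define TOY_DEFAULT_QUEUE_DEPTH 64

    /* Sketch of the depth selection in mmc_mq_init(): with a command
     * queue engine, take the smaller of what the card advertises and
     * what the host CQE supports; otherwise use the fixed default. */
    static int toy_pick_q_depth(int use_cqe, int card_cmdq_depth, int host_cqe_qdepth)
    {
        if (use_cqe)
            return card_cmdq_depth < host_cqe_qdepth ?
                   card_cmdq_depth : host_cqe_qdepth;   /* min_t(int, ...) */
        return TOY_DEFAULT_QUEUE_DEPTH;
    }

    int main(void)
    {
        printf("depth = %d\n", toy_pick_q_depth(1, 32, 16)); /* -> 16 */
        printf("depth = %d\n", toy_pick_q_depth(0, 32, 16)); /* -> 64 */
        return 0;
    }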
/Linux-v4.19/drivers/nvme/host/ |
D | pci.c |
    95 int q_depth; member
    168 u16 q_depth; member
    456 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_cmd()
    876 if (unlikely(cqe->command_id >= nvmeq->q_depth)) { in nvme_handle_cqe()
    904 if (++start == nvmeq->q_depth) in nvme_complete_cqes()
    911 if (++nvmeq->cq_head == nvmeq->q_depth) { in nvme_update_cq_head()
    1024 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
    1054 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
    1233 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
    1236 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
    [all …]
|
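Two details stand out in the NVMe hits: the create-CQ/SQ admin commands carry a zero-based queue size (q_depth - 1), and the head/tail indices wrap by comparison against q_depth rather than by masking, so the depth does not have to be a power of two. A small sketch of both points, using an illustrative struct rather than the driver's nvme_queue:

    #include <stdint.h>
    #include <assert.h>

    struct toy_nvme_queue {
        uint16_t q_depth;
        uint16_t sq_tail;
    };

    /* Zero-based size field, as in c.create_sq.qsize = q_depth - 1. */
    static uint16_t toy_qsize_field(const struct toy_nvme_queue *q)
    {
        return q->q_depth - 1;
    }

    /* Wrap by comparison, not masking, as in nvme_submit_cmd(). */
    static void toy_submit(struct toy_nvme_queue *q)
    {
        if (++q->sq_tail == q->q_depth)
            q->sq_tail = 0;
    }

    int main(void)
    {
        struct toy_nvme_queue q = { .q_depth = 3, .sq_tail = 0 };
        assert(toy_qsize_field(&q) == 2);
        toy_submit(&q); toy_submit(&q); toy_submit(&q);
        assert(q.sq_tail == 0);   /* wrapped after q_depth submissions */
        return 0;
    }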
/Linux-v4.19/drivers/net/ethernet/brocade/bna/ |
D | bnad.c |
    86 for (i = 0; i < ccb->q_depth; i++) { in bnad_cq_cleanup()
    99 u32 q_depth, u32 index) in bnad_tx_buff_unmap() argument
    122 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
    134 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
    151 for (i = 0; i < tcb->q_depth; i++) { in bnad_txq_cleanup()
    155 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
    170 u32 wis, unmap_wis, hw_cons, cons, q_depth; in bnad_txcmpl_process() local
    182 q_depth = tcb->q_depth; in bnad_txcmpl_process()
    184 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); in bnad_txcmpl_process()
    185 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); in bnad_txcmpl_process()
    [all …]
|
D | bna_types.h |
    430 u32 q_depth; member
    558 u32 q_depth; member
    574 int q_depth; member
    622 u32 q_depth; member
|
D | bfa_msgq.c |
    526 msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth); in bfa_msgq_init()
    528 msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth); in bfa_msgq_init()
|
D | bfi.h | 421 u16 q_depth; /* Total num of entries in the queue */ member
|
D | bna_tx_rx.c |
    2393 q0->rcb->q_depth = rx_cfg->q0_depth; in bna_rx_create()
    2394 q0->q_depth = rx_cfg->q0_depth; in bna_rx_create()
    2420 q1->rcb->q_depth = rx_cfg->q1_depth; in bna_rx_create()
    2421 q1->q_depth = rx_cfg->q1_depth; in bna_rx_create()
    2452 rxp->cq.ccb->q_depth = cq_depth; in bna_rx_create()
    3474 txq->tcb->q_depth = tx_cfg->txq_depth; in bna_tx_create()
|
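The bna hits mostly go through helper macros (BNA_QE_INDX_INC, BNA_Q_INDEX_CHANGE, BNA_QE_IN_USE_CNT) that advance and compare ring indices modulo q_depth; their definitions are not part of these hits, so the sketch below is only a generic illustration of that kind of index arithmetic, not the real macros:

    #include <stdint.h>
    #include <assert.h>

    /* Generic modulo-q_depth index helpers; illustrative only. */
    static inline uint32_t toy_indx_inc(uint32_t index, uint32_t q_depth)
    {
        return (index + 1) % q_depth;            /* advance one queue entry */
    }

    static inline uint32_t toy_index_change(uint32_t cons, uint32_t hw_cons,
                                            uint32_t q_depth)
    {
        /* how far the hardware consumer moved past the software consumer */
        return (hw_cons - cons + q_depth) % q_depth;
    }

    int main(void)
    {
        assert(toy_indx_inc(63, 64) == 0);
        assert(toy_index_change(60, 2, 64) == 6);
        return 0;
    }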
/Linux-v4.19/drivers/block/rsxx/ |
D | cregs.c |
    147 card->creg_ctrl.q_depth--; in creg_kick_queue()
    199 card->creg_ctrl.q_depth++; in creg_queue_cmd()
    336 card->creg_ctrl.q_depth--; in creg_reset()
    413 card->creg_ctrl.q_depth + 20000); in __issue_creg_rw()
    720 card->creg_ctrl.q_depth++; in rsxx_eeh_save_issued_creg()
|
D | rsxx_priv.h | 142 unsigned int q_depth; member
|
D | dma.c |
    207 u32 q_depth = 0; in dma_intr_coal_auto_tune() local
    215 q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth); in dma_intr_coal_auto_tune()
    218 q_depth / 2, in dma_intr_coal_auto_tune()
|
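rsxx uses q_depth in two distinct roles above: cregs.c keeps it as a live count of queued register commands (and adds it to a constant in __issue_creg_rw()), while dma.c totals the hardware queue depth across the DMA channels and passes q_depth / 2 into the interrupt-coalescing auto-tune. A hedged sketch of the second pattern, with a made-up channel count and values:

    #include <stdio.h>

    #define TOY_CHANNELS 4   /* illustrative; not the rsxx channel count */

    /* Sum the per-channel hardware queue depths and hand half of the
     * total onward, mirroring the q_depth / 2 argument in the dma.c hit. */
    static unsigned int toy_coal_count(const unsigned int hw_q_depth[TOY_CHANNELS])
    {
        unsigned int q_depth = 0;

        for (int i = 0; i < TOY_CHANNELS; i++)
            q_depth += hw_q_depth[i];   /* total outstanding DMAs */

        return q_depth / 2;
    }

    int main(void)
    {
        unsigned int hw[TOY_CHANNELS] = { 10, 4, 0, 6 };
        printf("coalescing count = %u\n", toy_coal_count(hw)); /* -> 10 */
        return 0;
    }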
/Linux-v4.19/drivers/scsi/bfa/ |
D | bfi.h |
    546 #define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
    548 #define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
    549 #define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
    552 #define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
    593 u16 q_depth; /* Total num of entries in the queue */ member
|
D | bfa_fcpim.h | 127 u16 q_depth; member
|
D | bfa_defs_svc.h | 966 u16 q_depth; /* SCSI Queue depth */ member
|
D | bfa_svc.c |
    3575 fcport->cfg.q_depth = in bfa_fcport_isr()
    3576 cpu_to_be16(fcport->cfg.q_depth); in bfa_fcport_isr()
    3996 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); in bfa_fcport_get_attr()
|
D | bfa_fcpim.c | 505 return fcpim->q_depth; in bfa_fcpim_qdepth_get()
|
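The BFI_MSGQ_* macros quoted in bfi.h above are a complete producer/consumer ring protocol: the queue counts as full when advancing pi would land on ci (so one slot is always sacrificed), and BFI_MSGQ_FREE_CNT masks with (q_depth - 1), which is only exact when q_depth is a power of two, whereas the other macros use plain modulo. A small self-contained exercise of the same definitions (toy_msgq and the TOY_* macros are illustrative stand-ins, not the bfi.h originals):

    #include <stdint.h>
    #include <assert.h>

    struct toy_msgq {
        uint16_t pi;       /* producer index */
        uint16_t ci;       /* consumer index */
        uint16_t q_depth;  /* total entries; power of two for FREE_CNT */
    };

    /* Same arithmetic as the BFI_MSGQ_* hits, with added parentheses. */
    #define TOY_MSGQ_FULL(_q)      ((((_q)->pi + 1) % (_q)->q_depth) == (_q)->ci)
    #define TOY_MSGQ_UPDATE_PI(_q) ((_q)->pi = ((_q)->pi + 1) % (_q)->q_depth)
    #define TOY_MSGQ_FREE_CNT(_q)  (((_q)->ci - (_q)->pi - 1) & ((_q)->q_depth - 1))

    int main(void)
    {
        struct toy_msgq q = { .pi = 0, .ci = 0, .q_depth = 8 };

        while (!TOY_MSGQ_FULL(&q))
            TOY_MSGQ_UPDATE_PI(&q);

        assert(q.pi == 7);                  /* one slot stays unused */
        assert(TOY_MSGQ_FREE_CNT(&q) == 0); /* no free entries left */
        return 0;
    }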