Lines matching refs: q_depth

95  int q_depth;  member
168 u16 q_depth;  member
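
These references all appear to come from the Linux NVMe PCI driver, drivers/nvme/host/pci.c (the function names and the q_dmadev/CQ_SIZE(depth) usage suggest a kernel around v5.1-v5.3). The first two hits are the two owners of the value: a per-controller member holding the negotiated depth for every I/O queue, and a per-queue copy consulted on every ring operation. An abbreviated sketch, assuming that layout (most fields omitted):

    #include <stdint.h>
    typedef uint16_t u16;               /* kernel fixed-width alias */

    /* Abbreviated sketch of the two owners of q_depth; most fields of the
     * real structs in drivers/nvme/host/pci.c are omitted. */
    struct nvme_dev {
        int q_depth;                    /* depth negotiated for all queues */
        /* ... */
    };

    struct nvme_queue {
        u16 q_depth;                    /* entries in this SQ/CQ pair */
        u16 sq_tail;                    /* next free submission queue slot */
        u16 cq_head;                    /* next completion entry to consume */
        /* ... */
    };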
456 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_cmd()
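
Line 456 is the submission-side ring advance: the tail wraps to slot 0 once it reaches q_depth. The same arithmetic as a standalone sketch (names are illustrative, not the driver's):

    #include <stdint.h>

    /* Sketch of the tail advance on line 456: a plain ring-buffer
     * increment that wraps back to slot 0 at q_depth. */
    static inline uint16_t sq_tail_advance(uint16_t sq_tail, uint16_t q_depth)
    {
        return (uint16_t)(sq_tail + 1 == q_depth ? 0 : sq_tail + 1);
    }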
876 if (unlikely(cqe->command_id >= nvmeq->q_depth)) { in nvme_handle_cqe()
904 if (++start == nvmeq->q_depth) in nvme_complete_cqes()
911 if (++nvmeq->cq_head == nvmeq->q_depth) { in nvme_update_cq_head()
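
Lines 876-911 are the completion side: an arriving CQE's command_id is bounds-checked against q_depth (line 876), the completion scan index wraps the same way as the tail (line 904), and when cq_head wraps, the expected phase bit is flipped (line 911), since the controller inverts the phase tag on each pass through the CQ. A sketch of the head update, mirroring nvme_update_cq_head():

    #include <stdint.h>
    #include <stdbool.h>

    /* Sketch of the CQ head advance with phase flip; the struct is
     * illustrative, not the driver's nvme_queue. */
    struct cq_state {
        uint16_t cq_head;
        uint16_t q_depth;
        bool     cq_phase;
    };

    static void cq_advance(struct cq_state *cq)
    {
        if (++cq->cq_head == cq->q_depth) {
            cq->cq_head = 0;
            /* the controller inverts the phase tag on each CQ wrap, so
             * flip the phase we expect to see in newly posted entries */
            cq->cq_phase = !cq->cq_phase;
        }
    }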
1024 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1054 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
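
Lines 1024 and 1054 encode the queue size into the Create I/O CQ/SQ admin commands. The NVMe specification defines QSIZE as a 0's based value, which is why both sites send q_depth - 1. A sketch (the driver additionally byte-swaps with cpu_to_le16()):

    #include <stdint.h>

    /* Sketch: QSIZE in the Create I/O CQ/SQ commands is 0's based per the
     * NVMe spec, so a 1024-entry queue is advertised as 1023. */
    static uint16_t qsize_field(uint16_t q_depth)
    {
        return (uint16_t)(q_depth - 1);
    }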
1233 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
1236 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
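
Lines 1233/1236 free the DMA-coherent rings using size helpers defined earlier in the same file. A sketch of those helpers, assuming the depth-based macro form used in this era (an NVMe SQ entry is 64 bytes and a CQ entry 16; parentheses added for macro hygiene):

    #include <stddef.h>

    /* Sketch of the size helpers behind lines 1233/1236; the byte counts
     * stand in for sizeof(struct nvme_command) and
     * sizeof(struct nvme_completion). */
    #define NVME_SQE_BYTES  64
    #define NVME_CQE_BYTES  16
    #define SQ_SIZE(depth)  ((size_t)(depth) * NVME_SQE_BYTES)
    #define CQ_SIZE(depth)  ((size_t)(depth) * NVME_CQE_BYTES)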
1302 int q_depth = dev->q_depth; in nvme_cmb_qdepth() local
1303 unsigned q_size_aligned = roundup(q_depth * entry_size, in nvme_cmb_qdepth()
1309 q_depth = div_u64(mem_per_q, entry_size); in nvme_cmb_qdepth()
1316 if (q_depth < 64) in nvme_cmb_qdepth()
1320 return q_depth; in nvme_cmb_qdepth()
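
Lines 1302-1320, nvme_cmb_qdepth(), shrink the queue depth until every submission queue fits in the controller memory buffer; if that would drop below 64 entries, the driver gives up and keeps the queues in host memory at the original depth. A standalone sketch of the same computation (div_u64()/roundup()/round_down() reimplemented in plain C; -1 stands in for -ENOMEM):

    #include <stdint.h>

    /* Sketch of the CMB depth calculation; names and the 64-entry floor
     * follow nvme_cmb_qdepth(). */
    static int cmb_qdepth(int q_depth, int nr_io_queues, int entry_size,
                          uint64_t cmb_size, uint32_t page_size)
    {
        /* per-queue footprint, rounded up to the controller page size */
        uint64_t q_size_aligned =
            ((uint64_t)q_depth * entry_size + page_size - 1)
                / page_size * page_size;

        if (q_size_aligned * nr_io_queues > cmb_size) {
            /* split the CMB evenly, round down to whole pages, and see
             * how many entries still fit in each queue */
            uint64_t mem_per_q = cmb_size / nr_io_queues;
            mem_per_q -= mem_per_q % page_size;
            q_depth = (int)(mem_per_q / entry_size);

            /* below 64 entries it is better to map the queues in host
             * memory at the original depth */
            if (q_depth < 64)
                return -1;
        }
        return q_depth;
    }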
1359 nvmeq->q_depth = depth; in nvme_alloc_queue()
1396 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
1409 unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth), in nvme_create_queue()
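
Lines 1359-1409 tie allocation and placement to the depth: nvme_alloc_queue() records it on the queue, nvme_init_queue() zeroes the whole CQE array so every phase tag starts at 0, and when SQs live in the CMB each queue is laid out at a page-rounded, depth-derived offset. A sketch of that offset arithmetic (names are illustrative):

    #include <stddef.h>

    /* Sketch of the CMB placement on line 1409: the SQ for 1-based queue
     * `qid` sits one page-rounded SQ size per preceding queue into the
     * buffer; ROUNDUP mirrors the kernel's roundup(). */
    #define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    static size_t sq_cmb_offset(int qid, size_t sq_bytes, size_t page_size)
    {
        return (size_t)(qid - 1) * ROUNDUP(sq_bytes, page_size);
    }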
1570 aqa = nvmeq->q_depth - 1; in nvme_pci_configure_admin_queue()
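
Line 1570 programs the admin queue depth into the AQA (Admin Queue Attributes) register, which carries 0's based sizes: ASQS in bits 11:0 and ACQS in bits 27:16. The driver uses one depth for both admin rings, so it mirrors the value into the upper field. A sketch of the encoding:

    #include <stdint.h>

    /* Sketch of the AQA encoding: ASQS in bits 11:0, ACQS in bits 27:16,
     * both 0's based; the same depth serves the admin SQ and CQ. */
    static uint32_t aqa_encode(uint16_t q_depth)
    {
        uint32_t aqa = (uint32_t)(q_depth - 1);
        aqa |= aqa << 16;
        return aqa;
    }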
1598 if (nvme_alloc_queue(dev, i, dev->q_depth)) { in nvme_create_io_queues()
1896 dev->q_depth = result; in nvme_setup_io_queues()
2030 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; in nvme_dev_add()
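
Lines 1598-2030 feed the depth into I/O queue creation and the blk-mq tag set. Line 2030 clamps at BLK_MQ_MAX_DEPTH and then subtracts one: an NVMe submission queue of N slots can hold at most N - 1 outstanding commands, because a completely full ring would be indistinguishable from an empty one (the spec defines Full as head == tail + 1). A sketch of that clamp (BLK_MQ_MAX_DEPTH is 10240 in this era, but treat that as an assumption):

    /* Sketch: usable blk-mq tags per hardware queue. The final -1 keeps
     * one SQ slot permanently empty so full and empty rings stay
     * distinct. */
    #define BLK_MQ_MAX_DEPTH 10240   /* assumed value of the blk-mq cap */

    static int usable_tags(int q_depth)
    {
        int d = q_depth < BLK_MQ_MAX_DEPTH ? q_depth : BLK_MQ_MAX_DEPTH;
        return d - 1;
    }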
2088 dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, in nvme_pci_enable()
2098 dev->q_depth = 2; in nvme_pci_enable()
2101 dev->q_depth); in nvme_pci_enable()
2105 dev->q_depth = 64; in nvme_pci_enable()
2107 "set queue depth=%u\n", dev->q_depth); in nvme_pci_enable()
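
Lines 2088-2107, in nvme_pci_enable(), negotiate the final depth: CAP.MQES (bits 15:0 of the controller's CAP register) is a 0's based maximum, so the hardware limit is MQES + 1, clamped by the requested I/O queue depth. The two hard-coded overrides at lines 2098 and 2105 are device quirks; in mainline they correspond to an Apple controller that misbehaves above a depth of 2 and a Samsung PM1725 that reports MQES as 0, hence the forced 64. A sketch of the negotiation:

    #include <stdint.h>

    /* Sketch of the depth negotiation on line 2088: MQES is a 0's based
     * maximum in the low 16 bits of CAP, so MQES + 1 is the hardware
     * limit, further clamped by the requested depth. */
    #define NVME_CAP_MQES(cap)  ((cap) & 0xffff)

    static int negotiated_depth(uint64_t cap, int io_queue_depth)
    {
        int hw_max = (int)NVME_CAP_MQES(cap) + 1;
        return hw_max < io_queue_depth ? hw_max : io_queue_depth;
    }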