Lines matching refs: nvmeq
226 struct nvme_queue *nvmeq; member
284 struct nvme_queue *nvmeq, int qid) in nvme_dbbuf_init() argument
289 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
290 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
291 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
292 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
295 static void nvme_dbbuf_free(struct nvme_queue *nvmeq) in nvme_dbbuf_free() argument
297 if (!nvmeq->qid) in nvme_dbbuf_free()
300 nvmeq->dbbuf_sq_db = NULL; in nvme_dbbuf_free()
301 nvmeq->dbbuf_cq_db = NULL; in nvme_dbbuf_free()
302 nvmeq->dbbuf_sq_ei = NULL; in nvme_dbbuf_free()
303 nvmeq->dbbuf_cq_ei = NULL; in nvme_dbbuf_free()
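The dbbuf fragments above wire each I/O queue to the controller's shadow-doorbell buffers. A minimal sketch assembled from those lines follows; the early-return guards and the sq_idx()/cq_idx() helpers (assumed to compute qid * 2 * stride and (qid * 2 + 1) * stride) are not part of the listing and are reconstructed here.

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	/* Guard reconstructed: skip if no shadow buffers, or for the admin queue. */
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
	/* The admin queue never had shadow doorbells attached. */
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;
}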
399 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx() local
404 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
412 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; in nvme_init_hctx() local
415 hctx->driver_data = nvmeq; in nvme_init_hctx()
425 struct nvme_queue *nvmeq = &dev->queues[queue_idx]; in nvme_init_request() local
427 BUG_ON(!nvmeq); in nvme_init_request()
428 iod->nvmeq = nvmeq; in nvme_init_request()
477 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) in nvme_write_sq_db() argument
480 u16 next_tail = nvmeq->sq_tail + 1; in nvme_write_sq_db()
482 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
484 if (next_tail != nvmeq->last_sq_tail) in nvme_write_sq_db()
488 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, in nvme_write_sq_db()
489 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) in nvme_write_sq_db()
490 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_write_sq_db()
491 nvmeq->last_sq_tail = nvmeq->sq_tail; in nvme_write_sq_db()
500 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, in nvme_submit_cmd() argument
503 spin_lock(&nvmeq->sq_lock); in nvme_submit_cmd()
504 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), in nvme_submit_cmd()
506 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_cmd()
507 nvmeq->sq_tail = 0; in nvme_submit_cmd()
508 nvme_write_sq_db(nvmeq, write_sq); in nvme_submit_cmd()
509 spin_unlock(&nvmeq->sq_lock); in nvme_submit_cmd()
514 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs() local
516 spin_lock(&nvmeq->sq_lock); in nvme_commit_rqs()
517 if (nvmeq->sq_tail != nvmeq->last_sq_tail) in nvme_commit_rqs()
518 nvme_write_sq_db(nvmeq, true); in nvme_commit_rqs()
519 spin_unlock(&nvmeq->sq_lock); in nvme_commit_rqs()
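The fragments at 477-519 form the submission fast path: nvme_submit_cmd() copies the command into the SQ ring under sq_lock and advances sq_tail, nvme_write_sq_db() rings the hardware doorbell either immediately (write_sq) or lazily, and nvme_commit_rqs() flushes any deferred tail at the end of a batch. A sketch assembled from those lines, with the unlisted wrap and return lines reconstructed:

static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		/* Skip the MMIO write unless the next command would wrap
		 * onto the last tail value the device was told about. */
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}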
538 if (!iod->nvmeq->qid) in nvme_pci_use_sgls()
830 if (iod->nvmeq->qid && in nvme_map_data()
886 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq() local
887 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq()
901 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_queue_rq()
921 nvme_submit_cmd(nvmeq, &cmnd, bd->last); in nvme_queue_rq()
933 struct nvme_dev *dev = iod->nvmeq->dev; in nvme_pci_complete_rq()
944 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) in nvme_cqe_pending() argument
946 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
948 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; in nvme_cqe_pending()
951 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) in nvme_ring_cq_doorbell() argument
953 u16 head = nvmeq->cq_head; in nvme_ring_cq_doorbell()
955 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, in nvme_ring_cq_doorbell()
956 nvmeq->dbbuf_cq_ei)) in nvme_ring_cq_doorbell()
957 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_ring_cq_doorbell()
960 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) in nvme_queue_tagset() argument
962 if (!nvmeq->qid) in nvme_queue_tagset()
963 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
964 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; in nvme_queue_tagset()
967 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) in nvme_handle_cqe() argument
969 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
978 if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) { in nvme_handle_cqe()
979 nvme_complete_async_event(&nvmeq->dev->ctrl, in nvme_handle_cqe()
984 req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); in nvme_handle_cqe()
986 dev_warn(nvmeq->dev->ctrl.device, in nvme_handle_cqe()
992 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); in nvme_handle_cqe()
997 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) in nvme_update_cq_head() argument
999 u16 tmp = nvmeq->cq_head + 1; in nvme_update_cq_head()
1001 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1002 nvmeq->cq_head = 0; in nvme_update_cq_head()
1003 nvmeq->cq_phase ^= 1; in nvme_update_cq_head()
1005 nvmeq->cq_head = tmp; in nvme_update_cq_head()
1009 static inline int nvme_process_cq(struct nvme_queue *nvmeq) in nvme_process_cq() argument
1013 while (nvme_cqe_pending(nvmeq)) { in nvme_process_cq()
1020 nvme_handle_cqe(nvmeq, nvmeq->cq_head); in nvme_process_cq()
1021 nvme_update_cq_head(nvmeq); in nvme_process_cq()
1025 nvme_ring_cq_doorbell(nvmeq); in nvme_process_cq()
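Fragments 944-1025 make up the completion path: nvme_cqe_pending() compares the CQE phase bit against cq_phase, nvme_handle_cqe() completes the matching request, and nvme_update_cq_head() wraps the head and flips the expected phase. A sketch of the consumer loop assembled from those lines; the found counter and the dma_rmb() ordering barrier are not in the listing and should be read as assumptions:

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	u16 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;	/* wrapped: expect the other phase bit */
	} else {
		nvmeq->cq_head = tmp;
	}
}

static inline int nvme_process_cq(struct nvme_queue *nvmeq)
{
	int found = 0;

	while (nvme_cqe_pending(nvmeq)) {
		found++;
		/*
		 * Assumed barrier: read the CQE payload only after the
		 * phase bit has been observed as valid.
		 */
		dma_rmb();
		nvme_handle_cqe(nvmeq, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

	if (found)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}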
1031 struct nvme_queue *nvmeq = data; in nvme_irq() local
1039 if (nvme_process_cq(nvmeq)) in nvme_irq()
1048 struct nvme_queue *nvmeq = data; in nvme_irq_check() local
1050 if (nvme_cqe_pending(nvmeq)) in nvme_irq_check()
1059 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) in nvme_poll_irqdisable() argument
1061 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in nvme_poll_irqdisable()
1063 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); in nvme_poll_irqdisable()
1065 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1066 nvme_process_cq(nvmeq); in nvme_poll_irqdisable()
1067 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1072 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_poll() local
1075 if (!nvme_cqe_pending(nvmeq)) in nvme_poll()
1078 spin_lock(&nvmeq->cq_poll_lock); in nvme_poll()
1079 found = nvme_process_cq(nvmeq); in nvme_poll()
1080 spin_unlock(&nvmeq->cq_poll_lock); in nvme_poll()
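The fragments at 1031-1080 show the two consumers of nvme_process_cq(): the interrupt handler and the blk-mq poll path, which takes cq_poll_lock because polled queues have no interrupt serialising access to the CQ. A sketch of nvme_poll() assembled from those lines, with the return statements reconstructed:

static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->cq_poll_lock);

	return found;
}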
1088 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event() local
1094 nvme_submit_cmd(nvmeq, &c, true); in nvme_pci_submit_async_event()
1109 struct nvme_queue *nvmeq, s16 vector) in adapter_alloc_cq() argument
1114 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) in adapter_alloc_cq()
1123 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1125 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1133 struct nvme_queue *nvmeq) in adapter_alloc_sq() argument
1153 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1155 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1175 struct nvme_queue *nvmeq = iod->nvmeq; in abort_endio() local
1177 dev_warn(nvmeq->dev->ctrl.device, in abort_endio()
1179 atomic_inc(&nvmeq->dev->ctrl.abort_limit); in abort_endio()
1229 struct nvme_queue *nvmeq = iod->nvmeq; in nvme_timeout() local
1230 struct nvme_dev *dev = nvmeq->dev; in nvme_timeout()
1255 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_timeout()
1258 nvme_poll_irqdisable(nvmeq); in nvme_timeout()
1263 req->tag, nvmeq->qid); in nvme_timeout()
1280 req->tag, nvmeq->qid); in nvme_timeout()
1295 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1298 req->tag, nvmeq->qid); in nvme_timeout()
1315 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_timeout()
1317 dev_warn(nvmeq->dev->ctrl.device, in nvme_timeout()
1319 req->tag, nvmeq->qid); in nvme_timeout()
1340 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
1342 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), in nvme_free_queue()
1343 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1344 if (!nvmeq->sq_cmds) in nvme_free_queue()
1347 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { in nvme_free_queue()
1348 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), in nvme_free_queue()
1349 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_free_queue()
1351 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), in nvme_free_queue()
1352 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
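nvme_free_queue() (lines 1340-1352) tears down the DMA allocations in the reverse of the allocation path: the CQ always lives in coherent host memory, while the SQ may sit in the controller memory buffer (NVMEQ_SQ_CMB) and must then go back through the p2pmem allocator. Sketch assembled from those lines, with the early return and the else branch reconstructed:

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
			  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}
}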
1370 static int nvme_suspend_queue(struct nvme_queue *nvmeq) in nvme_suspend_queue() argument
1372 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) in nvme_suspend_queue()
1378 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1379 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) in nvme_suspend_queue()
1380 blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); in nvme_suspend_queue()
1381 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_suspend_queue()
1382 pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); in nvme_suspend_queue()
1396 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_disable_admin_queue() local
1403 nvme_poll_irqdisable(nvmeq); in nvme_disable_admin_queue()
1448 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, in nvme_alloc_sq_cmds() argument
1454 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1455 if (nvmeq->sq_cmds) { in nvme_alloc_sq_cmds()
1456 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, in nvme_alloc_sq_cmds()
1457 nvmeq->sq_cmds); in nvme_alloc_sq_cmds()
1458 if (nvmeq->sq_dma_addr) { in nvme_alloc_sq_cmds()
1459 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); in nvme_alloc_sq_cmds()
1463 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1467 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), in nvme_alloc_sq_cmds()
1468 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_sq_cmds()
1469 if (!nvmeq->sq_cmds) in nvme_alloc_sq_cmds()
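The allocation side of the same split is at 1448-1469: nvme_alloc_sq_cmds() first tries to place the SQ in the controller memory buffer via pci_alloc_p2pmem() and only falls back to coherent host memory if that fails. Sketch assembled from those lines; the qid/cmb_use_sqes guard and the error returns are assumptions reconstructed around the listed fragments:

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
			      int qid)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* Assumed guard: only I/O queues are eligible for SQ-in-CMB. */
	if (qid && dev->cmb_use_sqes) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
		if (nvmeq->sq_cmds) {
			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
			if (nvmeq->sq_dma_addr) {
				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
				return 0;
			}

			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
		}
	}

	/* Fallback: ordinary coherent host memory. */
	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
					    &nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;
}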
1476 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue() local
1481 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; in nvme_alloc_queue()
1482 nvmeq->q_depth = depth; in nvme_alloc_queue()
1483 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1484 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1485 if (!nvmeq->cqes) in nvme_alloc_queue()
1488 if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) in nvme_alloc_queue()
1491 nvmeq->dev = dev; in nvme_alloc_queue()
1492 spin_lock_init(&nvmeq->sq_lock); in nvme_alloc_queue()
1493 spin_lock_init(&nvmeq->cq_poll_lock); in nvme_alloc_queue()
1494 nvmeq->cq_head = 0; in nvme_alloc_queue()
1495 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1496 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1497 nvmeq->qid = qid; in nvme_alloc_queue()
1503 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1504 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1509 static int queue_request_irq(struct nvme_queue *nvmeq) in queue_request_irq() argument
1511 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in queue_request_irq()
1512 int nr = nvmeq->dev->ctrl.instance; in queue_request_irq()
1515 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, in queue_request_irq()
1516 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1518 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, in queue_request_irq()
1519 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1523 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
1525 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1527 nvmeq->sq_tail = 0; in nvme_init_queue()
1528 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1529 nvmeq->cq_head = 0; in nvme_init_queue()
1530 nvmeq->cq_phase = 1; in nvme_init_queue()
1531 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1532 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
1533 nvme_dbbuf_init(dev, nvmeq, qid); in nvme_init_queue()
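nvme_init_queue() (1523-1533) resets the ring state each time a queue is (re)created: tails and head back to zero, phase back to 1, the doorbell pointer recomputed from qid and db_stride, the CQ memory cleared, and the shadow doorbells re-attached. Sketch assembled from those lines; the trailing online_queues accounting and write barrier are not in the listing and should be treated as assumptions:

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;	/* first pass: valid CQEs carry phase bit 1 */
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;	/* assumed: paired with the decrement in nvme_suspend_queue() */
	wmb();			/* assumed: publish the state before the first interrupt */
}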
1538 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) in nvme_create_queue() argument
1540 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1544 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_create_queue()
1553 set_bit(NVMEQ_POLLED, &nvmeq->flags); in nvme_create_queue()
1555 result = adapter_alloc_cq(dev, qid, nvmeq, vector); in nvme_create_queue()
1559 result = adapter_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
1565 nvmeq->cq_vector = vector; in nvme_create_queue()
1566 nvme_init_queue(nvmeq, qid); in nvme_create_queue()
1569 result = queue_request_irq(nvmeq); in nvme_create_queue()
1574 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_create_queue()
1681 struct nvme_queue *nvmeq; in nvme_pci_configure_admin_queue() local
1704 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
1705 aqa = nvmeq->q_depth - 1; in nvme_pci_configure_admin_queue()
1709 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); in nvme_pci_configure_admin_queue()
1710 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); in nvme_pci_configure_admin_queue()
1716 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
1717 nvme_init_queue(nvmeq, 0); in nvme_pci_configure_admin_queue()
1718 result = queue_request_irq(nvmeq); in nvme_pci_configure_admin_queue()
1724 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_pci_configure_admin_queue()
2211 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_queue_end() local
2214 complete(&nvmeq->delete_done); in nvme_del_queue_end()
2219 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_cq_end() local
2222 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_del_cq_end()
2227 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) in nvme_delete_queue() argument
2229 struct request_queue *q = nvmeq->dev->ctrl.admin_q; in nvme_delete_queue()
2235 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); in nvme_delete_queue()
2242 req->end_io_data = nvmeq; in nvme_delete_queue()
2244 init_completion(&nvmeq->delete_done); in nvme_delete_queue()
2265 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_disable_io_queues() local
2267 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, in __nvme_disable_io_queues()