Lines matching refs: iod (the per-request I/O descriptor, struct nvme_iod, in the NVMe PCI transport driver)
432 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_init_request() local
435 nvme_req(req)->cmd = &iod->cmd; in nvme_pci_init_request()
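Line 432's blk_mq_rq_to_pdu() hands back the per-request PDU, the struct nvme_iod that every later hit operates on. Reconstructed only from the members referenced in this listing (the in-tree definition may order them differently or carry extra fields), the descriptor looks roughly like:

/* Hedged reconstruction from the members used below, not the driver's
 * literal definition.  Line 435 (nvme_req(req)->cmd = &iod->cmd) implies
 * a struct nvme_request is embedded first, since nvme_req() just casts
 * the PDU. */
struct nvme_iod {
	struct nvme_request req;	/* generic NVMe request state */
	struct nvme_command cmd;	/* command copied into the SQ at submit time */
	bool use_sgl;			/* SGL descriptor list vs. PRP list (line 863) */
	bool aborted;			/* an abort was already sent from nvme_timeout() */
	s8 nr_allocations;		/* PRP/SGL pool pages in use; -1 = none/failed */
	unsigned int dma_len;		/* single-bvec fast-path mapping length */
	dma_addr_t first_dma;		/* first pool page, or the lone mapped bvec */
	dma_addr_t meta_dma;		/* mapped integrity metadata buffer */
	struct sg_table sgt;		/* scatterlist for the general mapping path */
};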
518 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_iod_list() local
519 return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req)); in nvme_pci_iod_list()
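Line 519 is the key to how the single iod_mempool element is carved up: the scatterlist entries come first, and an array of void * slots, one per PRP/SGL pool page, starts immediately after the last physical segment. A commented restatement of that pointer arithmetic (the diagram is illustrative):

/*
 *   iod->sgt.sgl
 *   v
 *   +---------------------------+------------------------------+
 *   | struct scatterlist[nseg]  | void *list[]                 |
 *   | one per physical segment  | one slot per pool page,      |
 *   |                           | consumed via nr_allocations  |
 *   +---------------------------+------------------------------+
 *                               ^
 *                               (void **)(sgl + nseg)
 */
static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
}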
542 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_free_prps() local
543 dma_addr_t dma_addr = iod->first_dma; in nvme_free_prps()
546 for (i = 0; i < iod->nr_allocations; i++) { in nvme_free_prps()
558 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_free_sgls() local
559 dma_addr_t dma_addr = iod->first_dma; in nvme_free_sgls()
562 for (i = 0; i < iod->nr_allocations; i++) { in nvme_free_sgls()
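Both free helpers walk that list[] array: they start at iod->first_dma and release iod->nr_allocations pool pages (lines 542-546 and 558-562). The loop bodies are not part of this listing; a sketch of the PRP variant, assuming each pool page chains to the next through its last PRP slot and that the pool is the driver's prp_page_pool (both assumptions here):

static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		__le64 *prp_list = nvme_pci_iod_list(req)[i];
		/* last slot of each page holds the bus address of the next page */
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

nvme_free_sgls() follows the same pattern, except the next-page address comes from the last SGL segment descriptor of each page.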
573 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_unmap_data() local
575 if (iod->dma_len) { in nvme_unmap_data()
576 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, in nvme_unmap_data()
581 WARN_ON_ONCE(!iod->sgt.nents); in nvme_unmap_data()
583 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_unmap_data()
585 if (iod->nr_allocations == 0) in nvme_unmap_data()
587 iod->first_dma); in nvme_unmap_data()
588 else if (iod->use_sgl) in nvme_unmap_data()
592 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_unmap_data()
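Lines 573-592 show the whole teardown: a single-bvec mapping is undone with dma_unmap_page(), otherwise the sg table is unmapped and then either the lone small-pool page, the SGL chain, or the PRP chain is released before the mempool element is returned. Filling in the elided branch bodies as a hedged sketch (prp_small_pool is an assumption, named after the driver's usual pool for sub-page descriptor lists):

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		/* fast path from nvme_setup_{prp,sgl}_simple(): no sg table */
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->sgt.nents);

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

	if (iod->nr_allocations == 0)
		/* descriptors fit in a single page from the small pool */
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			      iod->first_dma);
	else if (iod->use_sgl)
		nvme_free_sgls(dev, req);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
}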
612 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_setup_prps() local
615 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_prps()
626 iod->first_dma = 0; in nvme_pci_setup_prps()
640 iod->first_dma = dma_addr; in nvme_pci_setup_prps()
647 iod->nr_allocations = 0; in nvme_pci_setup_prps()
650 iod->nr_allocations = 1; in nvme_pci_setup_prps()
655 iod->nr_allocations = -1; in nvme_pci_setup_prps()
659 iod->first_dma = prp_dma; in nvme_pci_setup_prps()
667 list[iod->nr_allocations++] = prp_list; in nvme_pci_setup_prps()
687 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); in nvme_pci_setup_prps()
688 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); in nvme_pci_setup_prps()
694 WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), in nvme_pci_setup_prps()
696 blk_rq_payload_bytes(req), iod->sgt.nents); in nvme_pci_setup_prps()
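Lines 612-696 belong to the PRP builder. The hits show the bookkeeping rather than the walk itself: first_dma doubles as PRP2 (0 when one page suffices, line 626; the address of the first PRP-list page otherwise, line 659), nr_allocations counts the pool pages that nvme_free_prps() must later return (0 for the small pool, line 647; growing from 1 per chained page, lines 650 and 667; -1 when allocation failed, line 655), and lines 687-688 finally stuff PRP1/PRP2 into the command. A condensed sketch of that bookkeeping (nprps, the pool names, and the small-pool threshold are assumptions):

	/* PRP entries still needed after the first, possibly unaligned, page */
	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;	/* whole list fits in 256 bytes */
		iod->nr_allocations = 0;	/* line 647 */
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;	/* line 650 */
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->nr_allocations = -1;	/* line 655: unmap path frees nothing */
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;		/* line 659: becomes PRP2 */

	/* each time a PRP page fills up, a fresh one is chained and recorded: */
	list[iod->nr_allocations++] = prp_list;	/* line 667 */

	/* on completion (lines 687-688): */
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

The WARN at line 694 fires when the mapped scatterlist violates the PRP alignment rules, dumping the sg entries via nvme_print_sgl().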
724 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_setup_sgls() local
727 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_sgls()
728 unsigned int entries = iod->sgt.nents; in nvme_pci_setup_sgls()
742 iod->nr_allocations = 0; in nvme_pci_setup_sgls()
745 iod->nr_allocations = 1; in nvme_pci_setup_sgls()
750 iod->nr_allocations = -1; in nvme_pci_setup_sgls()
755 iod->first_dma = sgl_dma; in nvme_pci_setup_sgls()
769 nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list; in nvme_pci_setup_sgls()
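Lines 724-769 are the SGL counterpart, and the shape is the same: pick the small pool when the descriptor list fits in one small allocation (nr_allocations = 0, line 742) or the page pool otherwise (starting at 1, line 745), mark -1 on allocation failure (line 750), remember the first page in first_dma (line 755), and append every additional descriptor page to the iod list (line 769) so the free path can find it. A condensed, hedged sketch (the 256-byte threshold, pool names, and nvme_sgl_desc sizing are assumptions):

	if (entries <= 256 / sizeof(struct nvme_sgl_desc)) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;	/* line 742 */
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;	/* line 745 */
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->nr_allocations = -1;	/* line 750 */
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;		/* line 755 */

	/* while filling descriptors, every freshly chained page is recorded: */
	nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list;	/* line 769 */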
788 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_setup_prp_simple() local
792 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_prp_simple()
793 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_prp_simple()
795 iod->dma_len = bv->bv_len; in nvme_setup_prp_simple()
797 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); in nvme_setup_prp_simple()
799 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); in nvme_setup_prp_simple()
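Lines 788-799 are the single-bvec PRP fast path: map the bio_vec directly, remember its length in dma_len so nvme_unmap_data() knows no sg table exists, and point PRP1 (and PRP2 only when the buffer crosses a controller page) straight at it. A hedged sketch of the whole helper, with the offset arithmetic and the crossing check filled in as assumptions:

static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	else
		cmnd->dptr.prp2 = 0;
	return BLK_STS_OK;
}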
809 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_setup_sgl_simple() local
811 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_sgl_simple()
812 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_sgl_simple()
814 iod->dma_len = bv->bv_len; in nvme_setup_sgl_simple()
817 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); in nvme_setup_sgl_simple()
818 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); in nvme_setup_sgl_simple()
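Lines 809-818 are the same fast path expressed as a single SGL data descriptor instead of PRPs; dma_len again marks the request as having no sg table to unmap. A hedged sketch (the flags and descriptor-type constants are assumptions):

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}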
826 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_map_data() local
846 iod->dma_len = 0; in nvme_map_data()
847 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); in nvme_map_data()
848 if (!iod->sgt.sgl) in nvme_map_data()
850 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); in nvme_map_data()
851 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); in nvme_map_data()
852 if (!iod->sgt.orig_nents) in nvme_map_data()
855 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), in nvme_map_data()
863 iod->use_sgl = nvme_pci_use_sgls(dev, req); in nvme_map_data()
864 if (iod->use_sgl) in nvme_map_data()
873 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_map_data()
875 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_map_data()
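Lines 826-875 form the general mapping path: clear dma_len (so the unmap side takes the sg-table branch), borrow a scatterlist from iod_mempool, build and DMA-map it, then hand off to the SGL or PRP builder depending on nvme_pci_use_sgls(); the two exits at 873 and 875 unwind in reverse order on failure. A hedged sketch of that skeleton (the single-bvec dispatch that precedes it is elided here, as it is in the listing; DMA_ATTR_NO_WARN and the exact error codes are assumptions):

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int rc;

	/* (not shown) small single-bvec requests short-circuit through
	 * nvme_setup_prp_simple()/nvme_setup_sgl_simple() before this point */

	iod->dma_len = 0;
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sgt.sgl)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	if (!iod->sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc)
		goto out_free_sg;

	iod->use_sgl = nvme_pci_use_sgls(dev, req);
	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
	return ret;
}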
882 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_map_metadata() local
884 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), in nvme_map_metadata()
886 if (dma_mapping_error(dev->dev, iod->meta_dma)) in nvme_map_metadata()
888 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); in nvme_map_metadata()
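Lines 882-888 map the integrity (metadata) buffer as a single bvec and store the bus address in meta_dma, which the rw command carries verbatim; the matching dma_unmap_page() shows up later in nvme_pci_unmap_rq() (line 1024). A hedged sketch of the helper (the error code is an assumption):

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
				     rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}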
894 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_prep_rq() local
897 iod->aborted = false; in nvme_prep_rq()
898 iod->nr_allocations = -1; in nvme_prep_rq()
899 iod->sgt.nents = 0; in nvme_prep_rq()
906 ret = nvme_map_data(dev, req, &iod->cmd); in nvme_prep_rq()
912 ret = nvme_map_metadata(dev, req, &iod->cmd); in nvme_prep_rq()
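Lines 894-912 show the per-request preparation: reset the descriptor state that the error and unmap paths rely on (aborted, nr_allocations = -1, sgt.nents = 0), then map the data and, when the request carries integrity data, the metadata. A hedged sketch of the flow around those hits (the setup/cleanup helpers and the guards are assumptions drawn from the usual driver structure):

static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	iod->aborted = false;
	iod->nr_allocations = -1;	/* nothing for nvme_unmap_data() to free yet */
	iod->sgt.nents = 0;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &iod->cmd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &iod->cmd);
		if (ret)
			goto out_unmap_data;
	}

	nvme_start_request(req);
	return BLK_STS_OK;
out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}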
935 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_queue_rq() local
952 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_queue_rq()
963 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_submit_cmds() local
965 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_submit_cmds()
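Lines 952 and 965 are where the prepared iod->cmd finally reaches hardware: both the single-request path (nvme_queue_rq) and the batched rqlist path (nvme_submit_cmds) copy it into the next submission-queue slot with nvme_sq_copy_cmd() under the SQ lock. A hedged sketch of that helper (the nvme_queue member names are assumptions, since they never appear in this listing):

static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{
	/* sq_cmds is the SQ ring buffer, sqes the log2 of the entry size */
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
}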
1022 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_unmap_rq() local
1024 dma_unmap_page(dev->dev, iod->meta_dma, in nvme_pci_unmap_rq()
1337 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_timeout() local
1404 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1419 iod->aborted = true; in nvme_timeout()
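Lines 1337-1419 close the loop on iod->aborted: when a command times out, the driver aborts it at most once. A second timeout for the same request, or any timeout on the admin queue (qid 0), escalates straight to a controller reset; otherwise the flag is set and an Abort admin command is issued. A hedged fragment of that decision (the reset helpers and the warning text are assumptions):

	if (!nvmeq->qid || iod->aborted) {
		/* already tried to abort, or the admin queue itself is stuck */
		dev_warn(dev->ctrl.device,
			 "I/O tag %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);
		return BLK_EH_DONE;
	}

	iod->aborted = true;
	/* build an Abort admin command for this tag/QID and submit it on the
	 * admin queue; the timed-out request itself stays pending meanwhile */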