Lines Matching refs:fod
149 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ member
184 return (fodptr - fodptr->queue->fod); in nvmet_fc_fodnum()
253 struct nvmet_fc_fcp_iod *fod);
630 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_prep_fcp_iodlist() local
633 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
634 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); in nvmet_fc_prep_fcp_iodlist()
635 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
636 fod->queue = queue; in nvmet_fc_prep_fcp_iodlist()
637 fod->active = false; in nvmet_fc_prep_fcp_iodlist()
638 fod->abort = false; in nvmet_fc_prep_fcp_iodlist()
639 fod->aborted = false; in nvmet_fc_prep_fcp_iodlist()
640 fod->fcpreq = NULL; in nvmet_fc_prep_fcp_iodlist()
641 list_add_tail(&fod->fcp_list, &queue->fod_list); in nvmet_fc_prep_fcp_iodlist()
642 spin_lock_init(&fod->flock); in nvmet_fc_prep_fcp_iodlist()
644 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
645 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_iodlist()
646 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
647 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
648 for (fod--, i--; i >= 0; fod--, i--) { in nvmet_fc_prep_fcp_iodlist()
649 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
650 sizeof(fod->rspiubuf), in nvmet_fc_prep_fcp_iodlist()
652 fod->rspdma = 0L; in nvmet_fc_prep_fcp_iodlist()
653 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
665 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_destroy_fcp_iodlist() local
668 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
669 if (fod->rspdma) in nvmet_fc_destroy_fcp_iodlist()
670 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
671 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_destroy_fcp_iodlist()
678 struct nvmet_fc_fcp_iod *fod; in nvmet_fc_alloc_fcp_iod() local
682 fod = list_first_entry_or_null(&queue->fod_list, in nvmet_fc_alloc_fcp_iod()
684 if (fod) { in nvmet_fc_alloc_fcp_iod()
685 list_del(&fod->fcp_list); in nvmet_fc_alloc_fcp_iod()
686 fod->active = true; in nvmet_fc_alloc_fcp_iod()
693 return fod; in nvmet_fc_alloc_fcp_iod()
702 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_queue_fcp_req() local
711 nvmet_fc_handle_fcp_rqst(tgtport, fod); in nvmet_fc_queue_fcp_req()
717 struct nvmet_fc_fcp_iod *fod = in nvmet_fc_fcp_rqst_op_defer_work() local
721 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
727 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_free_fcp_iod() argument
729 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_free_fcp_iod()
730 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod()
734 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
735 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_free_fcp_iod()
739 fod->active = false; in nvmet_fc_free_fcp_iod()
740 fod->abort = false; in nvmet_fc_free_fcp_iod()
741 fod->aborted = false; in nvmet_fc_free_fcp_iod()
742 fod->writedataactive = false; in nvmet_fc_free_fcp_iod()
743 fod->fcpreq = NULL; in nvmet_fc_free_fcp_iod()
754 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); in nvmet_fc_free_fcp_iod()
770 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); in nvmet_fc_free_fcp_iod()
775 fcpreq->nvmet_fc_private = fod; in nvmet_fc_free_fcp_iod()
776 fod->fcpreq = fcpreq; in nvmet_fc_free_fcp_iod()
777 fod->active = true; in nvmet_fc_free_fcp_iod()
787 queue_work(queue->work_q, &fod->defer_work); in nvmet_fc_free_fcp_iod()
800 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); in nvmet_fc_alloc_target_queue()
882 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_delete_target_queue() local
896 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
897 if (fod->active) { in nvmet_fc_delete_target_queue()
898 spin_lock(&fod->flock); in nvmet_fc_delete_target_queue()
899 fod->abort = true; in nvmet_fc_delete_target_queue()
905 if (fod->writedataactive) { in nvmet_fc_delete_target_queue()
906 fod->aborted = true; in nvmet_fc_delete_target_queue()
907 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
909 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
911 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
2077 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_alloc_tgt_pgs() argument
2082 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); in nvmet_fc_alloc_tgt_pgs()
2086 fod->data_sg = sg; in nvmet_fc_alloc_tgt_pgs()
2087 fod->data_sg_cnt = nent; in nvmet_fc_alloc_tgt_pgs()
2088 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2089 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_alloc_tgt_pgs()
2092 fod->next_sg = fod->data_sg; in nvmet_fc_alloc_tgt_pgs()
2101 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_free_tgt_pgs() argument
2103 if (!fod->data_sg || !fod->data_sg_cnt) in nvmet_fc_free_tgt_pgs()
2106 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2107 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_free_tgt_pgs()
2109 sgl_free(fod->data_sg); in nvmet_fc_free_tgt_pgs()
2110 fod->data_sg = NULL; in nvmet_fc_free_tgt_pgs()
2111 fod->data_sg_cnt = 0; in nvmet_fc_free_tgt_pgs()
2133 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_prep_fcp_rsp() argument
2135 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; in nvmet_fc_prep_fcp_rsp()
2136 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in nvmet_fc_prep_fcp_rsp()
2142 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) in nvmet_fc_prep_fcp_rsp()
2143 xfr_length = fod->req.transfer_len; in nvmet_fc_prep_fcp_rsp()
2145 xfr_length = fod->offset; in nvmet_fc_prep_fcp_rsp()
2166 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); in nvmet_fc_prep_fcp_rsp()
2167 if (!(rspcnt % fod->queue->ersp_ratio) || in nvmet_fc_prep_fcp_rsp()
2169 xfr_length != fod->req.transfer_len || in nvmet_fc_prep_fcp_rsp()
2172 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) in nvmet_fc_prep_fcp_rsp()
2176 fod->fcpreq->rspaddr = ersp; in nvmet_fc_prep_fcp_rsp()
2177 fod->fcpreq->rspdma = fod->rspdma; in nvmet_fc_prep_fcp_rsp()
2181 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; in nvmet_fc_prep_fcp_rsp()
2184 rsn = atomic_inc_return(&fod->queue->rsn); in nvmet_fc_prep_fcp_rsp()
2187 fod->fcpreq->rsplen = sizeof(*ersp); in nvmet_fc_prep_fcp_rsp()
2190 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2191 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_rsp()
2198 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_abort_op() argument
2200 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_abort_op()
2203 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_abort_op()
2210 if (!fod->aborted) in nvmet_fc_abort_op()
2213 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_abort_op()
2218 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_xmt_fcp_rsp() argument
2222 fod->fcpreq->op = NVMET_FCOP_RSP; in nvmet_fc_xmt_fcp_rsp()
2223 fod->fcpreq->timeout = 0; in nvmet_fc_xmt_fcp_rsp()
2225 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2227 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2229 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2234 struct nvmet_fc_fcp_iod *fod, u8 op) in nvmet_fc_transfer_fcp_data() argument
2236 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_transfer_fcp_data()
2237 struct scatterlist *sg = fod->next_sg; in nvmet_fc_transfer_fcp_data()
2239 u32 remaininglen = fod->req.transfer_len - fod->offset; in nvmet_fc_transfer_fcp_data()
2244 fcpreq->offset = fod->offset; in nvmet_fc_transfer_fcp_data()
2271 fod->next_sg = sg; in nvmet_fc_transfer_fcp_data()
2273 fod->next_sg = NULL; in nvmet_fc_transfer_fcp_data()
2285 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && in nvmet_fc_transfer_fcp_data()
2288 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_transfer_fcp_data()
2291 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2298 fod->abort = true; in nvmet_fc_transfer_fcp_data()
2301 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2302 fod->writedataactive = false; in nvmet_fc_transfer_fcp_data()
2303 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2304 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_transfer_fcp_data()
2308 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2314 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) in __nvmet_fc_fod_op_abort() argument
2316 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in __nvmet_fc_fod_op_abort()
2317 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort()
2322 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in __nvmet_fc_fod_op_abort()
2326 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fod_op_abort()
2337 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_fod_op_done() argument
2339 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_fod_op_done()
2340 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done()
2344 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2345 abort = fod->abort; in nvmet_fc_fod_op_done()
2346 fod->writedataactive = false; in nvmet_fc_fod_op_done()
2347 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2352 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2356 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2357 fod->abort = true; in nvmet_fc_fod_op_done()
2358 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2360 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_fod_op_done()
2364 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2365 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2366 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2367 fod->writedataactive = true; in nvmet_fc_fod_op_done()
2368 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2371 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2377 fod->req.execute(&fod->req); in nvmet_fc_fod_op_done()
2382 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2386 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_fod_op_done()
2394 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_fod_op_done()
2395 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2399 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2400 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2402 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2410 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_fod_op_done()
2412 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in nvmet_fc_fod_op_done()
2417 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2419 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2430 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_xmt_fcp_op_done() local
2432 nvmet_fc_fod_op_done(fod); in nvmet_fc_xmt_fcp_op_done()
2440 struct nvmet_fc_fcp_iod *fod, int status) in __nvmet_fc_fcp_nvme_cmd_done() argument
2442 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in __nvmet_fc_fcp_nvme_cmd_done()
2443 struct nvme_completion *cqe = &fod->rspiubuf.cqe; in __nvmet_fc_fcp_nvme_cmd_done()
2447 spin_lock_irqsave(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2448 abort = fod->abort; in __nvmet_fc_fcp_nvme_cmd_done()
2449 spin_unlock_irqrestore(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2453 fod->queue->sqhd = cqe->sq_head; in __nvmet_fc_fcp_nvme_cmd_done()
2456 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2464 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ in __nvmet_fc_fcp_nvme_cmd_done()
2465 cqe->sq_id = cpu_to_le16(fod->queue->qid); in __nvmet_fc_fcp_nvme_cmd_done()
2475 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { in __nvmet_fc_fcp_nvme_cmd_done()
2477 nvmet_fc_transfer_fcp_data(tgtport, fod, in __nvmet_fc_fcp_nvme_cmd_done()
2486 nvmet_fc_free_tgt_pgs(fod); in __nvmet_fc_fcp_nvme_cmd_done()
2488 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2495 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); in nvmet_fc_fcp_nvme_cmd_done() local
2496 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done()
2498 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); in nvmet_fc_fcp_nvme_cmd_done()
2507 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_handle_fcp_rqst() argument
2509 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; in nvmet_fc_handle_fcp_rqst()
2522 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; in nvmet_fc_handle_fcp_rqst()
2525 fod->io_dir = NVMET_FCP_WRITE; in nvmet_fc_handle_fcp_rqst()
2529 fod->io_dir = NVMET_FCP_READ; in nvmet_fc_handle_fcp_rqst()
2533 fod->io_dir = NVMET_FCP_NODATA; in nvmet_fc_handle_fcp_rqst()
2538 fod->req.cmd = &fod->cmdiubuf.sqe; in nvmet_fc_handle_fcp_rqst()
2539 fod->req.cqe = &fod->rspiubuf.cqe; in nvmet_fc_handle_fcp_rqst()
2541 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2544 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); in nvmet_fc_handle_fcp_rqst()
2546 fod->data_sg = NULL; in nvmet_fc_handle_fcp_rqst()
2547 fod->data_sg_cnt = 0; in nvmet_fc_handle_fcp_rqst()
2549 ret = nvmet_req_init(&fod->req, in nvmet_fc_handle_fcp_rqst()
2550 &fod->queue->nvme_cq, in nvmet_fc_handle_fcp_rqst()
2551 &fod->queue->nvme_sq, in nvmet_fc_handle_fcp_rqst()
2559 fod->req.transfer_len = xfrlen; in nvmet_fc_handle_fcp_rqst()
2562 atomic_inc(&fod->queue->sqtail); in nvmet_fc_handle_fcp_rqst()
2564 if (fod->req.transfer_len) { in nvmet_fc_handle_fcp_rqst()
2565 ret = nvmet_fc_alloc_tgt_pgs(fod); in nvmet_fc_handle_fcp_rqst()
2567 nvmet_req_complete(&fod->req, ret); in nvmet_fc_handle_fcp_rqst()
2571 fod->req.sg = fod->data_sg; in nvmet_fc_handle_fcp_rqst()
2572 fod->req.sg_cnt = fod->data_sg_cnt; in nvmet_fc_handle_fcp_rqst()
2573 fod->offset = 0; in nvmet_fc_handle_fcp_rqst()
2575 if (fod->io_dir == NVMET_FCP_WRITE) { in nvmet_fc_handle_fcp_rqst()
2577 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); in nvmet_fc_handle_fcp_rqst()
2587 fod->req.execute(&fod->req); in nvmet_fc_handle_fcp_rqst()
2591 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_handle_fcp_rqst()
2649 struct nvmet_fc_fcp_iod *fod; in nvmet_fc_rcv_fcp_req() local
2674 fod = nvmet_fc_alloc_fcp_iod(queue); in nvmet_fc_rcv_fcp_req()
2675 if (fod) { in nvmet_fc_rcv_fcp_req()
2678 fcpreq->nvmet_fc_private = fod; in nvmet_fc_rcv_fcp_req()
2679 fod->fcpreq = fcpreq; in nvmet_fc_rcv_fcp_req()
2681 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); in nvmet_fc_rcv_fcp_req()
2756 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_rcv_fcp_abort() local
2760 if (!fod || fod->fcpreq != fcpreq) in nvmet_fc_rcv_fcp_abort()
2764 queue = fod->queue; in nvmet_fc_rcv_fcp_abort()
2767 if (fod->active) { in nvmet_fc_rcv_fcp_abort()
2773 spin_lock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2774 fod->abort = true; in nvmet_fc_rcv_fcp_abort()
2775 fod->aborted = true; in nvmet_fc_rcv_fcp_abort()
2776 spin_unlock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
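
The references above trace one recurring pattern: the queue ends in a flexible array of fcp_iod descriptors (line 149), sized at allocation with struct_size() (line 800), each descriptor is threaded onto a per-queue free list during prep (line 641), pulled off that list when a command arrives (lines 682-686), and returned to it when the I/O completes (line 754), with nvmet_fc_fodnum() recovering an index by pointer arithmetic (line 184). Below is a minimal, self-contained userspace sketch of that pool pattern, not the driver code: the names (iod, queue, queue_alloc, iod_alloc, iod_free) are illustrative only, the kernel's intrusive list_head free list is replaced with a simple singly linked list, and the DMA mapping, locking, and abort handling seen in the listing are omitted.

/* Standalone illustration of the fod pool pattern (assumed simplification,
 * not the nvmet-fc implementation). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct iod {
	struct iod *next_free;	/* stands in for fod->fcp_list linkage */
	bool active;
};

struct queue {
	int sqsize;
	struct iod *free_list;	/* stands in for queue->fod_list */
	struct iod fod[];	/* flexible array, as in line 149 */
};

static struct queue *queue_alloc(int sqsize)
{
	/* analogous to kzalloc(struct_size(queue, fod, sqsize)) on line 800 */
	struct queue *q = calloc(1, sizeof(*q) + sqsize * sizeof(q->fod[0]));
	if (!q)
		return NULL;
	q->sqsize = sqsize;
	/* prep loop: put every descriptor on the free list (cf. line 641) */
	for (int i = 0; i < sqsize; i++) {
		q->fod[i].next_free = q->free_list;
		q->free_list = &q->fod[i];
	}
	return q;
}

static struct iod *iod_alloc(struct queue *q)
{
	/* like nvmet_fc_alloc_fcp_iod(): take the first free entry, mark active */
	struct iod *iod = q->free_list;
	if (iod) {
		q->free_list = iod->next_free;
		iod->active = true;
	}
	return iod;
}

static void iod_free(struct queue *q, struct iod *iod)
{
	/* like nvmet_fc_free_fcp_iod(): reset state, return to the free list */
	iod->active = false;
	iod->next_free = q->free_list;
	q->free_list = iod;
}

int main(void)
{
	struct queue *q = queue_alloc(4);
	struct iod *iod = iod_alloc(q);

	/* pointer-difference index, cf. nvmet_fc_fodnum() on line 184 */
	printf("allocated iod index %td\n", iod - q->fod);
	iod_free(q, iod);
	free(q);
	return 0;
}

A pool exhausted here simply returns NULL from iod_alloc(); the driver instead stashes the incoming request on a deferred list and re-uses a freed descriptor for it later (lines 770-787), which is why nvmet_fc_free_fcp_iod() can end by queueing defer_work rather than always returning the descriptor to the free list.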