Lines matching "loss", "of", "lock" — Chelsio FCoE driver for Linux (drivers/scsi/csio/csio_scsi.c)

/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 *
 * Should be called with lock held.
 */
    switch (sld->level) {
    case CSIO_LEV_LUN:
        return ((ioreq->lnode == sld->lnode) &&
                (ioreq->rnode == sld->rnode) &&
                ((uint64_t)scmnd->device->lun == sld->oslun));
    case CSIO_LEV_RNODE:
        return ((ioreq->lnode == sld->lnode) &&
                (ioreq->rnode == sld->rnode));
    case CSIO_LEV_LNODE:
        return (ioreq->lnode == sld->lnode);
    ...
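The level check narrows from most specific (LUN) to least specific (lnode): each deeper level adds one more field comparison. A minimal standalone sketch of the same narrowing pattern, assuming nothing from the driver (the names match_io, io_ctx, LEV_* are illustrative):

#include <stdbool.h>
#include <stdint.h>

enum match_level { LEV_LUN, LEV_RNODE, LEV_LNODE, LEV_ALL };

struct io_ctx { void *lnode; void *rnode; uint64_t lun; };

/* Fall through the cases: every finer level adds one more constraint. */
static bool match_io(const struct io_ctx *io, const struct io_ctx *key,
                     enum match_level level)
{
    switch (level) {
    case LEV_LUN:
        if (io->lun != key->lun)
            return false;
        /* fall through */
    case LEV_RNODE:
        if (io->rnode != key->rnode)
            return false;
        /* fall through */
    case LEV_LNODE:
        return io->lnode == key->lnode;
    case LEV_ALL:
        return true;
    }
    return false;
}

int main(void)
{
    struct io_ctx io  = { (void *)1, (void *)2, 5 };
    struct io_ctx key = { (void *)1, (void *)2, 9 };

    /* LUN differs, so only the coarser levels match. */
    return (!match_io(&io, &key, LEV_LUN) &&
            match_io(&io, &key, LEV_RNODE)) ? 0 : 1;
}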
/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 *
 * Should be called with lock held.
 */
    if (list_empty(&scm->active_q))
        return;

    /* Gather all I/Os at once when the level covers everything */
    if (sld->level == CSIO_LEV_ALL) {
        list_splice_tail_init(&scm->active_q, dest);
        return;
    }

    /* Else walk the queue and move only the matching I/Os */
    list_for_each_safe(tmp, next, &scm->active_q) {
    ...
/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 */
    /* Check for Task Management */
    if (likely(scmnd->SCp.Message == 0)) {
        int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
        fcp_cmnd->fc_tm_flags = 0;
        fcp_cmnd->fc_cmdref = 0;

        memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
        fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
        fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

        if (req->nsge) {
            if (req->datadir == DMA_TO_DEVICE)
                fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
            else
                fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
        } else {
            fcp_cmnd->fc_flags = 0;
        }
    } else {
        /* Task management request */
        int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
        fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
    }
/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry).
 */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
    struct csio_dma_buf *dma_buf;
    uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | ...);
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                                   FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));

    wr->cookie = (uintptr_t) req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t) req->tmo;
    wr->r3 = 0;
    memset(&wr->r5, 0, 8);

    /* Get RSP DMA buffer */
    dma_buf = &req->dma_buf;

    /* Prepare RSP SGL */
    wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
    wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

    wr->r6 = 0;

    wr->u.fcoe.ctl_pri = 0;
    wr->u.fcoe.cp_en_class = 0;
    wr->u.fcoe.r4_lo[0] = 0;
    wr->u.fcoe.r4_lo[1] = 0;
/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 *
 * Gets a free WR slot and initializes it with the SCSI CMD WR.
 */
    struct csio_wr_pair wrp;
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);
    uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (unlikely(req->drv_status != 0))
        return;

    if (wrp.size1 >= size) {
        /* Initialize WR in one shot */
        csio_scsi_init_cmd_wr(req, wrp.addr1, size);
    } else {
        uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

        /*
         * Make a temporary copy of the WR and write back
         * the copy into the WR pair.
         */
        csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
        memcpy(wrp.addr1, tmpwr, wrp.size1);
        memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
    }
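When a work request would straddle the end of the egress ring, the driver builds it in a scratch buffer and writes it back in two pieces: size1 bytes before the wrap, the remainder at the start. A standalone sketch of that wrap-around copy, assuming nothing from the driver (ring_copy and its arguments are illustrative):

#include <stdint.h>
#include <string.h>

/* Copy len bytes of src into a ring of ring_sz bytes starting at offset pos,
 * splitting the copy when it would run past the end of the ring. */
static void ring_copy(uint8_t *ring, size_t ring_sz, size_t pos,
                      const void *src, size_t len)
{
    size_t first = ring_sz - pos;    /* room before the wrap point */

    if (len <= first) {
        memcpy(ring + pos, src, len);    /* fits in one shot */
    } else {
        memcpy(ring + pos, src, first);
        memcpy(ring, (const uint8_t *)src + first, len - first);
    }
}

int main(void)
{
    uint8_t ring[8] = {0};
    const uint8_t wr[5] = {1, 2, 3, 4, 5};

    ring_copy(ring, sizeof(ring), 6, wr, sizeof(wr));
    /* Bytes land at slots 6,7 and then wrap to 0,1,2. */
    return (ring[7] == 2 && ring[2] == 5) ? 0 : 1;
}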
/*
 * csio_scsi_init_ulptx_dsgl - Fill in a ULP_TX_SC_DSGL
 */
    sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
                          ULPTX_NSGE_V(req->nsge));

    /* Now add the data SGLs */
    if (likely(!req->dcopy)) {
        scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
            if (i == 0) {
                sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
                sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
                sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
                continue;
            }
            if ((i - 1) & 0x1) {
                sge_pair->addr[1] = cpu_to_be64(sg_dma_address(sgel));
                sge_pair->len[1] = cpu_to_be32(sg_dma_len(sgel));
                sge_pair++;
            } else {
                sge_pair->addr[0] = cpu_to_be64(sg_dma_address(sgel));
                sge_pair->len[0] = cpu_to_be32(sg_dma_len(sgel));
            }
        }
    } else {
        /* Program the SGEs from the driver's own DDP buffers instead */
        xfer_len = scsi_bufflen(scmnd);
        list_for_each(tmp, &req->gen_list) {
            dma_buf = (struct csio_dma_buf *)tmp;
            if (i == 0) {
                sgl->addr0 = cpu_to_be64(dma_buf->paddr);
                sgl->len0 = cpu_to_be32(min(xfer_len, dma_buf->len));
                sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
            } else if ((i - 1) & 0x1) {
                sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
                sge_pair->len[1] = cpu_to_be32(min(xfer_len, dma_buf->len));
                sge_pair++;
            } else {
                sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
                sge_pair->len[0] = cpu_to_be32(min(xfer_len, dma_buf->len));
            }
            xfer_len -= min(xfer_len, dma_buf->len);
            i++;
        }
    }
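The first SGE lands in the SGL header (addr0/len0); every SGE after that is packed two per ulptx_sge_pair, with ((i - 1) & 0x1) selecting the slot and the pointer bumped once slot 1 fills. A quick standalone check of that packing arithmetic (not driver code):

#include <stdio.h>

int main(void)
{
    /* For SGE index i (0-based), show where it lands in the DSGL. */
    for (int i = 0; i < 6; i++) {
        if (i == 0) {
            printf("sge %d -> header addr0/len0\n", i);
        } else {
            int pair = (i - 1) / 2;   /* which sge_pair it falls into */
            int slot = (i - 1) & 0x1; /* slot 0 or 1 within that pair */
            printf("sge %d -> pair %d, slot %d\n", i, pair, slot);
        }
    }
    return 0;
}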
/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
    struct csio_dma_buf *dma_buf;
    uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) | ...);
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                                   FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
    wr->cookie = (uintptr_t)req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t)(req->tmo);
    wr->use_xfer_cnt = 1;
    wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
    wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));

    /* Get RSP DMA buffer */
    dma_buf = &req->dma_buf;

    /* Prepare RSP SGL */
    wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
    wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

    wr->r4 = 0;

    wr->u.fcoe.ctl_pri = 0;
    wr->u.fcoe.cp_en_class = 0;
    wr->u.fcoe.r3_lo[0] = 0;
    wr->u.fcoe.r3_lo[1] = 0;
/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
    struct csio_dma_buf *dma_buf;
    uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) | ...);
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                                   FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
    wr->cookie = (uintptr_t)req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t)(req->tmo);
    wr->use_xfer_cnt = 1;
    wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
    wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));

    /* Get RSP DMA buffer */
    dma_buf = &req->dma_buf;

    /* Prepare RSP SGL */
    wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
    wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

    wr->r4 = 0;

    wr->u.fcoe.ctl_pri = 0;
    wr->u.fcoe.cp_en_class = 0;
    wr->u.fcoe.r3_lo[0] = 0;
    wr->u.fcoe.r3_lo[1] = 0;
/* From CSIO_SCSI_DATA_WRSZ(): SGEs beyond the first add whole sge_pairs */
    if (unlikely((req)->nsge > 1))                      \
        (sz) += (sizeof(struct ulptx_sge_pair) *        \
                 (ALIGN(((req)->nsge - 1), 2) / 2));    \
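ALIGN(nsge - 1, 2) / 2 rounds the SGEs left after the first up to whole pairs, since each ulptx_sge_pair holds two entries. A quick standalone check of that arithmetic (ALIGN_UP stands in for the kernel's ALIGN):

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    /* SGEs beyond the first are packed two per sge_pair. */
    for (unsigned nsge = 1; nsge <= 6; nsge++) {
        unsigned pairs = nsge > 1 ? ALIGN_UP(nsge - 1, 2) / 2 : 0;
        printf("nsge=%u -> %u sge_pair(s)\n", nsge, pairs);
    }
    return 0;    /* prints 0, 1, 1, 2, 2, 3 */
}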
/*
 * csio_scsi_read - Create a SCSI READ WR.
 *
 * Gets a free WR slot and initializes it with the SCSI READ WR.
 */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);

    CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
    size = ALIGN(size, 16);

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (likely(req->drv_status == 0)) {
        if (likely(wrp.size1 >= size)) {
            csio_scsi_init_read_wr(req, wrp.addr1, size);
        } else {
            uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
            /*
             * Make a temporary copy of the WR and write back
             * the copy into the WR pair.
             */
            csio_scsi_init_read_wr(req, (void *)tmpwr, size);
            memcpy(wrp.addr1, tmpwr, wrp.size1);
            memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
        }
    }
/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 *
 * Gets a free WR slot and initializes it with the SCSI WRITE WR.
 */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);

    CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
    size = ALIGN(size, 16);

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (likely(req->drv_status == 0)) {
        if (likely(wrp.size1 >= size)) {
            csio_scsi_init_write_wr(req, wrp.addr1, size);
        } else {
            uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
            /*
             * Make a temporary copy of the WR and write back
             * the copy into the WR pair.
             */
            csio_scsi_init_write_wr(req, (void *)tmpwr, size);
            memcpy(wrp.addr1, tmpwr, wrp.size1);
            memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
        }
    }
/*
 * csio_setup_ddp - Setup DDP buffers for a Read request.
 */
    struct csio_hw *hw = req->lnode->hwp;

    scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
        sg_addr = sg_dma_address(sgel);
        sg_len = sg_dma_len(sgel);

        buf_off = sg_addr & (ddp_pagesz - 1);

        /* Except the 1st buffer, all buffers must start page-aligned */
        if (i != 0 && buf_off)
            goto unaligned;

        /* Except the last buffer, all buffers must end on a page boundary */
        if ((i != (req->nsge - 1)) &&
            ((buf_off + sg_len) & (ddp_pagesz - 1)))
            goto unaligned;
    }

    /* SGLs are virtually contiguous; HW will DDP straight into them */
    req->dcopy = 0;
    ...

unaligned:
    /* For unaligned SGLs, use internal DDP buffers and copy back later */
    req->dcopy = 1;

    /* Use gen_list to store the DDP buffers */
    INIT_LIST_HEAD(&req->gen_list);

    while (alloc_len < xfer_len) {
        dma_buf = csio_get_scsi_ddp(scsim);
        if (dma_buf == NULL || i > scsim->max_sge) {
            req->drv_status = -EBUSY;
            break;
        }
        alloc_len += dma_buf->len;
        list_add_tail(&dma_buf->list, &req->gen_list);
        i++;
    }

    if (!req->drv_status) {
        /* set number of ddp bufs used */
        req->nsge = i;
        ...
    }

    /* release dma descs */
    csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
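DDP needs the buffers to look like one virtually contiguous page range: every buffer except the first must start on a page boundary, and every buffer except the last must end on one. A standalone sketch of that check, assuming nothing from the driver (seg and ddp_aligned are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint32_t len; };

/* True if the segment list is usable for direct placement:
 * only the first may start mid-page, only the last may end mid-page. */
static bool ddp_aligned(const struct seg *sg, unsigned n, uint32_t pagesz)
{
    for (unsigned i = 0; i < n; i++) {
        uint32_t off = sg[i].addr & (pagesz - 1);

        if (i != 0 && off)
            return false;
        if (i != n - 1 && ((off + sg[i].len) & (pagesz - 1)))
            return false;
    }
    return true;
}

int main(void)
{
    struct seg ok[]  = { { 0x1800, 0x800 }, { 0x3000, 0x1000 }, { 0x5000, 0x10 } };
    struct seg bad[] = { { 0x1000, 0x900 }, { 0x3000, 0x1000 } };

    printf("ok:  %d\n", ddp_aligned(ok, 3, 4096));   /* 1 */
    printf("bad: %d\n", ddp_aligned(bad, 2, 4096));  /* 0: 1st seg ends mid-page */
    return 0;
}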
/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @size: Size of WR.
 */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                                   FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));

    wr->cookie = (uintptr_t) req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t) req->tmo;
    wr->sub_opcode_to_chk_all_io = ...;    /* abort vs. close sub-opcode */
    wr->r3[0] = 0;
    wr->r3[1] = 0;
    wr->r3[2] = 0;
    wr->r3[3] = 0;
    /* Since we re-use the same ioreq for abort as well */
    wr->t_cookie = (uintptr_t) req;
/* csio_scsi_abrt_cls - Create an ABORT/CLOSE WR for the given ioreq */
    struct csio_hw *hw = req->lnode->hwp;

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (req->drv_status != 0)
        return;

    if (wrp.size1 >= size) {
        /* Initialize WR in one shot */
        csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
    } else {
        uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
        /*
         * Make a temporary copy of the WR and write back
         * the copy into the WR pair.
         */
        csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
        memcpy(wrp.addr1, tmpwr, wrp.size1);
        memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
    }
/* csio_scsis_uninit - SM state: I/O not yet issued to FW */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);

    switch (evt) {
    case CSIO_SCSIE_START_IO:
        if (req->nsge) {
            if (req->datadir == DMA_TO_DEVICE) {
                req->dcopy = 0;
                csio_scsi_write(req);
            } else
                csio_setup_ddp(scsim, req);
        } else {
            csio_scsi_cmd(req);
        }

        if (likely(req->drv_status == 0)) {
            /* change state and enqueue on active_q */
            csio_set_state(&req->sm, csio_scsis_io_active);
            list_add_tail(&req->sm.sm_list, &scsim->active_q);
            csio_wr_issue(hw, req->eq_idx, false);
            return;
        }
        break;

    case CSIO_SCSIE_START_TM:
        csio_scsi_cmd(req);
        if (req->drv_status == 0) {
            csio_set_state(&req->sm, csio_scsis_tm_active);
            list_add_tail(&req->sm.sm_list, &scsim->active_q);
            csio_wr_issue(hw, req->eq_idx, false);
            return;
        }
        break;

    case CSIO_SCSIE_ABORT:
    case CSIO_SCSIE_CLOSE:
        /*
         * The driver can get here due to:
         * - a window in the cleanup path of the SCSI module, or
         * - a window in the time we tried to issue an abort/close
         *   of a request to FW, and the FW completed the request
         *   first.
         */
        req->drv_status = -EINVAL;
        break;
    ...
/* csio_scsis_io_active - SM state: I/O WR issued to FW, awaiting completion */
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn;

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);
        /*
         * In MSI-X mode with multiple queues, SCSI completions can
         * reach us sooner than the FW events that indicate an
         * I-T nexus loss (link down, remote device logo etc). We
         * don't want to return such I/Os to the upper layer
         * immediately, since we wouldn't have reported the I-T nexus
         * loss itself. This forces us to serialize such completions
         * with the reporting of the I-T nexus loss. Therefore, we
         * internally queue up such completions in the rnode.
         * The reporting of I-T nexus loss to the upper layer is then
         * followed by the returning of I/Os in this internal queue.
         */
        if (unlikely(req->wr_status != FW_SUCCESS)) {
            rn = req->rnode;
            /* FW says the remote device is lost, but the rnode
             * doesn't reflect it yet. */
            if (csio_scsi_itnexus_loss_error(req->wr_status) &&
                csio_is_rnode_ready(rn)) {
                csio_set_state(&req->sm,
                               csio_scsis_shost_cmpl_await);
                list_add_tail(&req->sm.sm_list,
                              &rn->host_cmpl_q);
            }
        }
        break;

    case CSIO_SCSIE_ABORT:
        csio_scsi_abrt_cls(req, SCSI_ABORT);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_aborting);
        }
        break;

    case CSIO_SCSIE_CLOSE:
        csio_scsi_abrt_cls(req, SCSI_CLOSE);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_closing);
        }
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;
    ...
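These handlers are function-pointer states: csio_set_state() swaps which handler receives the next event, so each state only has to deal with the events legal in it. A minimal standalone sketch of that pattern, with illustrative names rather than the driver's:

#include <stdio.h>

struct req;
typedef void (*state_fn)(struct req *, int evt);

struct req { state_fn state; };

static void st_uninit(struct req *r, int evt);
static void st_active(struct req *r, int evt);

static void set_state(struct req *r, state_fn s) { r->state = s; }

static void st_uninit(struct req *r, int evt)
{
    if (evt == 1) {                 /* START_IO */
        puts("uninit: issue WR -> active");
        set_state(r, st_active);
    }
    /* other events are simply not legal here */
}

static void st_active(struct req *r, int evt)
{
    if (evt == 2) {                 /* COMPLETED */
        puts("active: done -> uninit");
        set_state(r, st_uninit);
    }
}

int main(void)
{
    struct req r = { st_uninit };

    r.state(&r, 1);    /* START_IO */
    r.state(&r, 2);    /* COMPLETED */
    return 0;
}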
/* csio_scsis_tm_active - SM state: TM WR issued to FW */
    struct csio_hw *hw = req->lnode->hwp;

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    case CSIO_SCSIE_ABORT:
        csio_scsi_abrt_cls(req, SCSI_ABORT);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_aborting);
        }
        break;

    case CSIO_SCSIE_CLOSE:
        csio_scsi_abrt_cls(req, SCSI_CLOSE);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_closing);
        }
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;
    ...
/* csio_scsis_aborting - SM state: abort WR issued, awaiting its completion */
    struct csio_hw *hw = req->lnode->hwp;

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        csio_dbg(hw, "ioreq %p recvd cmpltd (wr_status:%d) "
                 "in aborting st\n", req, req->wr_status);
        /*
         * Use -ECANCELED to explicitly tell the ABORTED event that
         * the original I/O was returned to the driver by FW. We don't
         * really care whether the I/O was returned with success by
         * FW (because the ABORT and completion of the I/O crossed each
         * other), or with any other status. Once we are in aborting
         * state, the success or failure of the I/O is unimportant to
         * us.
         */
        req->drv_status = -ECANCELED;
        break;
    ...
    case CSIO_SCSIE_ABORTED:
        csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
                 req, req->wr_status, req->drv_status);
        /* Check if the original I/O WR completed before this abort did */
        if (req->drv_status != -ECANCELED) {
            ...
        }

        /*
         * Possible scenarios:
         * 1. The abort completed successfully (FW_SUCCESS).
         * 2. The completion of an I/O and the receipt of
         *    the abort for that I/O by the FW crossed each other
         *    (FW_EINVAL).
         * 3. The FW couldn't send the abort on the wire, as there
         *    was an I-T nexus loss (link down, remote device logged
         *    out etc). FW sent back an appropriate IT nexus loss status
         *    for the abort.
         * These three are treated as successful aborts by the host;
         * anything else is a failed attempt
         * to abort. Manipulate the return value of the request
         * so the host can convey these results to the upper layer.
         */
        if ((req->wr_status == FW_SUCCESS) ||
            (req->wr_status == FW_EINVAL) ||
            csio_scsi_itnexus_loss_error(req->wr_status))
            req->wr_status = FW_SCSI_ABORT_REQUESTED;

        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;
    ...
/* csio_scsis_closing - SM state: close WR issued, awaiting its completion */
    struct csio_hw *hw = req->lnode->hwp;

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        csio_dbg(hw, "ioreq %p recvd cmpltd (wr_status:%d) "
                 "in closing st\n", req, req->wr_status);
        /*
         * Use -ECANCELED to explicitly tell the CLOSED event that
         * the original I/O was returned to the driver by FW. We don't
         * really care whether the I/O was returned with success by
         * FW (because the CLOSE and completion of the I/O crossed each
         * other), or with any other status. Once we are in closing
         * state, the success or failure of the I/O is unimportant to
         * us.
         */
        req->drv_status = -ECANCELED;
        break;

    case CSIO_SCSIE_CLOSED:
        /* Check if the original I/O WR completed before this close did */
        if (req->drv_status != -ECANCELED) {
            ...
        }

        /* A close is only ever returned with FW_SUCCESS or FW_EINVAL */
        CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
                       (req->wr_status == FW_EINVAL));
        req->wr_status = FW_SCSI_CLOSE_REQUESTED;

        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;
    ...
/* csio_scsis_shost_cmpl_await - SM state: hold a completed I/O until the
 * I-T nexus loss has been reported to the upper layer */
    switch (evt) {
    case CSIO_SCSIE_ABORT:
    case CSIO_SCSIE_CLOSE:
        /*
         * Just succeed the abort/close request, and hope the remote
         * device unregister path cleans up this I/O within a sane
         * amount of time. If it does not, the request times out at
         * the upper layer, which then resorts
         * to the next level of error recovery.
         */
        req->drv_status = 0;
        break;
    case CSIO_SCSIE_DRVCLEANUP:
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;
    default:
        csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
                 evt, req);
    }
/*
 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
 * @hw: HW module.
 * @len: Length of the WR.
 *
 * This is the WR completion handler called per completion from the
 * ISR. It is called with lock held. It walks past the RSS and CPL message
 * headers where the actual WR is present.
 * It then gets the status, the WR handle (ioreq pointer) and the length of
 * the WR, based on the WR opcode. Only on a non-good status is the entire
 * WR copied into the WR cache (ioreq->fw_wr).
 */
    if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
        csio_warn(hw, "..." /* unexpected CPL on SCSI queue */,
                  cpl->opcode);
        return NULL;
    }

    tempwr = (uint8_t *)(cpl->data);

    if (likely((*tempwr == FW_SCSI_READ_WR) ||
               (*tempwr == FW_SCSI_WRITE_WR) ||
               (*tempwr == FW_SCSI_CMD_WR))) {
        ioreq = (struct csio_ioreq *)((uintptr_t)
                (((struct fw_scsi_read_wr *)tempwr)->cookie));

        ioreq->wr_status = status;
        return ioreq;
    }

    if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
        ioreq = (struct csio_ioreq *)((uintptr_t)
                (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));

        ioreq->wr_status = status;
        return ioreq;
    }
    ...
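The WR carries the ioreq pointer as an opaque 64-bit cookie on the way out; the completion path simply casts it back. A standalone sketch of that roundtrip, assuming nothing from the driver (the fake_* types are illustrative):

#include <assert.h>
#include <stdint.h>

struct fake_wr { uint64_t cookie; };
struct fake_ioreq { int wr_status; };

int main(void)
{
    struct fake_ioreq req = { 0 };
    struct fake_wr wr;

    /* Post: stash the request pointer in the WR. */
    wr.cookie = (uintptr_t)&req;

    /* Complete: recover the pointer and record the status. */
    struct fake_ioreq *done = (struct fake_ioreq *)(uintptr_t)wr.cookie;
    done->wr_status = 42;

    assert(done == &req && req.wr_status == 42);
    return 0;
}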
/*
 * csio_scsi_cleanup_io_q - Cleanup the given queue.
 *
 * Called with lock held. Has to exit with lock held.
 */
    struct csio_hw *hw = scm->hw;

    /* Call back the completion routines of the active_q */
    list_for_each_safe(tmp, next, q) {
        ioreq = (struct csio_ioreq *)tmp;
        list_del_init(&ioreq->sm.sm_list);
        scmnd = csio_scsi_cmnd(ioreq);
        spin_unlock_irq(&hw->lock);

        if (scmnd != NULL)
            ioreq->io_cbfn(hw, ioreq);

        spin_lock_irq(&scm->freelist_lock);
        csio_put_scsi_ioreq(scm, ioreq);
        spin_unlock_irq(&scm->freelist_lock);

        spin_lock_irq(&hw->lock);
    }
/* csio_abrt_cls() */
    struct csio_lnode *ln = ioreq->lnode;
    struct csio_hw *hw = ln->hwp;
    ...
/*
 * csio_scsi_abort_io_q - Abort all I/Os on the given queue.
 * @tmo: Timeout in milliseconds.
 *
 * Attempts to abort all I/Os on the given queue, and waits a maximum
 * of tmo milliseconds for them to complete. Returns success
 * if all I/Os are aborted, else returns -ETIMEDOUT.
 * Should be entered with lock held. Exits with lock held.
 *
 * NOTE: The lock has to be held across the loop that issues the aborts,
 * since dropping it mid-loop can corrupt the list. As a result, the caller
 * of this function has to ensure that the number of I/Os to be aborted
 * is small enough to avoid holding the lock for too long.
 */
    struct csio_hw *hw = scm->hw;
    ...

    /* Wait until all active I/Os are completed/aborted/closed */
    while (!list_empty(q) && count--) {
        spin_unlock_irq(&hw->lock);
        msleep(CSIO_SCSI_ABRT_TMO_MS);
        spin_lock_irq(&hw->lock);
    }

    /* All aborts completed */
    if (list_empty(q))
        return 0;

    return -ETIMEDOUT;
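The wait is a bounded poll: drop the lock, sleep one interval, retake the lock, re-check, up to tmo/interval rounds. A standalone sketch of the count arithmetic and loop shape (a plain flag stands in for the list and lock, and the sleep is elided):

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Poll 'done' every interval_ms until it is set or tmo_ms elapses. */
static int wait_drained(volatile bool *done, int tmo_ms, int interval_ms)
{
    int count = DIV_ROUND_UP(tmo_ms, interval_ms);

    while (!*done && count--) {
        /* real code: unlock, msleep(interval_ms), relock */
    }
    return *done ? 0 : -1;    /* -1 stands in for -ETIMEDOUT */
}

int main(void)
{
    bool done = true;
    printf("%d\n", wait_drained(&done, 30000, 2000));    /* 0 */
    done = false;
    printf("%d\n", wait_drained(&done, 30000, 2000));    /* -1 */
    return 0;
}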
/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in the SCSI module.
 *
 * Called with lock held, should exit with lock held.
 */
    struct csio_hw *hw = scm->hw;
    int rv = 0;
    int count = ...;    /* poll budget */

    /* No I/Os pending */
    if (list_empty(&scm->active_q))
        return 0;

    /* Wait until all active I/Os are completed/aborted/closed */
    while (!list_empty(&scm->active_q) && count--) {
        spin_unlock_irq(&hw->lock);
        msleep(CSIO_SCSI_ABRT_TMO_MS);
        spin_lock_irq(&hw->lock);
    }

    /* All I/Os completed */
    if (list_empty(&scm->active_q))
        return 0;

    /* Else abort the remaining I/Os, waiting up to 30 seconds for them */
    rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
    ...
    csio_scsi_cleanup_io_q(scm, &scm->active_q);

    CSIO_DB_ASSERT(list_empty(&scm->active_q));

    return rv;
/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of the given lnode.
 *
 * Called with lock held, should exit with lock held.
 * Can sleep (with dropped lock) when waiting for I/Os to complete.
 */
    struct csio_hw *hw = scm->hw;
    struct csio_scsi_level_data sld;
    int rv = 0;
    int count = ...;    /* poll budget */

    sld.level = CSIO_LEV_LNODE;
    sld.lnode = ln;
    INIT_LIST_HEAD(&ln->cmpl_q);
    csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);

    /* No I/Os pending on this lnode */
    if (list_empty(&ln->cmpl_q))
        return 0;

    /* Wait until all active I/Os are completed/aborted/closed */
    while (!list_empty(&ln->cmpl_q) && count--) {
        spin_unlock_irq(&hw->lock);
        msleep(CSIO_SCSI_ABRT_TMO_MS);
        spin_lock_irq(&hw->lock);
    }

    /* All I/Os completed */
    if (list_empty(&ln->cmpl_q))
        return 0;

    /* Else abort the remaining I/Os, waiting up to 30 seconds for them */
    rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
    ...
    csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);

    CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));

    return rv;
/* csio_device_reset - sysfs handler: reset the HW */
    if (...)    /* input is not '1' */
        return -EINVAL;
    ...
    spin_lock_irq(&hw->lock);
    ...
    spin_unlock_irq(&hw->lock);

/* csio_disable_port - sysfs handler: enable/disable a port */
    if (...)    /* input is neither '0' nor '1' */
        return -EINVAL;

    /* Block upper I/Os */
    csio_lnodes_block_by_port(hw, ln->portid);

    spin_lock_irq(&hw->lock);
    csio_disable_lnodes(hw, ln->portid, disable);
    spin_unlock_irq(&hw->lock);

    /* Unblock upper I/Os */
    csio_lnodes_unblock_by_port(hw, ln->portid);
/* csio_show_dbg_level - sysfs read of the debug level */
    return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);

/* csio_store_dbg_level - sysfs write of the debug level */
    if (...)    /* input validation failed */
        return -EINVAL;
    ...
        return -EINVAL;

    ln->params.log_level = dbg_level;
    hw->params.log_level = dbg_level;

/* csio_show_num_reg_rnodes - sysfs read of the registered rnode count */
    return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
/* csio_scsi_copy_to_sgl - Copy data from the driver's DDP buffers into the
 * command's scatter-gather list, one bounded chunk at a time */
    dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);

    /* Copy data from driver buffer to SGs of SCSI CMD */
    while (bytes_left > 0 && sg && dma_buf) {
        if (buf_off >= dma_buf->len) {
            buf_off = 0;
            dma_buf = (struct csio_dma_buf *)csio_list_next(dma_buf);
            continue;
        }

        if (start_off >= sg->length) {
            start_off -= sg->length;
            sg = sg_next(sg);
            continue;
        }

        buf_addr = dma_buf->vaddr + buf_off;
        sg_off = sg->offset + start_off;
        bytes_copy = min((dma_buf->len - buf_off),
                         sg->length - start_off);
        bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
                         bytes_copy);

        sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
        if (!sg_addr) {
            csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
                     sg, req);
            break;
        }

        memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
        kunmap_atomic(sg_addr);

        start_off += bytes_copy;
        buf_off += bytes_copy;
        bytes_left -= bytes_copy;
    }
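Each chunk is clipped three ways: bytes left in the current source buffer, bytes left in the current SG entry, and bytes left in the mapped page. A standalone sketch of that clipping arithmetic (values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint32_t page_sz  = 4096;
    uint32_t buf_left = 10000;        /* remainder of the source buffer */
    uint32_t sg_left  = 3000;         /* remainder of the current SG entry */
    uint32_t sg_off   = 4096 + 2500;  /* absolute offset into the SG pages */

    uint32_t chunk = MIN(buf_left, sg_left);
    chunk = MIN(page_sz - (sg_off & (page_sz - 1)), chunk);

    /* Prints 1596: the copy stops at the next page boundary. */
    printf("chunk = %u\n", chunk);
    return 0;
}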
/*
 * csio_scsi_err_handler - SCSI error handler.
 */
    struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);

    switch (req->wr_status) {
    ...
    case FW_SCSI_RSP_ERR:
        dma_buf = &req->dma_buf;
        fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
        rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);

        flags = fcp_resp->resp.fr_flags;
        scsi_status = fcp_resp->resp.fr_status;

        if (flags & FCP_RSP_LEN_VAL) {
            rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
            if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
                (rsp_info->rsp_code != FCP_TMF_CMPL)) {
                host_status = DID_ERROR;
                goto out;
            }
        }

        if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
            sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
            if (sns_len > SCSI_SENSE_BUFFERSIZE)
                sns_len = SCSI_SENSE_BUFFERSIZE;

            memcpy(cmnd->sense_buffer,
                   &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
        }

        /* Under run */
        if (flags & FCP_RESID_UNDER) {
            scsi_set_resid(cmnd,
                           be32_to_cpu(fcp_resp->ext.fr_resid));

            if (!(flags & FCP_SNS_LEN_VAL) &&
                (scsi_status == SAM_STAT_GOOD) &&
                ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
                                        < cmnd->underflow))
                host_status = DID_ERROR;
        }
        break;

    case FW_SCSI_OVER_FLOW_ERR:
        csio_warn(hw,
                  "Over-flow error,cmnd:0x%x expected len:0x%x"
                  " resid:0x%x\n", cmnd->cmnd[0],
                  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
        host_status = DID_ERROR;
        break;

    case FW_SCSI_UNDER_FLOW_ERR:
        csio_warn(hw,
                  "Under-flow error,cmnd:0x%x expected"
                  " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
                  cmnd->cmnd[0], scsi_bufflen(cmnd),
                  scsi_get_resid(cmnd), cmnd->device->lun,
                  rn->flowid);
        host_status = DID_ERROR;
        break;

    case FW_SCSI_ABORT_REQUESTED:
    case FW_SCSI_ABORTED:
    case FW_SCSI_CLOSE_REQUESTED:
        csio_dbg(hw, "..." /* aborted/closed notice */,
                 cmnd->cmnd[0],
                 (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
                 "closed" : "aborted");
        host_status = DID_REQUEUE;
        if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
            CSIO_INC_STATS(scm, n_closed);
        else
            CSIO_INC_STATS(scm, n_aborted);
        break;
    ...
        csio_dbg(hw, "..." /* FW timed out the abort itself */,
                 req, cmnd, req->wr_status);
        host_status = DID_ERROR;
        break;
    ...
        /*
         * In FW, the rdev can sit in this state briefly before moving
         * to the disappeared/lost state, so complete the request equivalent
         * to device-disappeared!
         */
        host_status = DID_ERROR;
        break;

    default:
        csio_dbg(hw, "..." /* unknown FW WR status */,
                 req->wr_status, req, cmnd);
        host_status = DID_ERROR;
        break;
    }

out:
    if (req->nsge > 0) {
        scsi_dma_unmap(cmnd);
        if (req->dcopy && (host_status == DID_OK))
            host_status = csio_scsi_copy_to_sgl(hw, req);
    }

    cmnd->result = (((host_status) << 16) | scsi_status);
    cmnd->scsi_done(cmnd);

    /* Wake up waiting threads */
    complete(&req->cmplobj);
/*
 * csio_scsi_cbfn - SCSI callback function.
 */
    if (likely(req->wr_status == FW_SUCCESS)) {
        if (req->nsge > 0) {
            scsi_dma_unmap(cmnd);
            if (req->dcopy)
                host_status = csio_scsi_copy_to_sgl(hw, req);
        }

        cmnd->result = (((host_status) << 16) | scsi_status);
        cmnd->scsi_done(cmnd);
    } else {
        /* Completed with error: hand off to the error handler */
        csio_scsi_err_handler(hw, req);
    }
/*
 * csio_queuecommand - Entry point to kickstart an I/O request.
 *
 * This routine does the following:
 * - Checks for HW and rnode module readiness.
 * - Gets a free ioreq structure (which is already initialized
 *   to uninit during its allocation).
 * - Maps SG elements.
 * - Initializes ioreq members.
 * - Kicks off the SCSI state machine for this I/O.
 * - Returns busy status on error.
 */
    struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
    struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

    sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];

    nr = fc_remote_port_chkready(rport);
    if (nr) {
        cmnd->result = nr;
        goto err_done;
    }

    if (unlikely(!csio_is_hw_ready(hw))) {
        cmnd->result = (DID_REQUEUE << 16);
        goto err_done;
    }

    /* Get req->nsge, if there are SG elements to be mapped */
    nsge = scsi_dma_map(cmnd);
    ...

    /* Do we support so many mappings? */
    if (unlikely(nsge > scsim->max_sge)) {
        csio_warn(hw, "..."
                  " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
        goto err_dma;
    }

    /* Get a free ioreq structure - SM is already set to uninit */
    ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
    if (!ioreq) {
        csio_err(hw, "Out of I/O request elements. Active #:%d\n",
                 scsim->stats.n_active);
        goto err_dma;
    }

    ioreq->nsge = nsge;
    ioreq->lnode = ln;
    ioreq->rnode = rn;
    ioreq->iq_idx = sqset->iq_idx;
    ioreq->eq_idx = sqset->eq_idx;
    ioreq->wr_status = 0;
    ioreq->drv_status = 0;
    ioreq->tmo = 0;
    ioreq->datadir = cmnd->sc_data_direction;

    if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
        ln->stats.n_output_bytes += scsi_bufflen(cmnd);
    } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
        ln->stats.n_input_bytes += scsi_bufflen(cmnd);
    }

    /* Set the callback */
    ioreq->io_cbfn = csio_scsi_cbfn;

    /* Needed during abort */
    cmnd->host_scribble = (unsigned char *)ioreq;
    cmnd->SCp.Message = 0;

    /* Kick off the SCSI I/O SM on the ioreq */
    spin_lock_irqsave(&hw->lock, flags);
    retval = csio_scsi_start_io(ioreq);
    spin_unlock_irqrestore(&hw->lock, flags);
    ...

err_done:
    cmnd->scsi_done(cmnd);
    return 0;
/* csio_do_abrt_cls - Issue an abort or close WR for the given ioreq */
    struct csio_lnode *ln = ioreq->lnode;
    struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];

    ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
    /*
     * Use the current processor's queue for posting the abort/close, but keep
     * the ingress queue ID of the original I/O being aborted/closed - we
     * need the abort and the original I/O completion to arrive on the same
     * ingress queue, so they are serialized.
     */
    ioreq->eq_idx = sqset->eq_idx;
/* csio_eh_abort_handler - SCSI midlayer abort entry point */
    struct csio_lnode *ln = shost_priv(cmnd->device->host);
    struct csio_hw *hw = csio_lnode_to_hw(ln);
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);
    struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);

    ioreq = (struct csio_ioreq *)cmnd->host_scribble;
    if (!ioreq)
        return SUCCESS;

    csio_dbg(hw, "..." /* request-to-abort details */,
             ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
             cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
    ...

    /* Issue the abort (or a close, if the lnode is not ready) */
    reinit_completion(&ioreq->cmplobj);
    spin_lock_irq(&hw->lock);
    rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
    spin_unlock_irq(&hw->lock);

    if (rv != 0) {
        if (rv == -EINVAL) {
            /* Return success if the abort/close was issued against
             * an I/O that had already completed. */
            return SUCCESS;
        }
        goto inval_scmnd;
    }

    /* Wait for the abort to complete */
    wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));

    /* FW didn't respond to the abort within the timeout period */
    if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
        csio_err(hw, "Abort timed out -- req: %p\n", ioreq);

inval_scmnd:
        if (ioreq->nsge > 0)
            scsi_dma_unmap(cmnd);

        spin_lock_irq(&hw->lock);
        ...
        spin_unlock_irq(&hw->lock);

        cmnd->result = (DID_ERROR << 16);
        cmnd->scsi_done(cmnd);

        return FAILED;
    }

    /* FW successfully aborted the request */
    if (host_byte(cmnd->result) == DID_REQUEUE) {
        csio_info(hw, "..." /* aborted */,
                  cmnd->device->id, cmnd->device->lun,
                  scsi_cmd_to_rq(cmnd)->tag);
        return SUCCESS;
    } else {
        csio_info(hw, "..." /* failed to abort */,
                  cmnd->device->id, cmnd->device->lun,
                  scsi_cmd_to_rq(cmnd)->tag);
        return FAILED;
    }
/*
 * csio_tm_cbfn - TM callback function.
 *
 * Caches the result in 'cmnd', since the ioreq is freed soon after we
 * return from here and the waiting thread must not trust its contents.
 */
    csio_dbg(hw, "..." /* TM completion status */,
             req, req->wr_status);

    /* Cache the FW return status */
    cmnd->SCp.Status = req->wr_status;
    ...

    /*
     * For an FCP RSP error, inspect the response: FCP-4 requires
     * FCP_RSP_LEN_VAL to be set for TM completions, and a rsp_code of
     * FCP_TMF_CMPL then indicates a successful TM. Any other rsp_code,
     * or a target that never sets the flag, is treated as a failed TM.
     */
    if (req->wr_status == FW_SCSI_RSP_ERR) {
        dma_buf = &req->dma_buf;
        fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
        rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);

        flags = fcp_resp->resp.fr_flags;

        /* Modify the return status if the flags indicate success */
        if (flags & FCP_RSP_LEN_VAL)
            if (rsp_info->rsp_code == FCP_TMF_CMPL)
                cmnd->SCp.Status = FW_SUCCESS;

        csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
    }
/* csio_eh_lun_reset_handler - SCSI midlayer LUN reset entry point */
    struct csio_lnode *ln = shost_priv(cmnd->device->host);
    struct csio_hw *hw = csio_lnode_to_hw(ln);
    struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
    ...

    csio_dbg(hw, "..." /* LUN reset request details */,
             cmnd->device->lun, rn->flowid, rn->scsi_id);

    if (!csio_is_lnode_ready(ln)) {
        csio_err(hw,
                 "LUN reset cannot be issued on non-ready"
                 " ...\n", ln->vnp_flowid, cmnd->device->lun);
        goto fail;
    }

    /*
     * The FC transport may have blocked the rport; in that case error
     * handlers run only after either the
     * remote node has come back online, or device loss timer has fired
     * and the rport was removed.
     */
    if (fc_remote_port_chkready(rn->rport)) {
        csio_err(hw,
                 "LUN reset cannot be issued on non-ready"
                 " ...\n", rn->flowid, cmnd->device->lun);
        goto fail;
    }

    /* Get a free ioreq structure - SM is already set to uninit */
    ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
    if (!ioreq) {
        csio_err(hw, "Out of IO request elements. Active # :%d\n",
                 scsim->stats.n_active);
        goto fail;
    }

    sqset = &hw->sqset[ln->portid][smp_processor_id()];
    ioreq->nsge = 0;
    ioreq->lnode = ln;
    ioreq->rnode = rn;
    ioreq->iq_idx = sqset->iq_idx;
    ioreq->eq_idx = sqset->eq_idx;

    /* Needed during abort */
    cmnd->host_scribble = (unsigned char *)ioreq;
    cmnd->SCp.Status = 0;

    cmnd->SCp.Message = FCP_TMF_LUN_RESET;
    ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;

    /*
     * FW times the LUN reset for ioreq->tmo, so we have to wait a little
     * longer than that to let FW return a timed-out response as well.
     */
    count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);

    /* Set the callback */
    ioreq->io_cbfn = csio_tm_cbfn;

    /* Save off the ioreq info for later use */
    sld.level = CSIO_LEV_LUN;
    sld.lnode = ioreq->lnode;
    sld.rnode = ioreq->rnode;
    sld.oslun = cmnd->device->lun;

    spin_lock_irqsave(&hw->lock, flags);
    /* Kick off the TM SM on the ioreq */
    ...
    spin_unlock_irqrestore(&hw->lock, flags);
    ...

    /* Wait for completion: the TM callback clears the command pointer */
    while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
           && count--)
        msleep(CSIO_SCSI_TM_POLL_MS);

    /* LUN reset timed-out */
    if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
        csio_err(hw, "..." /* LUN reset timed out */,
                 cmnd->device->id, cmnd->device->lun);

        spin_lock_irq(&hw->lock);
        ...
        list_del_init(&ioreq->sm.sm_list);
        spin_unlock_irq(&hw->lock);

        goto fail;
    }

    /* LUN reset returned; check the cached status */
    if (cmnd->SCp.Status != FW_SUCCESS) {
        csio_err(hw, "..." /* LUN reset failed */,
                 cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
        goto fail;
    }

    /* Gather the I/Os that were pending on this LUN at reset time and
     * abort them */
    spin_lock_irq(&hw->lock);
    csio_scsi_gather_active_ios(scsim, &sld, &local_q);
    retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
    spin_unlock_irq(&hw->lock);

    /* Aborts may have timed out */
    if (retval != 0) {
        csio_err(hw,
                 "Attempt to abort I/Os during LUN reset of %llu"
                 " returned %d\n", cmnd->device->lun, retval);
        /* Return I/Os back to the active_q */
        spin_lock_irq(&hw->lock);
        list_splice_tail_init(&local_q, &scsim->active_q);
        spin_unlock_irq(&hw->lock);
        goto fail;
    }

    csio_info(hw, "..." /* LUN reset succeeded */,
              cmnd->device->id, cmnd->device->lun);

    return SUCCESS;
/* csio_slave_alloc - bind the rport's lnode to the scsi_device */
    if (!rport || fc_remote_port_chkready(rport))
        return -ENXIO;

    sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));

/* csio_slave_destroy */
    sdev->hostdata = NULL;

/* csio_scan_finished */
    spin_lock_irq(shost->host_lock);
    if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
        rv = 1;
    else
        rv = ...;    /* scan-done heuristic */
    spin_unlock_irq(shost->host_lock);

    return rv;
/* physical-port SCSI host template */
    .this_id        = -1,
    ...
/* NPIV vport SCSI host template */
    .this_id        = -1,
/*
 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
 * @scm: SCSI module.
 * @hw: HW device.
 * @buf_size: Buffer size.
 * @num_buf: Number of buffers.
 */
    if (!buf_size)
        return -EINVAL;

    INIT_LIST_HEAD(&scm->ddp_freelist);

    /* Align buf size to page size */
    buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
    ...
    for (n = 0; n < num_buf; n++) {
        ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
        if (!ddp_desc) {
            csio_err(hw, "..." /* descriptor alloc failed */,
                     scm->stats.n_free_ddp);
            goto no_mem;
        }

        /* Allocate DMA memory for the DDP buffer */
        ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
                                             &ddp_desc->paddr, GFP_KERNEL);
        if (!ddp_desc->vaddr) {
            kfree(ddp_desc);
            goto no_mem;
        }

        ddp_desc->len = unit_size;

        /* Add it to the SCSI DDP freelist */
        list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
    }

    return 0;

no_mem:
    /* Release the DMA descriptors allocated so far */
    list_for_each(tmp, &scm->ddp_freelist) {
        ddp_desc = (struct csio_dma_buf *)tmp;
        tmp = csio_list_prev(tmp);
        dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
                          ddp_desc->vaddr, ddp_desc->paddr);
        list_del_init(&ddp_desc->list);
        kfree(ddp_desc);
    }

    scm->stats.n_free_ddp = 0;

    return -ENOMEM;
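(buf_size + PAGE_SIZE - 1) & PAGE_MASK rounds the requested size up to a whole number of pages. A quick standalone check of that arithmetic (PAGE_SIZE hard-coded to 4096 here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t page_size = 4096;
    const uint32_t page_mask = ~(page_size - 1);
    uint32_t sizes[] = { 1, 4096, 4097, 12288 };

    for (int i = 0; i < 4; i++) {
        uint32_t rounded = (sizes[i] + page_size - 1) & page_mask;
        printf("%u -> %u\n", sizes[i], rounded);
    }
    return 0;    /* prints 4096, 4096, 8192, 12288 */
}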
/*
 * csio_scsi_free_ddp_bufs - Free the DDP buffers allocated for unaligned SGLs.
 */
    list_for_each(tmp, &scm->ddp_freelist) {
        ddp_desc = (struct csio_dma_buf *)tmp;
        tmp = csio_list_prev(tmp);
        dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
                          ddp_desc->vaddr, ddp_desc->paddr);
        list_del_init(&ddp_desc->list);
        kfree(ddp_desc);
    }

    scm->stats.n_free_ddp = 0;
/*
 * csio_scsim_init - Initialize the SCSI module.
 * @scm: SCSI module.
 * @hw: HW module.
 */
    INIT_LIST_HEAD(&scm->active_q);
    scm->hw = hw;

    scm->proto_cmd_len = sizeof(struct fcp_cmnd);
    scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
    scm->max_sge = CSIO_SCSI_MAX_SGE;

    spin_lock_init(&scm->freelist_lock);

    /* Pre-allocate ioreqs and initialize them */
    INIT_LIST_HEAD(&scm->ioreq_freelist);
    for (i = 0; i < csio_scsi_ioreqs; i++) {
        ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
        if (!ioreq) {
            csio_err(hw, "..." /* ioreq alloc failed */,
                     scm->stats.n_free_ioreq);
            goto free_ioreq;
        }

        /* Allocate a DMA buffer for the response payload */
        dma_buf = &ioreq->dma_buf;
        dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
                                        &dma_buf->paddr);
        if (!dma_buf->vaddr) {
            kfree(ioreq);
            goto free_ioreq;
        }

        dma_buf->len = scm->proto_rsp_len;

        /* Set state to uninit */
        csio_init_state(&ioreq->sm, csio_scsis_uninit);
        INIT_LIST_HEAD(&ioreq->gen_list);
        init_completion(&ioreq->cmplobj);

        list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
    }
    ...

free_ioreq:
    /* Free up existing allocations: an error here means we return for good */
    while (!list_empty(&scm->ioreq_freelist)) {
        struct csio_sm *tmp;

        tmp = list_first_entry(&scm->ioreq_freelist,
                               struct csio_sm, sm_list);
        list_del_init(&tmp->sm_list);
        ioreq = (struct csio_ioreq *)tmp;

        dma_buf = &ioreq->dma_buf;
        dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
                      dma_buf->paddr);
        kfree(ioreq);
    }

    scm->stats.n_free_ioreq = 0;

    return -ENOMEM;
/*
 * csio_scsim_exit - Tear down the SCSI module: free all preallocated
 * ioreqs and DDP buffers.
 */
    while (!list_empty(&scm->ioreq_freelist)) {
        struct csio_sm *tmp;

        tmp = list_first_entry(&scm->ioreq_freelist,
                               struct csio_sm, sm_list);
        list_del_init(&tmp->sm_list);
        ioreq = (struct csio_ioreq *)tmp;

        dma_buf = &ioreq->dma_buf;
        dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
                      dma_buf->paddr);
        kfree(ioreq);
    }

    scm->stats.n_free_ioreq = 0;

    csio_scsi_free_ddp_bufs(scm, scm->hw);
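csio_scsim_init() preallocates every ioreq up front and recycles them through a locked freelist, so the hot I/O path never allocates memory. A minimal standalone sketch of that pool pattern, with illustrative names and the spinlock omitted:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int payload; };

static struct node *freelist;

/* Pre-allocate n objects onto the freelist; returns 0 on success. */
static int pool_init(int n)
{
    for (int i = 0; i < n; i++) {
        struct node *nd = calloc(1, sizeof(*nd));
        if (!nd)
            return -1;    /* caller tears the pool down, as the driver does */
        nd->next = freelist;
        freelist = nd;
    }
    return 0;
}

/* Fast-path get/put: no allocation, just list surgery. */
static struct node *pool_get(void)
{
    struct node *nd = freelist;

    if (nd)
        freelist = nd->next;
    return nd;
}

static void pool_put(struct node *nd)
{
    nd->next = freelist;
    freelist = nd;
}

int main(void)
{
    if (pool_init(4))
        return 1;

    struct node *a = pool_get();
    a->payload = 7;
    pool_put(a);

    /* The recycled object comes back with its old payload. */
    printf("recycled payload: %d\n", pool_get()->payload);    /* 7 */
    return 0;    /* pool objects intentionally leaked at exit for brevity */
}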