Lines Matching +full:reset +full:- +full:n +full:- +full:io
1 // SPDX-License-Identifier: GPL-2.0-only
98 u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1); in fnic_io_lock_hash()
100 return &fnic->io_req_lock[hash]; in fnic_io_lock_hash()
106 return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; in fnic_io_lock_tag()
111 * also unmap and free the device-private scatter/gather list.
117 if (io_req->sgl_list_pa) in fnic_release_ioreq_buf()
118 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, in fnic_release_ioreq_buf()
119 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt, in fnic_release_ioreq_buf()
123 if (io_req->sgl_cnt) in fnic_release_ioreq_buf()
124 mempool_free(io_req->sgl_list_alloc, in fnic_release_ioreq_buf()
125 fnic->io_sgl_pool[io_req->sgl_type]); in fnic_release_ioreq_buf()
126 if (io_req->sense_buf_pa) in fnic_release_ioreq_buf()
127 dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa, in fnic_release_ioreq_buf()
135 if (!fnic->fw_ack_recd[0]) in free_wq_copy_descs()
142 if (wq->to_clean_index <= fnic->fw_ack_index[0]) in free_wq_copy_descs()
143 wq->ring.desc_avail += (fnic->fw_ack_index[0] in free_wq_copy_descs()
144 - wq->to_clean_index + 1); in free_wq_copy_descs()
146 wq->ring.desc_avail += (wq->ring.desc_count in free_wq_copy_descs()
147 - wq->to_clean_index in free_wq_copy_descs()
148 + fnic->fw_ack_index[0] + 1); in free_wq_copy_descs()
155 wq->to_clean_index = in free_wq_copy_descs()
156 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; in free_wq_copy_descs()
159 fnic->fw_ack_recd[0] = 0; in free_wq_copy_descs()
175 spin_lock_irqsave(&fnic->fnic_lock, flags); in __fnic_set_state_flags()
176 spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags); in __fnic_set_state_flags()
179 fnic->state_flags &= ~st_flags; in __fnic_set_state_flags()
181 fnic->state_flags |= st_flags; in __fnic_set_state_flags()
183 spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags); in __fnic_set_state_flags()
184 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in __fnic_set_state_flags()
192 * Routine to send reset msg to fw
196 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; in fnic_fw_reset_handler()
200 /* indicate fwreset to io path */ in fnic_fw_reset_handler()
203 skb_queue_purge(&fnic->frame_queue); in fnic_fw_reset_handler()
204 skb_queue_purge(&fnic->tx_queue); in fnic_fw_reset_handler()
206 /* wait for io cmpl */ in fnic_fw_reset_handler()
207 while (atomic_read(&fnic->in_flight)) in fnic_fw_reset_handler()
210 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_fw_reset_handler()
212 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_fw_reset_handler()
216 ret = -EAGAIN; in fnic_fw_reset_handler()
219 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_fw_reset_handler()
220 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_fw_reset_handler()
221 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_fw_reset_handler()
222 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_fw_reset_handler()
224 &fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_fw_reset_handler()
227 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_fw_reset_handler()
230 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); in fnic_fw_reset_handler()
231 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fw_reset_handler()
232 "Issued fw reset\n"); in fnic_fw_reset_handler()
235 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fw_reset_handler()
236 "Failed to issue fw reset\n"); in fnic_fw_reset_handler()
249 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; in fnic_flogi_reg_handler()
251 struct fc_lport *lp = fnic->lport; in fnic_flogi_reg_handler()
256 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_flogi_reg_handler()
258 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_flogi_reg_handler()
262 ret = -EAGAIN; in fnic_flogi_reg_handler()
266 if (fnic->ctlr.map_dest) { in fnic_flogi_reg_handler()
270 memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); in fnic_flogi_reg_handler()
274 if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { in fnic_flogi_reg_handler()
277 fnic->data_src_addr, in fnic_flogi_reg_handler()
278 lp->r_a_tov, lp->e_d_tov); in fnic_flogi_reg_handler()
279 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_flogi_reg_handler()
280 "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", in fnic_flogi_reg_handler()
281 fc_id, fnic->data_src_addr, gw_mac); in fnic_flogi_reg_handler()
285 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_flogi_reg_handler()
286 "FLOGI reg issued fcid %x map %d dest %pM\n", in fnic_flogi_reg_handler()
287 fc_id, fnic->ctlr.map_dest, gw_mac); in fnic_flogi_reg_handler()
290 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_flogi_reg_handler()
291 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_flogi_reg_handler()
292 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_flogi_reg_handler()
293 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_flogi_reg_handler()
294 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_flogi_reg_handler()
297 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_flogi_reg_handler()
312 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); in fnic_queue_wq_copy_desc()
313 struct fc_rport_libfc_priv *rp = rport->dd_data; in fnic_queue_wq_copy_desc()
315 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_wq_copy_desc()
324 desc = io_req->sgl_list; in fnic_queue_wq_copy_desc()
326 desc->addr = cpu_to_le64(sg_dma_address(sg)); in fnic_queue_wq_copy_desc()
327 desc->len = cpu_to_le32(sg_dma_len(sg)); in fnic_queue_wq_copy_desc()
328 desc->_resvd = 0; in fnic_queue_wq_copy_desc()
332 io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev, in fnic_queue_wq_copy_desc()
333 io_req->sgl_list, in fnic_queue_wq_copy_desc()
334 sizeof(io_req->sgl_list[0]) * sg_count, in fnic_queue_wq_copy_desc()
336 if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) { in fnic_queue_wq_copy_desc()
337 printk(KERN_ERR "DMA mapping failed\n"); in fnic_queue_wq_copy_desc()
342 io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev, in fnic_queue_wq_copy_desc()
343 sc->sense_buffer, in fnic_queue_wq_copy_desc()
346 if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) { in fnic_queue_wq_copy_desc()
347 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, in fnic_queue_wq_copy_desc()
348 sizeof(io_req->sgl_list[0]) * sg_count, in fnic_queue_wq_copy_desc()
350 printk(KERN_ERR "DMA mapping failed\n"); in fnic_queue_wq_copy_desc()
354 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_queue_wq_copy_desc()
357 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); in fnic_queue_wq_copy_desc()
359 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_queue_wq_copy_desc()
363 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); in fnic_queue_wq_copy_desc()
364 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, in fnic_queue_wq_copy_desc()
365 "fnic_queue_wq_copy_desc failure - no descriptors\n"); in fnic_queue_wq_copy_desc()
366 atomic64_inc(&misc_stats->io_cpwq_alloc_failures); in fnic_queue_wq_copy_desc()
371 if (sc->sc_data_direction == DMA_FROM_DEVICE) in fnic_queue_wq_copy_desc()
373 else if (sc->sc_data_direction == DMA_TO_DEVICE) in fnic_queue_wq_copy_desc()
377 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && in fnic_queue_wq_copy_desc()
378 (rp->flags & FC_RP_FLAGS_RETRY)) in fnic_queue_wq_copy_desc()
381 fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag, in fnic_queue_wq_copy_desc()
382 0, exch_flags, io_req->sgl_cnt, in fnic_queue_wq_copy_desc()
384 io_req->sgl_list_pa, in fnic_queue_wq_copy_desc()
385 io_req->sense_buf_pa, in fnic_queue_wq_copy_desc()
390 sc->cmnd, sc->cmd_len, in fnic_queue_wq_copy_desc()
392 fc_lun.scsi_lun, io_req->port_id, in fnic_queue_wq_copy_desc()
393 rport->maxframe_size, rp->r_a_tov, in fnic_queue_wq_copy_desc()
394 rp->e_d_tov); in fnic_queue_wq_copy_desc()
396 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_wq_copy_desc()
397 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_wq_copy_desc()
398 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_wq_copy_desc()
399 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_wq_copy_desc()
400 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_wq_copy_desc()
402 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); in fnic_queue_wq_copy_desc()
414 const int tag = scsi_cmd_to_rq(sc)->tag; in fnic_queuecommand_lck()
415 struct fc_lport *lp = shost_priv(sc->device->host); in fnic_queuecommand_lck()
419 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_queuecommand_lck()
436 rport = starget_to_rport(scsi_target(sc->device)); in fnic_queuecommand_lck()
438 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_queuecommand_lck()
439 "returning DID_NO_CONNECT for IO as rport is NULL\n"); in fnic_queuecommand_lck()
440 sc->result = DID_NO_CONNECT << 16; in fnic_queuecommand_lck()
447 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_queuecommand_lck()
448 "rport is not ready\n"); in fnic_queuecommand_lck()
449 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); in fnic_queuecommand_lck()
450 sc->result = ret; in fnic_queuecommand_lck()
455 rp = rport->dd_data; in fnic_queuecommand_lck()
456 if (!rp || rp->rp_state == RPORT_ST_DELETE) { in fnic_queuecommand_lck()
457 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_queuecommand_lck()
458 "rport 0x%x removed, returning DID_NO_CONNECT\n", in fnic_queuecommand_lck()
459 rport->port_id); in fnic_queuecommand_lck()
461 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); in fnic_queuecommand_lck()
462 sc->result = DID_NO_CONNECT<<16; in fnic_queuecommand_lck()
467 if (rp->rp_state != RPORT_ST_READY) { in fnic_queuecommand_lck()
468 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_queuecommand_lck()
469 "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", in fnic_queuecommand_lck()
470 rport->port_id, rp->rp_state); in fnic_queuecommand_lck()
472 sc->result = DID_IMM_RETRY << 16; in fnic_queuecommand_lck()
477 if (lp->state != LPORT_ST_READY || !(lp->link_up)) in fnic_queuecommand_lck()
480 atomic_inc(&fnic->in_flight); in fnic_queuecommand_lck()
484 * Don't re-enable interrupts in case they were disabled prior to the in fnic_queuecommand_lck()
487 spin_unlock(lp->host->host_lock); in fnic_queuecommand_lck()
488 fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; in fnic_queuecommand_lck()
489 fnic_priv(sc)->flags = FNIC_NO_FLAGS; in fnic_queuecommand_lck()
491 /* Get a new io_req for this SCSI IO */ in fnic_queuecommand_lck()
492 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); in fnic_queuecommand_lck()
494 atomic64_inc(&fnic_stats->io_stats.alloc_failures); in fnic_queuecommand_lck()
503 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, in fnic_queuecommand_lck()
504 tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state); in fnic_queuecommand_lck()
505 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand_lck()
510 io_req->sgl_cnt = sg_count; in fnic_queuecommand_lck()
511 io_req->sgl_type = FNIC_SGL_CACHE_DFLT; in fnic_queuecommand_lck()
513 io_req->sgl_type = FNIC_SGL_CACHE_MAX; in fnic_queuecommand_lck()
516 io_req->sgl_list = in fnic_queuecommand_lck()
517 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], in fnic_queuecommand_lck()
519 if (!io_req->sgl_list) { in fnic_queuecommand_lck()
520 atomic64_inc(&fnic_stats->io_stats.alloc_failures); in fnic_queuecommand_lck()
523 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand_lck()
528 io_req->sgl_list_alloc = io_req->sgl_list; in fnic_queuecommand_lck()
529 ptr = (unsigned long) io_req->sgl_list; in fnic_queuecommand_lck()
531 io_req->sgl_list = (struct host_sg_desc *) in fnic_queuecommand_lck()
533 + FNIC_SG_DESC_ALIGN - 1) in fnic_queuecommand_lck()
534 & ~(FNIC_SG_DESC_ALIGN - 1)); in fnic_queuecommand_lck()
539 * Will acquire lock defore setting to IO initialized. in fnic_queuecommand_lck()
547 io_req->port_id = rport->port_id; in fnic_queuecommand_lck()
548 io_req->start_time = jiffies; in fnic_queuecommand_lck()
549 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; in fnic_queuecommand_lck()
550 fnic_priv(sc)->io_req = io_req; in fnic_queuecommand_lck()
551 fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; in fnic_queuecommand_lck()
554 wq = &fnic->wq_copy[0]; in fnic_queuecommand_lck()
561 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, in fnic_queuecommand_lck()
563 io_req = fnic_priv(sc)->io_req; in fnic_queuecommand_lck()
564 fnic_priv(sc)->io_req = NULL; in fnic_queuecommand_lck()
565 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; in fnic_queuecommand_lck()
569 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand_lck()
571 atomic_dec(&fnic->in_flight); in fnic_queuecommand_lck()
573 spin_lock(lp->host->host_lock); in fnic_queuecommand_lck()
576 atomic64_inc(&fnic_stats->io_stats.active_ios); in fnic_queuecommand_lck()
577 atomic64_inc(&fnic_stats->io_stats.num_ios); in fnic_queuecommand_lck()
578 if (atomic64_read(&fnic_stats->io_stats.active_ios) > in fnic_queuecommand_lck()
579 atomic64_read(&fnic_stats->io_stats.max_active_ios)) in fnic_queuecommand_lck()
580 atomic64_set(&fnic_stats->io_stats.max_active_ios, in fnic_queuecommand_lck()
581 atomic64_read(&fnic_stats->io_stats.active_ios)); in fnic_queuecommand_lck()
583 /* REVISIT: Use per IO lock in the final code */ in fnic_queuecommand_lck()
584 fnic_priv(sc)->flags |= FNIC_IO_ISSUED; in fnic_queuecommand_lck()
587 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | in fnic_queuecommand_lck()
588 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | in fnic_queuecommand_lck()
589 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | in fnic_queuecommand_lck()
590 sc->cmnd[5]); in fnic_queuecommand_lck()
592 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, in fnic_queuecommand_lck()
596 /* if only we issued IO, will we have the io lock */ in fnic_queuecommand_lck()
600 atomic_dec(&fnic->in_flight); in fnic_queuecommand_lck()
602 spin_lock(lp->host->host_lock); in fnic_queuecommand_lck()
610 * Routine to handle fw reset completion in DEF_SCSI_QCMD()
620 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; in DEF_SCSI_QCMD()
622 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); in DEF_SCSI_QCMD()
624 atomic64_inc(&reset_stats->fw_reset_completions); in DEF_SCSI_QCMD()
626 /* Clean up all outstanding io requests */ in DEF_SCSI_QCMD()
629 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); in DEF_SCSI_QCMD()
630 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); in DEF_SCSI_QCMD()
631 atomic64_set(&fnic->io_cmpl_skip, 0); in DEF_SCSI_QCMD()
633 spin_lock_irqsave(&fnic->fnic_lock, flags); in DEF_SCSI_QCMD()
636 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { in DEF_SCSI_QCMD()
637 /* Check status of reset completion */ in DEF_SCSI_QCMD()
639 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in DEF_SCSI_QCMD()
640 "reset cmpl success\n"); in DEF_SCSI_QCMD()
642 fnic->state = FNIC_IN_ETH_MODE; in DEF_SCSI_QCMD()
645 fnic->lport->host, in DEF_SCSI_QCMD()
646 "fnic fw_reset : failed %s\n", in DEF_SCSI_QCMD()
653 * reset the firmware. Free the cached flogi in DEF_SCSI_QCMD()
655 fnic->state = FNIC_IN_FC_MODE; in DEF_SCSI_QCMD()
656 atomic64_inc(&reset_stats->fw_reset_failures); in DEF_SCSI_QCMD()
657 ret = -1; in DEF_SCSI_QCMD()
661 fnic->lport->host, in DEF_SCSI_QCMD()
663 " reset cmpl\n", fnic_state_to_str(fnic->state)); in DEF_SCSI_QCMD()
664 atomic64_inc(&reset_stats->fw_reset_failures); in DEF_SCSI_QCMD()
665 ret = -1; in DEF_SCSI_QCMD()
668 /* Thread removing device blocks till firmware reset is complete */ in DEF_SCSI_QCMD()
669 if (fnic->remove_wait) in DEF_SCSI_QCMD()
670 complete(fnic->remove_wait); in DEF_SCSI_QCMD()
673 * If fnic is being removed, or fw reset failed in DEF_SCSI_QCMD()
676 if (fnic->remove_wait || ret) { in DEF_SCSI_QCMD()
677 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in DEF_SCSI_QCMD()
678 skb_queue_purge(&fnic->tx_queue); in DEF_SCSI_QCMD()
682 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in DEF_SCSI_QCMD()
705 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); in fnic_fcpio_flogi_reg_cmpl_handler()
708 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
710 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { in fnic_fcpio_flogi_reg_cmpl_handler()
714 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_flogi_reg_cmpl_handler()
715 "flog reg succeeded\n"); in fnic_fcpio_flogi_reg_cmpl_handler()
716 fnic->state = FNIC_IN_FC_MODE; in fnic_fcpio_flogi_reg_cmpl_handler()
719 fnic->lport->host, in fnic_fcpio_flogi_reg_cmpl_handler()
720 "fnic flogi reg :failed %s\n", in fnic_fcpio_flogi_reg_cmpl_handler()
722 fnic->state = FNIC_IN_ETH_MODE; in fnic_fcpio_flogi_reg_cmpl_handler()
723 ret = -1; in fnic_fcpio_flogi_reg_cmpl_handler()
726 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_flogi_reg_cmpl_handler()
728 " processing flogi reg completion\n", in fnic_fcpio_flogi_reg_cmpl_handler()
729 fnic_state_to_str(fnic->state)); in fnic_fcpio_flogi_reg_cmpl_handler()
730 ret = -1; in fnic_fcpio_flogi_reg_cmpl_handler()
734 if (fnic->stop_rx_link_events) { in fnic_fcpio_flogi_reg_cmpl_handler()
735 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
738 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
741 queue_work(fnic_event_queue, &fnic->frame_work); in fnic_fcpio_flogi_reg_cmpl_handler()
743 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
753 if (wq->to_clean_index <= wq->to_use_index) { in is_ack_index_in_range()
755 if (request_out < wq->to_clean_index || in is_ack_index_in_range()
756 request_out >= wq->to_use_index) in is_ack_index_in_range()
760 if (request_out < wq->to_clean_index && in is_ack_index_in_range()
761 request_out >= wq->to_use_index) in is_ack_index_in_range()
780 u16 request_out = desc->u.ack.request_out; in fnic_fcpio_ack_handler()
785 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; in fnic_fcpio_ack_handler()
786 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_fcpio_ack_handler()
788 fnic->fnic_stats.misc_stats.last_ack_time = jiffies; in fnic_fcpio_ack_handler()
790 fnic->fw_ack_index[0] = request_out; in fnic_fcpio_ack_handler()
791 fnic->fw_ack_recd[0] = 1; in fnic_fcpio_ack_handler()
794 &fnic->fnic_stats.misc_stats.ack_index_out_of_range); in fnic_fcpio_ack_handler()
796 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_fcpio_ack_handler()
798 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], in fnic_fcpio_ack_handler()
817 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcpio_icmnd_cmpl_handler()
825 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); in fnic_fcpio_icmnd_cmpl_handler()
827 icmnd_cmpl = &desc->u.icmnd_cmpl; in fnic_fcpio_icmnd_cmpl_handler()
829 if (id >= fnic->fnic_max_tag_id) { in fnic_fcpio_icmnd_cmpl_handler()
830 shost_printk(KERN_ERR, fnic->lport->host, in fnic_fcpio_icmnd_cmpl_handler()
831 "Tag out of range tag %x hdr status = %s\n", in fnic_fcpio_icmnd_cmpl_handler()
836 sc = scsi_host_find_tag(fnic->lport->host, id); in fnic_fcpio_icmnd_cmpl_handler()
839 atomic64_inc(&fnic_stats->io_stats.sc_null); in fnic_fcpio_icmnd_cmpl_handler()
840 shost_printk(KERN_ERR, fnic->lport->host, in fnic_fcpio_icmnd_cmpl_handler()
841 "icmnd_cmpl sc is null - " in fnic_fcpio_icmnd_cmpl_handler()
842 "hdr status = %s tag = 0x%x desc = 0x%p\n", in fnic_fcpio_icmnd_cmpl_handler()
845 fnic->lport->host->host_no, id, in fnic_fcpio_icmnd_cmpl_handler()
846 ((u64)icmnd_cmpl->_resvd0[1] << 16 | in fnic_fcpio_icmnd_cmpl_handler()
847 (u64)icmnd_cmpl->_resvd0[0]), in fnic_fcpio_icmnd_cmpl_handler()
849 (u64)icmnd_cmpl->scsi_status << 8 | in fnic_fcpio_icmnd_cmpl_handler()
850 (u64)icmnd_cmpl->flags), desc, in fnic_fcpio_icmnd_cmpl_handler()
851 (u64)icmnd_cmpl->residual, 0); in fnic_fcpio_icmnd_cmpl_handler()
857 io_req = fnic_priv(sc)->io_req; in fnic_fcpio_icmnd_cmpl_handler()
860 atomic64_inc(&fnic_stats->io_stats.ioreq_null); in fnic_fcpio_icmnd_cmpl_handler()
861 fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; in fnic_fcpio_icmnd_cmpl_handler()
863 shost_printk(KERN_ERR, fnic->lport->host, in fnic_fcpio_icmnd_cmpl_handler()
864 "icmnd_cmpl io_req is null - " in fnic_fcpio_icmnd_cmpl_handler()
865 "hdr status = %s tag = 0x%x sc 0x%p\n", in fnic_fcpio_icmnd_cmpl_handler()
869 start_time = io_req->start_time; in fnic_fcpio_icmnd_cmpl_handler()
871 /* firmware completed the io */ in fnic_fcpio_icmnd_cmpl_handler()
872 io_req->io_completed = 1; in fnic_fcpio_icmnd_cmpl_handler()
875 * if SCSI-ML has already issued abort on this command, in fnic_fcpio_icmnd_cmpl_handler()
876 * set completion of the IO. The abts path will clean it up in fnic_fcpio_icmnd_cmpl_handler()
878 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_fcpio_icmnd_cmpl_handler()
884 fnic_priv(sc)->flags |= FNIC_IO_DONE; in fnic_fcpio_icmnd_cmpl_handler()
885 fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; in fnic_fcpio_icmnd_cmpl_handler()
888 fnic_priv(sc)->flags |= FNIC_IO_ABORTED; in fnic_fcpio_icmnd_cmpl_handler()
890 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, in fnic_fcpio_icmnd_cmpl_handler()
893 "scsi_status = %x residual = %d\n", in fnic_fcpio_icmnd_cmpl_handler()
896 icmnd_cmpl->scsi_status, in fnic_fcpio_icmnd_cmpl_handler()
897 icmnd_cmpl->residual); in fnic_fcpio_icmnd_cmpl_handler()
901 /* Mark the IO as complete */ in fnic_fcpio_icmnd_cmpl_handler()
902 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; in fnic_fcpio_icmnd_cmpl_handler()
904 icmnd_cmpl = &desc->u.icmnd_cmpl; in fnic_fcpio_icmnd_cmpl_handler()
908 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
911 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) { in fnic_fcpio_icmnd_cmpl_handler()
912 xfer_len -= icmnd_cmpl->residual; in fnic_fcpio_icmnd_cmpl_handler()
913 scsi_set_resid(sc, icmnd_cmpl->residual); in fnic_fcpio_icmnd_cmpl_handler()
916 if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION) in fnic_fcpio_icmnd_cmpl_handler()
917 atomic64_inc(&fnic_stats->misc_stats.check_condition); in fnic_fcpio_icmnd_cmpl_handler()
919 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) in fnic_fcpio_icmnd_cmpl_handler()
920 atomic64_inc(&fnic_stats->misc_stats.queue_fulls); in fnic_fcpio_icmnd_cmpl_handler()
924 atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout); in fnic_fcpio_icmnd_cmpl_handler()
925 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
929 atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted); in fnic_fcpio_icmnd_cmpl_handler()
930 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
934 atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch); in fnic_fcpio_icmnd_cmpl_handler()
935 scsi_set_resid(sc, icmnd_cmpl->residual); in fnic_fcpio_icmnd_cmpl_handler()
936 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
940 atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources); in fnic_fcpio_icmnd_cmpl_handler()
941 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
945 atomic64_inc(&fnic_stats->io_stats.io_not_found); in fnic_fcpio_icmnd_cmpl_handler()
946 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
950 atomic64_inc(&fnic_stats->misc_stats.sgl_invalid); in fnic_fcpio_icmnd_cmpl_handler()
951 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
955 atomic64_inc(&fnic_stats->fw_stats.io_fw_errs); in fnic_fcpio_icmnd_cmpl_handler()
956 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
960 atomic64_inc(&fnic_stats->misc_stats.mss_invalid); in fnic_fcpio_icmnd_cmpl_handler()
961 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
968 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
973 fnic_priv(sc)->io_req = NULL; in fnic_fcpio_icmnd_cmpl_handler()
974 fnic_priv(sc)->flags |= FNIC_IO_DONE; in fnic_fcpio_icmnd_cmpl_handler()
977 atomic64_inc(&fnic_stats->io_stats.io_failures); in fnic_fcpio_icmnd_cmpl_handler()
978 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", in fnic_fcpio_icmnd_cmpl_handler()
985 (u64)icmnd_cmpl->scsi_status << 48 | in fnic_fcpio_icmnd_cmpl_handler()
986 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 | in fnic_fcpio_icmnd_cmpl_handler()
987 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_fcpio_icmnd_cmpl_handler()
988 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]; in fnic_fcpio_icmnd_cmpl_handler()
991 sc->device->host->host_no, id, sc, in fnic_fcpio_icmnd_cmpl_handler()
992 ((u64)icmnd_cmpl->_resvd0[1] << 56 | in fnic_fcpio_icmnd_cmpl_handler()
993 (u64)icmnd_cmpl->_resvd0[0] << 48 | in fnic_fcpio_icmnd_cmpl_handler()
994 jiffies_to_msecs(jiffies - start_time)), in fnic_fcpio_icmnd_cmpl_handler()
997 if (sc->sc_data_direction == DMA_FROM_DEVICE) { in fnic_fcpio_icmnd_cmpl_handler()
998 fnic->lport->host_stats.fcp_input_requests++; in fnic_fcpio_icmnd_cmpl_handler()
999 fnic->fcp_input_bytes += xfer_len; in fnic_fcpio_icmnd_cmpl_handler()
1000 } else if (sc->sc_data_direction == DMA_TO_DEVICE) { in fnic_fcpio_icmnd_cmpl_handler()
1001 fnic->lport->host_stats.fcp_output_requests++; in fnic_fcpio_icmnd_cmpl_handler()
1002 fnic->fcp_output_bytes += xfer_len; in fnic_fcpio_icmnd_cmpl_handler()
1004 fnic->lport->host_stats.fcp_control_requests++; in fnic_fcpio_icmnd_cmpl_handler()
1006 /* Call SCSI completion function to complete the IO */ in fnic_fcpio_icmnd_cmpl_handler()
1010 mempool_free(io_req, fnic->io_req_pool); in fnic_fcpio_icmnd_cmpl_handler()
1012 atomic64_dec(&fnic_stats->io_stats.active_ios); in fnic_fcpio_icmnd_cmpl_handler()
1013 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_fcpio_icmnd_cmpl_handler()
1014 atomic64_dec(&fnic->io_cmpl_skip); in fnic_fcpio_icmnd_cmpl_handler()
1016 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_fcpio_icmnd_cmpl_handler()
1019 io_duration_time = jiffies_to_msecs(jiffies) - in fnic_fcpio_icmnd_cmpl_handler()
1023 atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); in fnic_fcpio_icmnd_cmpl_handler()
1025 atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec); in fnic_fcpio_icmnd_cmpl_handler()
1027 atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec); in fnic_fcpio_icmnd_cmpl_handler()
1029 atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1031 atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1033 atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1035 atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1037 if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time)) in fnic_fcpio_icmnd_cmpl_handler()
1038 atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time); in fnic_fcpio_icmnd_cmpl_handler()
1054 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcpio_itmf_cmpl_handler()
1055 struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; in fnic_fcpio_itmf_cmpl_handler()
1056 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_fcpio_itmf_cmpl_handler()
1057 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_fcpio_itmf_cmpl_handler()
1062 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); in fnic_fcpio_itmf_cmpl_handler()
1065 if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { in fnic_fcpio_itmf_cmpl_handler()
1066 shost_printk(KERN_ERR, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1067 "Tag out of range tag %x hdr status = %s\n", in fnic_fcpio_itmf_cmpl_handler()
1072 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); in fnic_fcpio_itmf_cmpl_handler()
1075 atomic64_inc(&fnic_stats->io_stats.sc_null); in fnic_fcpio_itmf_cmpl_handler()
1076 shost_printk(KERN_ERR, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1077 "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", in fnic_fcpio_itmf_cmpl_handler()
1083 io_req = fnic_priv(sc)->io_req; in fnic_fcpio_itmf_cmpl_handler()
1086 atomic64_inc(&fnic_stats->io_stats.ioreq_null); in fnic_fcpio_itmf_cmpl_handler()
1088 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; in fnic_fcpio_itmf_cmpl_handler()
1089 shost_printk(KERN_ERR, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1090 "itmf_cmpl io_req is null - " in fnic_fcpio_itmf_cmpl_handler()
1091 "hdr status = %s tag = 0x%x sc 0x%p\n", in fnic_fcpio_itmf_cmpl_handler()
1095 start_time = io_req->start_time; in fnic_fcpio_itmf_cmpl_handler()
1098 /* Abort and terminate completion of device reset req */ in fnic_fcpio_itmf_cmpl_handler()
1100 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1101 "dev reset abts cmpl recd. id %x status %s\n", in fnic_fcpio_itmf_cmpl_handler()
1103 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; in fnic_fcpio_itmf_cmpl_handler()
1104 fnic_priv(sc)->abts_status = hdr_status; in fnic_fcpio_itmf_cmpl_handler()
1105 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; in fnic_fcpio_itmf_cmpl_handler()
1106 if (io_req->abts_done) in fnic_fcpio_itmf_cmpl_handler()
1107 complete(io_req->abts_done); in fnic_fcpio_itmf_cmpl_handler()
1115 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) in fnic_fcpio_itmf_cmpl_handler()
1116 atomic64_inc(&abts_stats->abort_fw_timeouts); in fnic_fcpio_itmf_cmpl_handler()
1119 &term_stats->terminate_fw_timeouts); in fnic_fcpio_itmf_cmpl_handler()
1122 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1123 "abort reject recd. id %d\n", in fnic_fcpio_itmf_cmpl_handler()
1127 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) in fnic_fcpio_itmf_cmpl_handler()
1128 atomic64_inc(&abts_stats->abort_io_not_found); in fnic_fcpio_itmf_cmpl_handler()
1131 &term_stats->terminate_io_not_found); in fnic_fcpio_itmf_cmpl_handler()
1134 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) in fnic_fcpio_itmf_cmpl_handler()
1135 atomic64_inc(&abts_stats->abort_failures); in fnic_fcpio_itmf_cmpl_handler()
1138 &term_stats->terminate_failures); in fnic_fcpio_itmf_cmpl_handler()
1141 if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) { in fnic_fcpio_itmf_cmpl_handler()
1147 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; in fnic_fcpio_itmf_cmpl_handler()
1148 fnic_priv(sc)->abts_status = hdr_status; in fnic_fcpio_itmf_cmpl_handler()
1150 /* If the status is IO not found consider it as success */ in fnic_fcpio_itmf_cmpl_handler()
1152 fnic_priv(sc)->abts_status = FCPIO_SUCCESS; in fnic_fcpio_itmf_cmpl_handler()
1154 if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) in fnic_fcpio_itmf_cmpl_handler()
1155 atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); in fnic_fcpio_itmf_cmpl_handler()
1157 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1158 "abts cmpl recd. id %d status %s\n", in fnic_fcpio_itmf_cmpl_handler()
1164 * signal completion to it. IO will be cleaned in the thread in fnic_fcpio_itmf_cmpl_handler()
1167 if (io_req->abts_done) { in fnic_fcpio_itmf_cmpl_handler()
1168 complete(io_req->abts_done); in fnic_fcpio_itmf_cmpl_handler()
1171 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1172 "abts cmpl, completing IO\n"); in fnic_fcpio_itmf_cmpl_handler()
1173 fnic_priv(sc)->io_req = NULL; in fnic_fcpio_itmf_cmpl_handler()
1174 sc->result = (DID_ERROR << 16); in fnic_fcpio_itmf_cmpl_handler()
1179 mempool_free(io_req, fnic->io_req_pool); in fnic_fcpio_itmf_cmpl_handler()
1181 sc->device->host->host_no, id, in fnic_fcpio_itmf_cmpl_handler()
1183 jiffies_to_msecs(jiffies - start_time), in fnic_fcpio_itmf_cmpl_handler()
1186 (u64)sc->cmnd[0] << 32 | in fnic_fcpio_itmf_cmpl_handler()
1187 (u64)sc->cmnd[2] << 24 | in fnic_fcpio_itmf_cmpl_handler()
1188 (u64)sc->cmnd[3] << 16 | in fnic_fcpio_itmf_cmpl_handler()
1189 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_fcpio_itmf_cmpl_handler()
1192 atomic64_dec(&fnic_stats->io_stats.active_ios); in fnic_fcpio_itmf_cmpl_handler()
1193 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_fcpio_itmf_cmpl_handler()
1194 atomic64_dec(&fnic->io_cmpl_skip); in fnic_fcpio_itmf_cmpl_handler()
1196 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_fcpio_itmf_cmpl_handler()
1199 /* Completion of device reset */ in fnic_fcpio_itmf_cmpl_handler()
1200 fnic_priv(sc)->lr_status = hdr_status; in fnic_fcpio_itmf_cmpl_handler()
1201 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_fcpio_itmf_cmpl_handler()
1203 fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; in fnic_fcpio_itmf_cmpl_handler()
1205 sc->device->host->host_no, id, sc, in fnic_fcpio_itmf_cmpl_handler()
1206 jiffies_to_msecs(jiffies - start_time), in fnic_fcpio_itmf_cmpl_handler()
1208 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1210 "dev reset cmpl recd. id %d status %s\n", in fnic_fcpio_itmf_cmpl_handler()
1215 if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { in fnic_fcpio_itmf_cmpl_handler()
1219 sc->device->host->host_no, id, sc, in fnic_fcpio_itmf_cmpl_handler()
1220 jiffies_to_msecs(jiffies - start_time), in fnic_fcpio_itmf_cmpl_handler()
1222 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1223 "dev reset cmpl recd after time out. " in fnic_fcpio_itmf_cmpl_handler()
1224 "id %d status %s\n", in fnic_fcpio_itmf_cmpl_handler()
1229 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; in fnic_fcpio_itmf_cmpl_handler()
1230 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; in fnic_fcpio_itmf_cmpl_handler()
1231 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1232 "dev reset cmpl recd. id %d status %s\n", in fnic_fcpio_itmf_cmpl_handler()
1235 if (io_req->dr_done) in fnic_fcpio_itmf_cmpl_handler()
1236 complete(io_req->dr_done); in fnic_fcpio_itmf_cmpl_handler()
1240 shost_printk(KERN_ERR, fnic->lport->host, in fnic_fcpio_itmf_cmpl_handler()
1241 "Unexpected itmf io state %s tag %x\n", in fnic_fcpio_itmf_cmpl_handler()
1242 fnic_ioreq_state_to_str(fnic_priv(sc)->state), id); in fnic_fcpio_itmf_cmpl_handler()
1258 switch (desc->hdr.type) { in fnic_fcpio_cmpl_handler()
1260 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ in fnic_fcpio_cmpl_handler()
1263 case FCPIO_RESET_CMPL: /* fw completed reset */ in fnic_fcpio_cmpl_handler()
1264 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_fcpio_cmpl_handler()
1270 switch (desc->hdr.type) { in fnic_fcpio_cmpl_handler()
1279 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ in fnic_fcpio_cmpl_handler()
1288 case FCPIO_RESET_CMPL: /* fw completed reset */ in fnic_fcpio_cmpl_handler()
1293 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_fcpio_cmpl_handler()
1294 "firmware completion type %d\n", in fnic_fcpio_cmpl_handler()
1295 desc->hdr.type); in fnic_fcpio_cmpl_handler()
1311 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_wq_copy_cmpl_handler()
1317 for (i = 0; i < fnic->wq_copy_count; i++) { in fnic_wq_copy_cmpl_handler()
1318 cq_index = i + fnic->raw_wq_count + fnic->rq_count; in fnic_wq_copy_cmpl_handler()
1321 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], in fnic_wq_copy_cmpl_handler()
1327 delta_jiffies = end_jiffies - start_jiffies; in fnic_wq_copy_cmpl_handler()
1329 (u64) atomic64_read(&misc_stats->max_isr_jiffies)) { in fnic_wq_copy_cmpl_handler()
1330 atomic64_set(&misc_stats->max_isr_jiffies, in fnic_wq_copy_cmpl_handler()
1333 atomic64_set(&misc_stats->max_isr_time_ms, delta_ms); in fnic_wq_copy_cmpl_handler()
1334 atomic64_set(&misc_stats->corr_work_done, in fnic_wq_copy_cmpl_handler()
1343 const int tag = scsi_cmd_to_rq(sc)->tag; in fnic_cleanup_io_iter()
1349 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_cleanup_io_iter()
1354 io_req = fnic_priv(sc)->io_req; in fnic_cleanup_io_iter()
1355 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && in fnic_cleanup_io_iter()
1356 !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { in fnic_cleanup_io_iter()
1358 * We will be here only when FW completes reset in fnic_cleanup_io_iter()
1361 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; in fnic_cleanup_io_iter()
1362 if (io_req && io_req->dr_done) in fnic_cleanup_io_iter()
1363 complete(io_req->dr_done); in fnic_cleanup_io_iter()
1364 else if (io_req && io_req->abts_done) in fnic_cleanup_io_iter()
1365 complete(io_req->abts_done); in fnic_cleanup_io_iter()
1368 } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { in fnic_cleanup_io_iter()
1377 fnic_priv(sc)->io_req = NULL; in fnic_cleanup_io_iter()
1385 start_time = io_req->start_time; in fnic_cleanup_io_iter()
1387 mempool_free(io_req, fnic->io_req_pool); in fnic_cleanup_io_iter()
1390 sc->result = DID_TRANSPORT_DISRUPTED << 16; in fnic_cleanup_io_iter()
1391 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_cleanup_io_iter()
1392 "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", in fnic_cleanup_io_iter()
1393 tag, sc, jiffies - start_time); in fnic_cleanup_io_iter()
1395 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_cleanup_io_iter()
1396 atomic64_dec(&fnic->io_cmpl_skip); in fnic_cleanup_io_iter()
1398 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_cleanup_io_iter()
1401 if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) in fnic_cleanup_io_iter()
1402 shost_printk(KERN_ERR, fnic->lport->host, in fnic_cleanup_io_iter()
1403 "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", in fnic_cleanup_io_iter()
1407 sc->device->host->host_no, tag, sc, in fnic_cleanup_io_iter()
1408 jiffies_to_msecs(jiffies - start_time), in fnic_cleanup_io_iter()
1409 0, ((u64)sc->cmnd[0] << 32 | in fnic_cleanup_io_iter()
1410 (u64)sc->cmnd[2] << 24 | in fnic_cleanup_io_iter()
1411 (u64)sc->cmnd[3] << 16 | in fnic_cleanup_io_iter()
1412 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_cleanup_io_iter()
1422 scsi_host_busy_iter(fnic->lport->host, in fnic_cleanup_io()
1430 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_copy_cleanup_handler()
1438 fcpio_tag_id_dec(&desc->hdr.tag, &id); in fnic_wq_copy_cleanup_handler()
1441 if (id >= fnic->fnic_max_tag_id) in fnic_wq_copy_cleanup_handler()
1444 sc = scsi_host_find_tag(fnic->lport->host, id); in fnic_wq_copy_cleanup_handler()
1451 /* Get the IO context which this desc refers to */ in fnic_wq_copy_cleanup_handler()
1452 io_req = fnic_priv(sc)->io_req; in fnic_wq_copy_cleanup_handler()
1461 fnic_priv(sc)->io_req = NULL; in fnic_wq_copy_cleanup_handler()
1465 start_time = io_req->start_time; in fnic_wq_copy_cleanup_handler()
1467 mempool_free(io_req, fnic->io_req_pool); in fnic_wq_copy_cleanup_handler()
1470 sc->result = DID_NO_CONNECT << 16; in fnic_wq_copy_cleanup_handler()
1471 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" in fnic_wq_copy_cleanup_handler()
1472 " DID_NO_CONNECT\n"); in fnic_wq_copy_cleanup_handler()
1475 sc->device->host->host_no, id, sc, in fnic_wq_copy_cleanup_handler()
1476 jiffies_to_msecs(jiffies - start_time), in fnic_wq_copy_cleanup_handler()
1477 0, ((u64)sc->cmnd[0] << 32 | in fnic_wq_copy_cleanup_handler()
1478 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_wq_copy_cleanup_handler()
1479 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_wq_copy_cleanup_handler()
1489 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; in fnic_queue_abort_io_req()
1490 struct Scsi_Host *host = fnic->lport->host; in fnic_queue_abort_io_req()
1491 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_abort_io_req()
1494 spin_lock_irqsave(host->host_lock, flags); in fnic_queue_abort_io_req()
1497 spin_unlock_irqrestore(host->host_lock, flags); in fnic_queue_abort_io_req()
1500 atomic_inc(&fnic->in_flight); in fnic_queue_abort_io_req()
1501 spin_unlock_irqrestore(host->host_lock, flags); in fnic_queue_abort_io_req()
1503 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_queue_abort_io_req()
1505 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_queue_abort_io_req()
1509 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_queue_abort_io_req()
1510 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1511 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_queue_abort_io_req()
1512 "fnic_queue_abort_io_req: failure: no descriptors\n"); in fnic_queue_abort_io_req()
1513 atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); in fnic_queue_abort_io_req()
1517 0, task_req, tag, fc_lun, io_req->port_id, in fnic_queue_abort_io_req()
1518 fnic->config.ra_tov, fnic->config.ed_tov); in fnic_queue_abort_io_req()
1520 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_abort_io_req()
1521 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_abort_io_req()
1522 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_abort_io_req()
1523 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_abort_io_req()
1524 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_abort_io_req()
1526 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_queue_abort_io_req()
1527 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1541 struct fnic *fnic = iter_data->fnic; in fnic_rport_abort_io_iter()
1542 int abt_tag = scsi_cmd_to_rq(sc)->tag; in fnic_rport_abort_io_iter()
1546 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; in fnic_rport_abort_io_iter()
1547 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_rport_abort_io_iter()
1554 io_req = fnic_priv(sc)->io_req; in fnic_rport_abort_io_iter()
1556 if (!io_req || io_req->port_id != iter_data->port_id) { in fnic_rport_abort_io_iter()
1561 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && in fnic_rport_abort_io_iter()
1562 !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { in fnic_rport_abort_io_iter()
1563 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rport_abort_io_iter()
1564 "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", in fnic_rport_abort_io_iter()
1571 * Found IO that is still pending with firmware and in fnic_rport_abort_io_iter()
1574 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_rport_abort_io_iter()
1578 if (io_req->abts_done) { in fnic_rport_abort_io_iter()
1579 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rport_abort_io_iter()
1580 "fnic_rport_exch_reset: io_req->abts_done is set " in fnic_rport_abort_io_iter()
1581 "state is %s\n", in fnic_rport_abort_io_iter()
1582 fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_rport_abort_io_iter()
1585 if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { in fnic_rport_abort_io_iter()
1586 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rport_abort_io_iter()
1588 "IO not yet issued %p tag 0x%x flags " in fnic_rport_abort_io_iter()
1589 "%x state %d\n", in fnic_rport_abort_io_iter()
1590 sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state); in fnic_rport_abort_io_iter()
1592 old_ioreq_state = fnic_priv(sc)->state; in fnic_rport_abort_io_iter()
1593 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; in fnic_rport_abort_io_iter()
1594 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; in fnic_rport_abort_io_iter()
1595 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { in fnic_rport_abort_io_iter()
1596 atomic64_inc(&reset_stats->device_reset_terminates); in fnic_rport_abort_io_iter()
1599 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rport_abort_io_iter()
1600 "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); in fnic_rport_abort_io_iter()
1601 BUG_ON(io_req->abts_done); in fnic_rport_abort_io_iter()
1603 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rport_abort_io_iter()
1604 "fnic_rport_reset_exch: Issuing abts\n"); in fnic_rport_abort_io_iter()
1609 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_rport_abort_io_iter()
1618 * lun reset in fnic_rport_abort_io_iter()
1621 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) in fnic_rport_abort_io_iter()
1622 fnic_priv(sc)->state = old_ioreq_state; in fnic_rport_abort_io_iter()
1626 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) in fnic_rport_abort_io_iter()
1627 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; in fnic_rport_abort_io_iter()
1629 fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; in fnic_rport_abort_io_iter()
1631 atomic64_inc(&term_stats->terminates); in fnic_rport_abort_io_iter()
1632 iter_data->term_cnt++; in fnic_rport_abort_io_iter()
1639 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_rport_exch_reset()
1647 fnic->lport->host, in fnic_rport_exch_reset()
1648 "fnic_rport_exch_reset called portid 0x%06x\n", in fnic_rport_exch_reset()
1651 if (fnic->in_remove) in fnic_rport_exch_reset()
1654 scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, in fnic_rport_exch_reset()
1656 if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) in fnic_rport_exch_reset()
1657 atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); in fnic_rport_exch_reset()
1668 printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); in fnic_terminate_rport_io()
1671 rdata = rport->dd_data; in fnic_terminate_rport_io()
1674 printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); in fnic_terminate_rport_io()
1677 lport = rdata->local_port; in fnic_terminate_rport_io()
1680 printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); in fnic_terminate_rport_io()
1685 fnic->lport->host, "fnic_terminate_rport_io called" in fnic_terminate_rport_io()
1686 " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", in fnic_terminate_rport_io()
1687 rport->port_name, rport->node_name, rport, in fnic_terminate_rport_io()
1688 rport->port_id); in fnic_terminate_rport_io()
1690 if (fnic->in_remove) in fnic_terminate_rport_io()
1693 fnic_rport_exch_reset(fnic, rport->port_id); in fnic_terminate_rport_io()
1698 * A SCSI IO is represented by a io_req in the driver.
1699 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
1718 const int tag = rq->tag; in fnic_abort_cmd()
1725 /* Get local-port, check ready and link up */ in fnic_abort_cmd()
1726 lp = shost_priv(sc->device->host); in fnic_abort_cmd()
1729 fnic_stats = &fnic->fnic_stats; in fnic_abort_cmd()
1730 abts_stats = &fnic->fnic_stats.abts_stats; in fnic_abort_cmd()
1731 term_stats = &fnic->fnic_stats.term_stats; in fnic_abort_cmd()
1733 rport = starget_to_rport(scsi_target(sc->device)); in fnic_abort_cmd()
1735 fnic->lport->host, in fnic_abort_cmd()
1736 "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", in fnic_abort_cmd()
1737 rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags); in fnic_abort_cmd()
1739 fnic_priv(sc)->flags = FNIC_NO_FLAGS; in fnic_abort_cmd()
1741 if (lp->state != LPORT_ST_READY || !(lp->link_up)) { in fnic_abort_cmd()
1760 io_req = fnic_priv(sc)->io_req; in fnic_abort_cmd()
1766 io_req->abts_done = &tm_done; in fnic_abort_cmd()
1768 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_abort_cmd()
1773 abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); in fnic_abort_cmd()
1775 atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec); in fnic_abort_cmd()
1777 atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec); in fnic_abort_cmd()
1779 atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec); in fnic_abort_cmd()
1781 atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec); in fnic_abort_cmd()
1783 atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec); in fnic_abort_cmd()
1785 atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec); in fnic_abort_cmd()
1787 atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); in fnic_abort_cmd()
1789 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, in fnic_abort_cmd()
1790 "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time); in fnic_abort_cmd()
1794 * the completion wont be done till mid-layer, since abort in fnic_abort_cmd()
1797 old_ioreq_state = fnic_priv(sc)->state; in fnic_abort_cmd()
1798 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; in fnic_abort_cmd()
1799 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; in fnic_abort_cmd()
1806 * the IO. Else, just locally terminate the IO in the firmware in fnic_abort_cmd()
1811 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); in fnic_abort_cmd()
1816 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_abort_cmd()
1821 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) in fnic_abort_cmd()
1822 fnic_priv(sc)->state = old_ioreq_state; in fnic_abort_cmd()
1823 io_req = fnic_priv(sc)->io_req; in fnic_abort_cmd()
1825 io_req->abts_done = NULL; in fnic_abort_cmd()
1831 fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED; in fnic_abort_cmd()
1832 atomic64_inc(&fnic_stats->abts_stats.aborts); in fnic_abort_cmd()
1834 fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED; in fnic_abort_cmd()
1835 atomic64_inc(&fnic_stats->term_stats.terminates); in fnic_abort_cmd()
1839 * We queued an abort IO, wait for its completion. in fnic_abort_cmd()
1846 (2 * fnic->config.ra_tov + in fnic_abort_cmd()
1847 fnic->config.ed_tov)); in fnic_abort_cmd()
1852 io_req = fnic_priv(sc)->io_req; in fnic_abort_cmd()
1854 atomic64_inc(&fnic_stats->io_stats.ioreq_null); in fnic_abort_cmd()
1856 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; in fnic_abort_cmd()
1860 io_req->abts_done = NULL; in fnic_abort_cmd()
1863 if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { in fnic_abort_cmd()
1866 atomic64_inc(&abts_stats->abort_drv_timeouts); in fnic_abort_cmd()
1868 atomic64_inc(&term_stats->terminate_drv_timeouts); in fnic_abort_cmd()
1870 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT; in fnic_abort_cmd()
1875 /* IO out of order */ in fnic_abort_cmd()
1877 if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { in fnic_abort_cmd()
1879 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_abort_cmd()
1880 "Issuing Host reset due to out of order IO\n"); in fnic_abort_cmd()
1886 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; in fnic_abort_cmd()
1888 start_time = io_req->start_time; in fnic_abort_cmd()
1892 * Device reset will clean the I/O. in fnic_abort_cmd()
1894 if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) { in fnic_abort_cmd()
1895 fnic_priv(sc)->io_req = NULL; in fnic_abort_cmd()
1905 mempool_free(io_req, fnic->io_req_pool); in fnic_abort_cmd()
1907 /* Call SCSI completion function to complete the IO */ in fnic_abort_cmd()
1908 sc->result = DID_ABORT << 16; in fnic_abort_cmd()
1910 atomic64_dec(&fnic_stats->io_stats.active_ios); in fnic_abort_cmd()
1911 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_abort_cmd()
1912 atomic64_dec(&fnic->io_cmpl_skip); in fnic_abort_cmd()
1914 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_abort_cmd()
1917 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc, in fnic_abort_cmd()
1918 jiffies_to_msecs(jiffies - start_time), in fnic_abort_cmd()
1919 0, ((u64)sc->cmnd[0] << 32 | in fnic_abort_cmd()
1920 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_abort_cmd()
1921 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_abort_cmd()
1924 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_abort_cmd()
1925 "Returning from abort cmd type %x %s\n", task_req, in fnic_abort_cmd()
1935 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; in fnic_queue_dr_io_req()
1936 struct Scsi_Host *host = fnic->lport->host; in fnic_queue_dr_io_req()
1937 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_dr_io_req()
1942 spin_lock_irqsave(host->host_lock, intr_flags); in fnic_queue_dr_io_req()
1945 spin_unlock_irqrestore(host->host_lock, intr_flags); in fnic_queue_dr_io_req()
1948 atomic_inc(&fnic->in_flight); in fnic_queue_dr_io_req()
1949 spin_unlock_irqrestore(host->host_lock, intr_flags); in fnic_queue_dr_io_req()
1951 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); in fnic_queue_dr_io_req()
1953 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_queue_dr_io_req()
1957 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_queue_dr_io_req()
1958 "queue_dr_io_req failure - no descriptors\n"); in fnic_queue_dr_io_req()
1959 atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); in fnic_queue_dr_io_req()
1960 ret = -EAGAIN; in fnic_queue_dr_io_req()
1965 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_queue_dr_io_req()
1967 fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST, in fnic_queue_dr_io_req()
1969 fc_lun.scsi_lun, io_req->port_id, in fnic_queue_dr_io_req()
1970 fnic->config.ra_tov, fnic->config.ed_tov); in fnic_queue_dr_io_req()
1972 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_dr_io_req()
1973 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_dr_io_req()
1974 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_dr_io_req()
1975 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_dr_io_req()
1976 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_dr_io_req()
1979 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); in fnic_queue_dr_io_req()
1980 atomic_dec(&fnic->in_flight); in fnic_queue_dr_io_req()
1995 struct fnic *fnic = iter_data->fnic; in fnic_pending_aborts_iter()
1996 struct scsi_device *lun_dev = iter_data->lun_dev; in fnic_pending_aborts_iter()
1997 int abt_tag = scsi_cmd_to_rq(sc)->tag; in fnic_pending_aborts_iter()
2005 if (sc == iter_data->lr_sc || sc->device != lun_dev) in fnic_pending_aborts_iter()
2010 io_req = fnic_priv(sc)->io_req; in fnic_pending_aborts_iter()
2017 * Found IO that is still pending with firmware and in fnic_pending_aborts_iter()
2020 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_pending_aborts_iter()
2021 "Found IO in %s on lun\n", in fnic_pending_aborts_iter()
2022 fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_pending_aborts_iter()
2024 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_pending_aborts_iter()
2028 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && in fnic_pending_aborts_iter()
2029 (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { in fnic_pending_aborts_iter()
2030 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, in fnic_pending_aborts_iter()
2031 "%s dev rst not pending sc 0x%p\n", __func__, in fnic_pending_aborts_iter()
2037 if (io_req->abts_done) in fnic_pending_aborts_iter()
2038 shost_printk(KERN_ERR, fnic->lport->host, in fnic_pending_aborts_iter()
2039 "%s: io_req->abts_done is set state is %s\n", in fnic_pending_aborts_iter()
2040 __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_pending_aborts_iter()
2041 old_ioreq_state = fnic_priv(sc)->state; in fnic_pending_aborts_iter()
2043 * Any pending IO issued prior to reset is expected to be in fnic_pending_aborts_iter()
2045 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending. in fnic_pending_aborts_iter()
2046 * When IO is completed, the IO will be handed over and in fnic_pending_aborts_iter()
2049 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; in fnic_pending_aborts_iter()
2051 BUG_ON(io_req->abts_done); in fnic_pending_aborts_iter()
2053 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { in fnic_pending_aborts_iter()
2055 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, in fnic_pending_aborts_iter()
2056 "%s: dev rst sc 0x%p\n", __func__, sc); in fnic_pending_aborts_iter()
2059 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; in fnic_pending_aborts_iter()
2060 io_req->abts_done = &tm_done; in fnic_pending_aborts_iter()
2064 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_pending_aborts_iter()
2070 io_req = fnic_priv(sc)->io_req; in fnic_pending_aborts_iter()
2072 io_req->abts_done = NULL; in fnic_pending_aborts_iter()
2073 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) in fnic_pending_aborts_iter()
2074 fnic_priv(sc)->state = old_ioreq_state; in fnic_pending_aborts_iter()
2076 iter_data->ret = FAILED; in fnic_pending_aborts_iter()
2080 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) in fnic_pending_aborts_iter()
2081 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; in fnic_pending_aborts_iter()
2084 fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; in fnic_pending_aborts_iter()
2087 (fnic->config.ed_tov)); in fnic_pending_aborts_iter()
2091 io_req = fnic_priv(sc)->io_req; in fnic_pending_aborts_iter()
2094 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; in fnic_pending_aborts_iter()
2098 io_req->abts_done = NULL; in fnic_pending_aborts_iter()
2101 if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { in fnic_pending_aborts_iter()
2103 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; in fnic_pending_aborts_iter()
2104 iter_data->ret = FAILED; in fnic_pending_aborts_iter()
2107 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; in fnic_pending_aborts_iter()
2109 /* original sc used for lr is handled by dev reset code */ in fnic_pending_aborts_iter()
2110 if (sc != iter_data->lr_sc) in fnic_pending_aborts_iter()
2111 fnic_priv(sc)->io_req = NULL; in fnic_pending_aborts_iter()
2114 /* original sc used for lr is handled by dev reset code */ in fnic_pending_aborts_iter()
2115 if (sc != iter_data->lr_sc) { in fnic_pending_aborts_iter()
2117 mempool_free(io_req, fnic->io_req_pool); in fnic_pending_aborts_iter()
2121 * Any IO is returned during reset, it needs to call scsi_done in fnic_pending_aborts_iter()
2125 sc->result = DID_RESET << 16; in fnic_pending_aborts_iter()
2133 * For each outstanding IO on this lun, whose abort is not completed by fw,
2145 .lun_dev = lr_sc->device, in fnic_clean_pending_aborts()
2152 scsi_host_busy_iter(fnic->lport->host, in fnic_clean_pending_aborts()
2158 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); in fnic_clean_pending_aborts()
2176 struct request_queue *q = rq->q; in fnic_scsi_host_start_tag()
2183 rq->tag = dummy->tag; in fnic_scsi_host_start_tag()
2184 sc->host_scribble = (unsigned char *)dummy; in fnic_scsi_host_start_tag()
2186 return dummy->tag; in fnic_scsi_host_start_tag()
2196 struct request *dummy = (struct request *)sc->host_scribble; in fnic_scsi_host_end_tag()
2202 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
2221 int tag = rq->tag; in fnic_device_reset()
2229 /* Get local-port, check ready and link up */ in fnic_device_reset()
2230 lp = shost_priv(sc->device->host); in fnic_device_reset()
2233 fnic_stats = &fnic->fnic_stats; in fnic_device_reset()
2234 reset_stats = &fnic->fnic_stats.reset_stats; in fnic_device_reset()
2236 atomic64_inc(&reset_stats->device_resets); in fnic_device_reset()
2238 rport = starget_to_rport(scsi_target(sc->device)); in fnic_device_reset()
2239 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_device_reset()
2240 "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n", in fnic_device_reset()
2241 rport->port_id, sc->device->lun, sc); in fnic_device_reset()
2243 if (lp->state != LPORT_ST_READY || !(lp->link_up)) in fnic_device_reset()
2248 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); in fnic_device_reset()
2252 fnic_priv(sc)->flags = FNIC_DEVICE_RESET; in fnic_device_reset()
2268 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2275 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); in fnic_device_reset()
2281 io_req->port_id = rport->port_id; in fnic_device_reset()
2282 fnic_priv(sc)->io_req = io_req; in fnic_device_reset()
2284 io_req->dr_done = &tm_done; in fnic_device_reset()
2285 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; in fnic_device_reset()
2286 fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; in fnic_device_reset()
2289 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); in fnic_device_reset()
2292 * issue the device reset, if enqueue failed, clean up the ioreq in fnic_device_reset()
2297 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2299 io_req->dr_done = NULL; in fnic_device_reset()
2303 fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; in fnic_device_reset()
2307 * Wait on the local completion for LUN reset. The io_req may be in fnic_device_reset()
2314 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2317 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_device_reset()
2318 "io_req is null tag 0x%x sc 0x%p\n", tag, sc); in fnic_device_reset()
2321 io_req->dr_done = NULL; in fnic_device_reset()
2323 status = fnic_priv(sc)->lr_status; in fnic_device_reset()
2326 * If lun reset not completed, bail out with failed. io_req in fnic_device_reset()
2330 atomic64_inc(&reset_stats->device_reset_timeouts); in fnic_device_reset()
2331 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_device_reset()
2332 "Device reset timed out\n"); in fnic_device_reset()
2333 fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; in fnic_device_reset()
2335 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_device_reset()
2337 * Issue abort and terminate on device reset request. in fnic_device_reset()
2342 if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) { in fnic_device_reset()
2355 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; in fnic_device_reset()
2356 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; in fnic_device_reset()
2357 io_req->abts_done = &tm_done; in fnic_device_reset()
2359 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_device_reset()
2360 "Abort and terminate issued on Device reset " in fnic_device_reset()
2361 "tag 0x%x sc 0x%p\n", tag, sc); in fnic_device_reset()
2367 if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { in fnic_device_reset()
2373 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2374 io_req->abts_done = NULL; in fnic_device_reset()
2386 fnic->lport->host, in fnic_device_reset()
2387 "Device reset completed - failed\n"); in fnic_device_reset()
2388 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2394 * completed. If any of these fail, then LUN reset fails. in fnic_device_reset()
2396 * the lun reset cmd. If all cmds get cleaned, the lun reset in fnic_device_reset()
2401 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2402 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_device_reset()
2403 "Device reset failed" in fnic_device_reset()
2404 " since could not abort all IOs\n"); in fnic_device_reset()
2408 /* Clean lun reset command */ in fnic_device_reset()
2410 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2417 fnic_priv(sc)->io_req = NULL; in fnic_device_reset()
2422 start_time = io_req->start_time; in fnic_device_reset()
2424 mempool_free(io_req, fnic->io_req_pool); in fnic_device_reset()
2428 FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc, in fnic_device_reset()
2429 jiffies_to_msecs(jiffies - start_time), in fnic_device_reset()
2430 0, ((u64)sc->cmnd[0] << 32 | in fnic_device_reset()
2431 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_device_reset()
2432 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_device_reset()
2439 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_device_reset()
2440 "Returning from device reset %s\n", in fnic_device_reset()
2445 atomic64_inc(&reset_stats->device_reset_failures); in fnic_device_reset()
2460 reset_stats = &fnic->fnic_stats.reset_stats; in fnic_reset()
2462 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_reset()
2463 "fnic_reset called\n"); in fnic_reset()
2465 atomic64_inc(&reset_stats->fnic_resets); in fnic_reset()
2468 * Reset local port, this will clean up libFC exchanges, in fnic_reset()
2469 * reset remote port sessions, and if link is up, begin flogi in fnic_reset()
2473 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_reset()
2474 "Returning from fnic reset %s\n", in fnic_reset()
2479 atomic64_inc(&reset_stats->fnic_reset_completions); in fnic_reset()
2481 atomic64_inc(&reset_stats->fnic_reset_failures); in fnic_reset()
2488 * error handling levels return FAILED. If host reset completes
2491 * Host Reset is the highest level of error recovery. If this fails, then
2499 struct Scsi_Host *shost = sc->device->host; in fnic_host_reset()
2504 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2505 if (!fnic->internal_reset_inprogress) { in fnic_host_reset()
2506 fnic->internal_reset_inprogress = true; in fnic_host_reset()
2508 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2509 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_host_reset()
2510 "host reset in progress skipping another host reset\n"); in fnic_host_reset()
2513 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2517 * scsi-ml tries to send a TUR to every device if host reset is in fnic_host_reset()
2525 if ((lp->state == LPORT_ST_READY) && in fnic_host_reset()
2526 (lp->link_up)) { in fnic_host_reset()
2534 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2535 fnic->internal_reset_inprogress = false; in fnic_host_reset()
2536 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2551 /* Issue firmware reset for fnic, wait for reset to complete */ in fnic_scsi_abort_io()
2553 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_abort_io()
2554 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) && in fnic_scsi_abort_io()
2555 fnic->link_events) { in fnic_scsi_abort_io()
2556 /* fw reset is in progress, poll for its completion */ in fnic_scsi_abort_io()
2557 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_abort_io()
2562 fnic->remove_wait = &remove_wait; in fnic_scsi_abort_io()
2563 old_state = fnic->state; in fnic_scsi_abort_io()
2564 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; in fnic_scsi_abort_io()
2565 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); in fnic_scsi_abort_io()
2566 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_abort_io()
2570 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_abort_io()
2571 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) in fnic_scsi_abort_io()
2572 fnic->state = old_state; in fnic_scsi_abort_io()
2573 fnic->remove_wait = NULL; in fnic_scsi_abort_io()
2574 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_abort_io()
2578 /* Wait for firmware reset to complete */ in fnic_scsi_abort_io()
2582 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_abort_io()
2583 fnic->remove_wait = NULL; in fnic_scsi_abort_io()
2584 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, in fnic_scsi_abort_io()
2585 "fnic_scsi_abort_io %s\n", in fnic_scsi_abort_io()
2586 (fnic->state == FNIC_IN_ETH_MODE) ? in fnic_scsi_abort_io()
2588 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_abort_io()
2593  * This function is called from libFC to clean up driver IO state on link down in fnic_scsi_cleanup()
2601 /* issue fw reset */ in fnic_scsi_cleanup()
2603 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_cleanup()
2604 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { in fnic_scsi_cleanup()
2605 /* fw reset is in progress, poll for its completion */ in fnic_scsi_cleanup()
2606 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_cleanup()
2610 old_state = fnic->state; in fnic_scsi_cleanup()
2611 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; in fnic_scsi_cleanup()
2612 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); in fnic_scsi_cleanup()
2613 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_cleanup()
2616 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_cleanup()
2617 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) in fnic_scsi_cleanup()
2618 fnic->state = old_state; in fnic_scsi_cleanup()
2619 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_cleanup()
2632 /* Non-zero sid, nothing to do */ in fnic_exch_mgr_reset()
2645 if (!fnic->in_remove) in fnic_exch_mgr_reset()
2650 /* call libFC exch mgr reset to reset its exchanges */ in fnic_exch_mgr_reset()
2659 struct fnic *fnic = iter_data->fnic; in fnic_abts_pending_iter()
2666 * ignore this lun reset cmd or cmds that do not belong to in fnic_abts_pending_iter()
2669 if (iter_data->lr_sc && sc == iter_data->lr_sc) in fnic_abts_pending_iter()
2671 if (iter_data->lun_dev && sc->device != iter_data->lun_dev) in fnic_abts_pending_iter()
2677 io_req = fnic_priv(sc)->io_req; in fnic_abts_pending_iter()
2684 * Found IO that is still pending with firmware and in fnic_abts_pending_iter()
2687 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, in fnic_abts_pending_iter()
2688 "Found IO in %s on lun\n", in fnic_abts_pending_iter()
2689 fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_abts_pending_iter()
2690 cmd_state = fnic_priv(sc)->state; in fnic_abts_pending_iter()
2693 iter_data->ret = 1; in fnic_abts_pending_iter()
2695 return iter_data->ret ? false : true; in fnic_abts_pending_iter()
2714 iter_data.lun_dev = lr_sc->device; in fnic_is_abts_pending()
2719 scsi_host_busy_iter(fnic->lport->host, in fnic_is_abts_pending()