Lines matching refs:ctrl — identifier cross-reference hits for "ctrl" in the NVMe-FC host transport driver. Each entry gives the source line number, the matching source line, and the enclosing context ("member", "argument", "local", or "in <function>()").

36 	struct nvme_fc_ctrl	*ctrl;  member
100 struct nvme_fc_ctrl *ctrl; member
180 struct nvme_ctrl ctrl; member
184 to_fc_ctrl(struct nvme_ctrl *ctrl) in to_fc_ctrl() argument
186 return container_of(ctrl, struct nvme_fc_ctrl, ctrl); in to_fc_ctrl()
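The hits at 180-186 show the standard embedded-struct idiom: the generic struct nvme_ctrl lives inside the transport-private struct nvme_fc_ctrl, and to_fc_ctrl() recovers the wrapper with container_of(). A minimal userspace sketch of the same pattern (field values and the trimmed-down struct contents are illustrative, not taken from the driver):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvme_ctrl { int state; };     /* the generic part (cf. line 180) */

struct nvme_fc_ctrl {                /* the transport-private wrapper */
	int cnum;
	struct nvme_ctrl ctrl;       /* embedded generic controller */
};

static struct nvme_fc_ctrl *to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

int main(void)
{
	struct nvme_fc_ctrl fc = { .cnum = 7 };
	struct nvme_ctrl *generic = &fc.ctrl;  /* what core code holds */

	/* Recover the wrapper from the embedded pointer: prints cnum=7. */
	printf("cnum=%d\n", to_fc_ctrl(generic)->cnum);
	return 0;
}
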
558 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) in nvme_fc_resume_controller() argument
560 switch (ctrl->ctrl.state) { in nvme_fc_resume_controller()
567 dev_info(ctrl->ctrl.device, in nvme_fc_resume_controller()
569 "Attempting reconnect\n", ctrl->cnum); in nvme_fc_resume_controller()
571 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); in nvme_fc_resume_controller()
593 struct nvme_fc_ctrl *ctrl; in nvme_fc_attach_to_suspended_rport() local
629 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) in nvme_fc_attach_to_suspended_rport()
630 nvme_fc_resume_controller(ctrl); in nvme_fc_attach_to_suspended_rport()
790 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_connectivity_loss() argument
792 dev_info(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
794 "Reconnect", ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
796 switch (ctrl->ctrl.state) { in nvme_fc_ctrl_connectivity_loss()
806 if (nvme_reset_ctrl(&ctrl->ctrl)) { in nvme_fc_ctrl_connectivity_loss()
807 dev_warn(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
809 ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
810 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_ctrl_connectivity_loss()
856 struct nvme_fc_ctrl *ctrl; in nvme_fc_unregister_remoteport() local
872 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_unregister_remoteport()
875 dev_warn(ctrl->ctrl.device, in nvme_fc_unregister_remoteport()
877 ctrl->cnum); in nvme_fc_unregister_remoteport()
878 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_unregister_remoteport()
880 nvme_fc_ctrl_connectivity_loss(ctrl); in nvme_fc_unregister_remoteport()
1035 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1177 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, in nvme_fc_connect_admin_queue() argument
1189 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_admin_queue()
1191 dev_info(ctrl->ctrl.device, in nvme_fc_connect_admin_queue()
1193 ctrl->cnum); in nvme_fc_connect_admin_queue()
1201 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_admin_queue()
1220 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); in nvme_fc_connect_admin_queue()
1221 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, in nvme_fc_connect_admin_queue()
1223 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, in nvme_fc_connect_admin_queue()
1233 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_admin_queue()
1270 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1274 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1275 ctrl->association_id = in nvme_fc_connect_admin_queue()
1280 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1287 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
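The allocation at 1189 (and again at 1305 and 1435 below) sizes a single kzalloc() for the LS operation plus its request/response payloads and the LLDD's lsrqst_priv_sz private area, then carves pointers out of the one block; the "if (ctrl->lport->ops->lsrqst_priv_sz)" hits are the conditional private-pointer setup. A hedged userspace sketch of that carve-out idiom (the struct layout and helper name are invented for illustration):

#include <stdlib.h>

/* Hypothetical lsop carrying pointers into one backing allocation. */
struct lsop {
	void *rqst;     /* LS request payload */
	void *rsp;      /* LS response payload */
	void *private;  /* LLDD's lsrqst_priv_sz area, may be absent */
};

static struct lsop *alloc_lsop(size_t rqst_sz, size_t rsp_sz, size_t priv_sz)
{
	struct lsop *op;

	/* One zeroed block for the op plus all of its payloads. */
	op = calloc(1, sizeof(*op) + rqst_sz + rsp_sz + priv_sz);
	if (!op)
		return NULL;

	op->rqst = op + 1;                       /* just past the struct */
	op->rsp  = (char *)op->rqst + rqst_sz;
	op->private = priv_sz ? (char *)op->rsp + rsp_sz : NULL;
	return op;
}

int main(void)
{
	struct lsop *op = alloc_lsop(64, 64, 32);

	if (!op)
		return 1;
	/* ...fill op->rqst, send, read op->rsp... */
	free(op);       /* a single free releases everything */
	return 0;
}
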
1294 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, in nvme_fc_connect_queue() argument
1305 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_queue()
1307 dev_info(ctrl->ctrl.device, in nvme_fc_connect_queue()
1309 ctrl->cnum); in nvme_fc_connect_queue()
1317 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_queue()
1331 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); in nvme_fc_connect_queue()
1348 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_queue()
1376 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1389 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1425 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) in nvme_fc_xmt_disconnect_assoc() argument
1435 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_xmt_disconnect_assoc()
1437 dev_info(ctrl->ctrl.device, in nvme_fc_xmt_disconnect_assoc()
1440 ctrl->cnum); in nvme_fc_xmt_disconnect_assoc()
1447 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_xmt_disconnect_assoc()
1453 ctrl->association_id); in nvme_fc_xmt_disconnect_assoc()
1455 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, in nvme_fc_xmt_disconnect_assoc()
1513 struct nvme_fc_ctrl *ctrl, *ret = NULL; in nvme_fc_match_disconn_ls() local
1520 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_match_disconn_ls()
1521 if (!nvme_fc_ctrl_get(ctrl)) in nvme_fc_match_disconn_ls()
1523 spin_lock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1524 if (association_id == ctrl->association_id) { in nvme_fc_match_disconn_ls()
1525 oldls = ctrl->rcv_disconn; in nvme_fc_match_disconn_ls()
1526 ctrl->rcv_disconn = lsop; in nvme_fc_match_disconn_ls()
1527 ret = ctrl; in nvme_fc_match_disconn_ls()
1529 spin_unlock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1533 nvme_fc_ctrl_put(ctrl); in nvme_fc_match_disconn_ls()
1542 "LS's received\n", ctrl->cnum); in nvme_fc_match_disconn_ls()
1568 struct nvme_fc_ctrl *ctrl = NULL; in nvme_fc_ls_disconnect_assoc() local
1576 ctrl = nvme_fc_match_disconn_ls(rport, lsop); in nvme_fc_ls_disconnect_assoc()
1577 if (!ctrl) in nvme_fc_ls_disconnect_assoc()
1610 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); in nvme_fc_ls_disconnect_assoc()
1613 nvme_fc_ctrl_put(ctrl); in nvme_fc_ls_disconnect_assoc()
1828 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, in __nvme_fc_exit_request() argument
1831 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, in __nvme_fc_exit_request()
1833 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, in __nvme_fc_exit_request()
1849 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) in __nvme_fc_abort_op() argument
1854 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_abort_op()
1858 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { in __nvme_fc_abort_op()
1860 ctrl->iocnt++; in __nvme_fc_abort_op()
1862 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_abort_op()
1867 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, in __nvme_fc_abort_op()
1868 &ctrl->rport->remoteport, in __nvme_fc_abort_op()
1876 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_abort_aen_ops() argument
1878 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; in nvme_fc_abort_aen_ops()
1886 __nvme_fc_abort_op(ctrl, aen_op); in nvme_fc_abort_aen_ops()
1890 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, in __nvme_fc_fcpop_chk_teardowns() argument
1896 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1897 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && in __nvme_fc_fcpop_chk_teardowns()
1899 if (!--ctrl->iocnt) in __nvme_fc_fcpop_chk_teardowns()
1900 wake_up(&ctrl->ioabort_wait); in __nvme_fc_fcpop_chk_teardowns()
1902 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
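Lines 1854-1862 and 1896-1902 are the two halves of the abort accounting: while FCCTRL_TERMIO is set, each aborted op bumps ctrl->iocnt under ctrl->lock, each teardown-time completion drops it, and ioabort_wait is woken at zero (nvme_fc_delete_association() blocks on exactly that at 3253 below). A userspace analogue of the count-to-zero handshake, with a pthread mutex and condvar standing in for the spinlock and wait_event_lock_irq() (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ioabort_wait = PTHREAD_COND_INITIALIZER;
static int iocnt;       /* outstanding aborted ops, cf. ctrl->iocnt */

static void abort_op(void)        /* cf. __nvme_fc_abort_op(), 1860 */
{
	pthread_mutex_lock(&lock);
	iocnt++;
	pthread_mutex_unlock(&lock);
}

static void op_done(void)   /* cf. __nvme_fc_fcpop_chk_teardowns(), 1899 */
{
	pthread_mutex_lock(&lock);
	if (--iocnt == 0)
		pthread_cond_signal(&ioabort_wait);
	pthread_mutex_unlock(&lock);
}

static void wait_for_aborts(void)   /* cf. wait_event_lock_irq(), 3253 */
{
	pthread_mutex_lock(&lock);
	while (iocnt != 0)
		pthread_cond_wait(&ioabort_wait, &lock);
	pthread_mutex_unlock(&lock);
}

static void *completer(void *arg)
{
	(void)arg;
	op_done();
	return NULL;
}

int main(void)
{
	pthread_t t;

	abort_op();                    /* one op enters the aborting state */
	pthread_create(&t, NULL, completer, NULL);
	wait_for_aborts();             /* blocks until iocnt reaches zero */
	pthread_join(&t, NULL);
	puts("all aborted ops drained");
	return 0;
}
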
1909 struct nvme_fc_ctrl *ctrl = in nvme_fc_ctrl_ioerr_work() local
1912 nvme_fc_error_recovery(ctrl, "transport detected io error"); in nvme_fc_ctrl_ioerr_work()
1939 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_fcpio_done() local
1987 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, in nvme_fc_fcpio_done()
1994 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1996 ctrl->cnum, freq->status); in nvme_fc_fcpio_done()
2026 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2029 ctrl->cnum, freq->transferred_length, in nvme_fc_fcpio_done()
2048 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2052 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), in nvme_fc_fcpio_done()
2066 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2069 ctrl->cnum, freq->rcv_rsplen); in nvme_fc_fcpio_done()
2077 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); in nvme_fc_fcpio_done()
2078 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_fcpio_done()
2081 nvme_fc_ctrl_put(ctrl); in nvme_fc_fcpio_done()
2085 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_fcpio_done()
2090 if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) in nvme_fc_fcpio_done()
2091 queue_work(nvme_reset_wq, &ctrl->ioerr_work); in nvme_fc_fcpio_done()
2095 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, in __nvme_fc_init_request() argument
2110 op->ctrl = ctrl; in __nvme_fc_init_request()
2124 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2126 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { in __nvme_fc_init_request()
2127 dev_err(ctrl->dev, in __nvme_fc_init_request()
2133 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2136 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { in __nvme_fc_init_request()
2137 dev_err(ctrl->dev, in __nvme_fc_init_request()
2151 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); in nvme_fc_init_request() local
2153 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_fc_init_request()
2154 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; in nvme_fc_init_request()
2157 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); in nvme_fc_init_request()
2162 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_fc_init_request()
2168 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_init_aen_ops() argument
2176 aen_op = ctrl->aen_ops; in nvme_fc_init_aen_ops()
2178 if (ctrl->lport->ops->fcprqst_priv_sz) { in nvme_fc_init_aen_ops()
2179 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, in nvme_fc_init_aen_ops()
2187 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], in nvme_fc_init_aen_ops()
2207 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_term_aen_ops() argument
2212 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_fc_term_aen_ops()
2213 aen_op = ctrl->aen_ops; in nvme_fc_term_aen_ops()
2215 __nvme_fc_exit_request(ctrl, aen_op); in nvme_fc_term_aen_ops()
2225 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data); in __nvme_fc_init_hctx() local
2226 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; in __nvme_fc_init_hctx()
2247 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) in nvme_fc_init_queue() argument
2251 queue = &ctrl->queues[idx]; in nvme_fc_init_queue()
2253 queue->ctrl = ctrl; in nvme_fc_init_queue()
2256 queue->dev = ctrl->dev; in nvme_fc_init_queue()
2259 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_fc_init_queue()
2301 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, in __nvme_fc_delete_hw_queue() argument
2304 if (ctrl->lport->ops->delete_queue) in __nvme_fc_delete_hw_queue()
2305 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, in __nvme_fc_delete_hw_queue()
2311 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_free_io_queues() argument
2315 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_free_io_queues()
2316 nvme_fc_free_queue(&ctrl->queues[i]); in nvme_fc_free_io_queues()
2320 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, in __nvme_fc_create_hw_queue() argument
2326 if (ctrl->lport->ops->create_queue) in __nvme_fc_create_hw_queue()
2327 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, in __nvme_fc_create_hw_queue()
2334 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_delete_hw_io_queues() argument
2336 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; in nvme_fc_delete_hw_io_queues()
2339 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) in nvme_fc_delete_hw_io_queues()
2340 __nvme_fc_delete_hw_queue(ctrl, queue, i); in nvme_fc_delete_hw_io_queues()
2344 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) in nvme_fc_create_hw_io_queues() argument
2346 struct nvme_fc_queue *queue = &ctrl->queues[1]; in nvme_fc_create_hw_io_queues()
2349 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { in nvme_fc_create_hw_io_queues()
2350 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); in nvme_fc_create_hw_io_queues()
2359 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); in nvme_fc_create_hw_io_queues()
2364 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) in nvme_fc_connect_io_queues() argument
2368 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_fc_connect_io_queues()
2369 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, in nvme_fc_connect_io_queues()
2373 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); in nvme_fc_connect_io_queues()
2377 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); in nvme_fc_connect_io_queues()
2384 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_init_io_queues() argument
2388 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_init_io_queues()
2389 nvme_fc_init_queue(ctrl, i); in nvme_fc_init_io_queues()
2395 struct nvme_fc_ctrl *ctrl = in nvme_fc_ctrl_free() local
2399 if (ctrl->ctrl.tagset) in nvme_fc_ctrl_free()
2400 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_fc_ctrl_free()
2403 spin_lock_irqsave(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2404 list_del(&ctrl->ctrl_list); in nvme_fc_ctrl_free()
2405 spin_unlock_irqrestore(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2407 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_fc_ctrl_free()
2408 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_fc_ctrl_free()
2410 kfree(ctrl->queues); in nvme_fc_ctrl_free()
2412 put_device(ctrl->dev); in nvme_fc_ctrl_free()
2413 nvme_fc_rport_put(ctrl->rport); in nvme_fc_ctrl_free()
2415 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_ctrl_free()
2416 if (ctrl->ctrl.opts) in nvme_fc_ctrl_free()
2417 nvmf_free_options(ctrl->ctrl.opts); in nvme_fc_ctrl_free()
2418 kfree(ctrl); in nvme_fc_ctrl_free()
2422 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_put() argument
2424 kref_put(&ctrl->ref, nvme_fc_ctrl_free); in nvme_fc_ctrl_put()
2428 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_get() argument
2430 return kref_get_unless_zero(&ctrl->ref); in nvme_fc_ctrl_get()
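The pairing at 2422-2430 is plain kref lifetime management: nvme_fc_ctrl_get() uses kref_get_unless_zero(), so a lookup racing with teardown (as in nvme_fc_match_disconn_ls() at 1521) fails cleanly instead of resurrecting a dying controller, and the final nvme_fc_ctrl_put() runs nvme_fc_ctrl_free(). A userspace sketch of the same rule with C11 atomics (the struct and helpers are invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl {
	atomic_int ref;
	int cnum;
};

static bool ctrl_get(struct ctrl *c)   /* cf. kref_get_unless_zero() */
{
	int old = atomic_load(&c->ref);

	do {
		if (old == 0)
			return false;  /* teardown already began: refuse */
	} while (!atomic_compare_exchange_weak(&c->ref, &old, old + 1));
	return true;
}

static void ctrl_put(struct ctrl *c)   /* cf. kref_put(&ref, release) */
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		printf("freeing ctrl %d\n", c->cnum); /* cf. nvme_fc_ctrl_free() */
		free(c);
	}
}

int main(void)
{
	struct ctrl *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->ref, 1);       /* creation holds the first ref */
	c->cnum = 0;

	if (ctrl_get(c))               /* e.g. a disconnect-LS lookup */
		ctrl_put(c);
	ctrl_put(c);                   /* last put frees the controller */
	return 0;
}
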
2440 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_nvme_ctrl_freed() local
2442 WARN_ON(nctrl != &ctrl->ctrl); in nvme_fc_nvme_ctrl_freed()
2444 nvme_fc_ctrl_put(ctrl); in nvme_fc_nvme_ctrl_freed()
2463 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_terminate_exchange() local
2467 __nvme_fc_abort_op(ctrl, op); in nvme_fc_terminate_exchange()
2481 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) in __nvme_fc_abort_outstanding_ios() argument
2489 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2490 for (q = 1; q < ctrl->ctrl.queue_count; q++) in __nvme_fc_abort_outstanding_ios()
2491 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); in __nvme_fc_abort_outstanding_ios()
2493 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in __nvme_fc_abort_outstanding_ios()
2507 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2508 nvme_quiesce_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2509 nvme_sync_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2510 blk_mq_tagset_busy_iter(&ctrl->tag_set, in __nvme_fc_abort_outstanding_ios()
2511 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2512 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); in __nvme_fc_abort_outstanding_ios()
2514 nvme_unquiesce_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2532 nvme_quiesce_admin_queue(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2533 blk_sync_queue(ctrl->ctrl.admin_q); in __nvme_fc_abort_outstanding_ios()
2534 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, in __nvme_fc_abort_outstanding_ios()
2535 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2536 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); in __nvme_fc_abort_outstanding_ios()
2538 nvme_unquiesce_admin_queue(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2542 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) in nvme_fc_error_recovery() argument
2554 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_error_recovery()
2555 state = ctrl->ctrl.state; in nvme_fc_error_recovery()
2557 set_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_error_recovery()
2558 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_error_recovery()
2559 __nvme_fc_abort_outstanding_ios(ctrl, true); in nvme_fc_error_recovery()
2560 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2562 ctrl->cnum); in nvme_fc_error_recovery()
2565 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_error_recovery()
2571 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2573 ctrl->cnum, errmsg); in nvme_fc_error_recovery()
2574 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2575 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); in nvme_fc_error_recovery()
2577 nvme_reset_ctrl(&ctrl->ctrl); in nvme_fc_error_recovery()
2583 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_timeout() local
2591 dev_info(ctrl->ctrl.device, in nvme_fc_timeout()
2594 ctrl->cnum, op->queue->qnum, sqe->common.opcode, in nvme_fc_timeout()
2596 if (__nvme_fc_abort_op(ctrl, op)) in nvme_fc_timeout()
2597 nvme_fc_error_recovery(ctrl, "io timeout abort failed"); in nvme_fc_timeout()
2608 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, in nvme_fc_map_data() argument
2628 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, in nvme_fc_map_data()
2643 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, in nvme_fc_unmap_data() argument
2651 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, in nvme_fc_unmap_data()
2683 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, in nvme_fc_start_fcp_op() argument
2695 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_start_fcp_op()
2698 if (!nvme_fc_ctrl_get(ctrl)) in nvme_fc_start_fcp_op()
2742 ret = nvme_fc_map_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2745 nvme_fc_ctrl_put(ctrl); in nvme_fc_start_fcp_op()
2752 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, in nvme_fc_start_fcp_op()
2761 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, in nvme_fc_start_fcp_op()
2762 &ctrl->rport->remoteport, in nvme_fc_start_fcp_op()
2779 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_start_fcp_op()
2782 nvme_fc_unmap_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2786 nvme_fc_ctrl_put(ctrl); in nvme_fc_start_fcp_op()
2788 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && in nvme_fc_start_fcp_op()
2804 struct nvme_fc_ctrl *ctrl = queue->ctrl; in nvme_fc_queue_rq() local
2812 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || in nvme_fc_queue_rq()
2813 !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_fc_queue_rq()
2814 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_fc_queue_rq()
2838 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); in nvme_fc_queue_rq()
2844 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); in nvme_fc_submit_async_event() local
2848 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) in nvme_fc_submit_async_event()
2851 aen_op = &ctrl->aen_ops[0]; in nvme_fc_submit_async_event()
2853 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, in nvme_fc_submit_async_event()
2856 dev_err(ctrl->ctrl.device, in nvme_fc_submit_async_event()
2864 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_complete_rq() local
2869 nvme_fc_unmap_data(ctrl, rq, op); in nvme_fc_complete_rq()
2871 nvme_fc_ctrl_put(ctrl); in nvme_fc_complete_rq()
2876 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data); in nvme_fc_map_queues() local
2888 if (ctrl->lport->ops->map_queues) in nvme_fc_map_queues()
2889 ctrl->lport->ops->map_queues(&ctrl->lport->localport, in nvme_fc_map_queues()
2907 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_create_io_queues() argument
2909 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_io_queues()
2914 ctrl->lport->ops->max_hw_queues); in nvme_fc_create_io_queues()
2915 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_create_io_queues()
2917 dev_info(ctrl->ctrl.device, in nvme_fc_create_io_queues()
2922 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_create_io_queues()
2926 nvme_fc_init_io_queues(ctrl); in nvme_fc_create_io_queues()
2928 ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set, in nvme_fc_create_io_queues()
2931 ctrl->lport->ops->fcprqst_priv_sz)); in nvme_fc_create_io_queues()
2935 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2939 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2943 ctrl->ioq_live = true; in nvme_fc_create_io_queues()
2948 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_create_io_queues()
2950 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_fc_create_io_queues()
2951 nvme_fc_free_io_queues(ctrl); in nvme_fc_create_io_queues()
2954 ctrl->ctrl.tagset = NULL; in nvme_fc_create_io_queues()
2960 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_recreate_io_queues() argument
2962 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_recreate_io_queues()
2963 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; in nvme_fc_recreate_io_queues()
2968 ctrl->lport->ops->max_hw_queues); in nvme_fc_recreate_io_queues()
2969 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_recreate_io_queues()
2971 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2977 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2983 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_recreate_io_queues()
2985 if (ctrl->ctrl.queue_count == 1) in nvme_fc_recreate_io_queues()
2989 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2992 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); in nvme_fc_recreate_io_queues()
2995 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2999 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
3006 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_recreate_io_queues()
3008 nvme_fc_free_io_queues(ctrl); in nvme_fc_recreate_io_queues()
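The arithmetic visible at 2914-2922 and 2968-2983 is the usual queue-count negotiation: clamp the requested I/O queue count to the LLDD's max_hw_queues, let nvme_set_queue_count() negotiate with the controller, then add one for the admin queue in queues[0]. A small sketch of that bookkeeping (the numbers are made up, and it assumes nvme_set_queue_count() did not lower the count further):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int opts_nr_io_queues = 8;  /* requested via connect opts */
	unsigned int max_hw_queues = 4;      /* LLDD capability */
	unsigned int nr_io_queues;

	nr_io_queues = min_u(opts_nr_io_queues, max_hw_queues);

	/* queues[0] is the admin queue, so the total is nr_io_queues + 1
	 * (cf. "ctrl->ctrl.queue_count = nr_io_queues + 1" at 2922). */
	printf("%u I/O queues, queue_count=%u\n",
	       nr_io_queues, nr_io_queues + 1);
	return 0;
}
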
3032 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctlr_active_on_rport() argument
3034 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_active_on_rport()
3037 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_ctlr_active_on_rport()
3048 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctlr_inactive_on_rport() argument
3050 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_inactive_on_rport()
3071 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) in nvme_fc_create_association() argument
3073 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_association()
3079 ++ctrl->ctrl.nr_reconnects; in nvme_fc_create_association()
3081 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_association()
3084 if (nvme_fc_ctlr_active_on_rport(ctrl)) in nvme_fc_create_association()
3087 dev_info(ctrl->ctrl.device, in nvme_fc_create_association()
3090 ctrl->cnum, ctrl->lport->localport.port_name, in nvme_fc_create_association()
3091 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); in nvme_fc_create_association()
3093 clear_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_create_association()
3099 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, in nvme_fc_create_association()
3104 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], in nvme_fc_create_association()
3109 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_fc_create_association()
3113 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in nvme_fc_create_association()
3122 ret = nvme_enable_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3123 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3128 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; in nvme_fc_create_association()
3129 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << in nvme_fc_create_association()
3132 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_fc_create_association()
3134 ret = nvme_init_ctrl_finish(&ctrl->ctrl, false); in nvme_fc_create_association()
3135 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3143 if (ctrl->ctrl.icdoff) { in nvme_fc_create_association()
3144 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", in nvme_fc_create_association()
3145 ctrl->ctrl.icdoff); in nvme_fc_create_association()
3151 if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { in nvme_fc_create_association()
3152 dev_err(ctrl->ctrl.device, in nvme_fc_create_association()
3158 if (opts->queue_size > ctrl->ctrl.maxcmd) { in nvme_fc_create_association()
3160 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3163 opts->queue_size, ctrl->ctrl.maxcmd); in nvme_fc_create_association()
3164 opts->queue_size = ctrl->ctrl.maxcmd; in nvme_fc_create_association()
3165 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_create_association()
3168 ret = nvme_fc_init_aen_ops(ctrl); in nvme_fc_create_association()
3176 if (ctrl->ctrl.queue_count > 1) { in nvme_fc_create_association()
3177 if (!ctrl->ioq_live) in nvme_fc_create_association()
3178 ret = nvme_fc_create_io_queues(ctrl); in nvme_fc_create_association()
3180 ret = nvme_fc_recreate_io_queues(ctrl); in nvme_fc_create_association()
3183 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_create_association()
3184 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3187 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_create_association()
3190 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_fc_create_association()
3191 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_create_association()
3193 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_create_association()
3196 nvme_start_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3201 nvme_fc_term_aen_ops(ctrl); in nvme_fc_create_association()
3203 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3205 ctrl->cnum, ctrl->association_id, ret); in nvme_fc_create_association()
3207 nvme_fc_xmt_disconnect_assoc(ctrl); in nvme_fc_create_association()
3208 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_create_association()
3209 ctrl->association_id = 0; in nvme_fc_create_association()
3210 disls = ctrl->rcv_disconn; in nvme_fc_create_association()
3211 ctrl->rcv_disconn = NULL; in nvme_fc_create_association()
3212 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_create_association()
3216 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_create_association()
3218 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_create_association()
3219 clear_bit(ASSOC_ACTIVE, &ctrl->flags); in nvme_fc_create_association()
3220 nvme_fc_ctlr_inactive_on_rport(ctrl); in nvme_fc_create_association()
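The failure path of nvme_fc_create_association() (3201-3220) is a textbook goto-unwind ladder: each label undoes exactly the setup steps that succeeded, in reverse order, ending with clearing ASSOC_ACTIVE and dropping the rport activity count. A generic sketch of the idiom (the step and label names below are placeholders loosely modeled on the listing, not the driver's real labels):

#include <stdio.h>

static int  setup_queue(void)  { puts("create hw queue");  return 0; }
static int  setup_assoc(void)  { puts("connect assoc");    return 0; }
static int  setup_aen(void)    { puts("init aen ops");     return -1; } /* fails */
static void undo_assoc(void)   { puts("disconnect assoc"); }
static void undo_queue(void)   { puts("delete hw queue");  }

static int create_association(void)
{
	if (setup_queue())
		goto out;
	if (setup_assoc())
		goto out_delete_queue;
	if (setup_aen())
		goto out_disconnect;
	return 0;

out_disconnect:
	undo_assoc();       /* undo only what succeeded, newest first */
out_delete_queue:
	undo_queue();
out:
	return -1;
}

int main(void)
{
	return create_association() ? 1 : 0;
}
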
3233 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) in nvme_fc_delete_association() argument
3238 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_delete_association()
3241 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3242 set_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3243 ctrl->iocnt = 0; in nvme_fc_delete_association()
3244 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3246 __nvme_fc_abort_outstanding_ios(ctrl, false); in nvme_fc_delete_association()
3249 nvme_fc_abort_aen_ops(ctrl); in nvme_fc_delete_association()
3252 spin_lock_irq(&ctrl->lock); in nvme_fc_delete_association()
3253 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); in nvme_fc_delete_association()
3254 clear_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3255 spin_unlock_irq(&ctrl->lock); in nvme_fc_delete_association()
3257 nvme_fc_term_aen_ops(ctrl); in nvme_fc_delete_association()
3265 if (ctrl->association_id) in nvme_fc_delete_association()
3266 nvme_fc_xmt_disconnect_assoc(ctrl); in nvme_fc_delete_association()
3268 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3269 ctrl->association_id = 0; in nvme_fc_delete_association()
3270 disls = ctrl->rcv_disconn; in nvme_fc_delete_association()
3271 ctrl->rcv_disconn = NULL; in nvme_fc_delete_association()
3272 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3280 if (ctrl->ctrl.tagset) { in nvme_fc_delete_association()
3281 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_delete_association()
3282 nvme_fc_free_io_queues(ctrl); in nvme_fc_delete_association()
3285 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_delete_association()
3286 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_delete_association()
3289 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_fc_delete_association()
3292 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_fc_delete_association()
3294 nvme_fc_ctlr_inactive_on_rport(ctrl); in nvme_fc_delete_association()
3300 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_delete_ctrl() local
3302 cancel_work_sync(&ctrl->ioerr_work); in nvme_fc_delete_ctrl()
3303 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_delete_ctrl()
3308 nvme_fc_delete_association(ctrl); in nvme_fc_delete_ctrl()
3312 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) in nvme_fc_reconnect_or_delete() argument
3314 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_reconnect_or_delete()
3316 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; in nvme_fc_reconnect_or_delete()
3319 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) in nvme_fc_reconnect_or_delete()
3323 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3325 ctrl->cnum, status); in nvme_fc_reconnect_or_delete()
3331 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_fc_reconnect_or_delete()
3333 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3336 ctrl->cnum, recon_delay / HZ); in nvme_fc_reconnect_or_delete()
3340 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); in nvme_fc_reconnect_or_delete()
3344 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3346 ctrl->cnum); in nvme_fc_reconnect_or_delete()
3348 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3351 ctrl->cnum, ctrl->ctrl.nr_reconnects); in nvme_fc_reconnect_or_delete()
3353 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3356 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, in nvme_fc_reconnect_or_delete()
3357 (ctrl->ctrl.opts->max_reconnects * in nvme_fc_reconnect_or_delete()
3358 ctrl->ctrl.opts->reconnect_delay))); in nvme_fc_reconnect_or_delete()
3359 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); in nvme_fc_reconnect_or_delete()
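The warning at 3353-3358 prints the effective give-up window before the nvme_delete_ctrl() at 3359: reconnect attempts stop once connectivity has been lost for min(dev_loss_tmo, max_reconnects * reconnect_delay) seconds. A toy calculation of that bound (all values illustrative):

#include <stdio.h>

int main(void)
{
	int reconnect_delay = 10;   /* seconds between attempts (opts) */
	int max_reconnects  = 6;    /* attempts before giving up (opts) */
	int dev_loss_tmo    = 60;   /* rport device-loss timeout */

	int by_attempts = max_reconnects * reconnect_delay;
	int window = dev_loss_tmo < by_attempts ? dev_loss_tmo : by_attempts;

	/* cf. min_t(int, portptr->dev_loss_tmo, max_reconnects *
	 * reconnect_delay) at 3356-3358. */
	printf("give up after ~%d seconds without connectivity\n", window);
	return 0;
}
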
3366 struct nvme_fc_ctrl *ctrl = in nvme_fc_reset_ctrl_work() local
3367 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); in nvme_fc_reset_ctrl_work()
3369 nvme_stop_ctrl(&ctrl->ctrl); in nvme_fc_reset_ctrl_work()
3372 nvme_fc_delete_association(ctrl); in nvme_fc_reset_ctrl_work()
3374 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) in nvme_fc_reset_ctrl_work()
3375 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3377 "to CONNECTING\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3379 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_reset_ctrl_work()
3380 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_reset_ctrl_work()
3381 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3383 "after reset\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3385 flush_delayed_work(&ctrl->connect_work); in nvme_fc_reset_ctrl_work()
3388 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); in nvme_fc_reset_ctrl_work()
3411 struct nvme_fc_ctrl *ctrl = in nvme_fc_connect_ctrl_work() local
3415 ret = nvme_fc_create_association(ctrl); in nvme_fc_connect_ctrl_work()
3417 nvme_fc_reconnect_or_delete(ctrl, ret); in nvme_fc_connect_ctrl_work()
3419 dev_info(ctrl->ctrl.device, in nvme_fc_connect_ctrl_work()
3421 ctrl->cnum); in nvme_fc_connect_ctrl_work()
3447 struct nvme_fc_ctrl *ctrl; in nvme_fc_existing_controller() local
3452 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_existing_controller()
3453 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); in nvme_fc_existing_controller()
3466 struct nvme_fc_ctrl *ctrl; in nvme_fc_init_ctrl() local
3482 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_fc_init_ctrl()
3483 if (!ctrl) { in nvme_fc_init_ctrl()
3507 ctrl->ctrl.opts = opts; in nvme_fc_init_ctrl()
3508 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_init_ctrl()
3510 ctrl->ctrl.numa_node = dev_to_node(lport->dev); in nvme_fc_init_ctrl()
3512 ctrl->ctrl.numa_node = NUMA_NO_NODE; in nvme_fc_init_ctrl()
3513 INIT_LIST_HEAD(&ctrl->ctrl_list); in nvme_fc_init_ctrl()
3514 ctrl->lport = lport; in nvme_fc_init_ctrl()
3515 ctrl->rport = rport; in nvme_fc_init_ctrl()
3516 ctrl->dev = lport->dev; in nvme_fc_init_ctrl()
3517 ctrl->cnum = idx; in nvme_fc_init_ctrl()
3518 ctrl->ioq_live = false; in nvme_fc_init_ctrl()
3519 init_waitqueue_head(&ctrl->ioabort_wait); in nvme_fc_init_ctrl()
3521 get_device(ctrl->dev); in nvme_fc_init_ctrl()
3522 kref_init(&ctrl->ref); in nvme_fc_init_ctrl()
3524 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); in nvme_fc_init_ctrl()
3525 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); in nvme_fc_init_ctrl()
3526 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); in nvme_fc_init_ctrl()
3527 spin_lock_init(&ctrl->lock); in nvme_fc_init_ctrl()
3530 ctrl->ctrl.queue_count = min_t(unsigned int, in nvme_fc_init_ctrl()
3533 ctrl->ctrl.queue_count++; /* +1 for admin queue */ in nvme_fc_init_ctrl()
3535 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_init_ctrl()
3536 ctrl->ctrl.kato = opts->kato; in nvme_fc_init_ctrl()
3537 ctrl->ctrl.cntlid = 0xffff; in nvme_fc_init_ctrl()
3540 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, in nvme_fc_init_ctrl()
3542 if (!ctrl->queues) in nvme_fc_init_ctrl()
3545 nvme_fc_init_queue(ctrl, 0); in nvme_fc_init_ctrl()
3554 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); in nvme_fc_init_ctrl()
3560 ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, in nvme_fc_init_ctrl()
3563 ctrl->lport->ops->fcprqst_priv_sz)); in nvme_fc_init_ctrl()
3568 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); in nvme_fc_init_ctrl()
3571 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || in nvme_fc_init_ctrl()
3572 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_fc_init_ctrl()
3573 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3574 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); in nvme_fc_init_ctrl()
3578 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_init_ctrl()
3579 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3581 ctrl->cnum); in nvme_fc_init_ctrl()
3585 flush_delayed_work(&ctrl->connect_work); in nvme_fc_init_ctrl()
3587 dev_info(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3589 ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl)); in nvme_fc_init_ctrl()
3591 return &ctrl->ctrl; in nvme_fc_init_ctrl()
3594 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); in nvme_fc_init_ctrl()
3595 cancel_work_sync(&ctrl->ioerr_work); in nvme_fc_init_ctrl()
3596 cancel_work_sync(&ctrl->ctrl.reset_work); in nvme_fc_init_ctrl()
3597 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_init_ctrl()
3599 ctrl->ctrl.opts = NULL; in nvme_fc_init_ctrl()
3602 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3605 nvme_put_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3619 kfree(ctrl->queues); in nvme_fc_init_ctrl()
3621 put_device(ctrl->dev); in nvme_fc_init_ctrl()
3622 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_init_ctrl()
3624 kfree(ctrl); in nvme_fc_init_ctrl()
3701 struct nvme_ctrl *ctrl; in nvme_fc_create_ctrl() local
3735 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); in nvme_fc_create_ctrl()
3736 if (IS_ERR(ctrl)) in nvme_fc_create_ctrl()
3738 return ctrl; in nvme_fc_create_ctrl()
3961 struct nvme_fc_ctrl *ctrl; in nvme_fc_delete_controllers() local
3964 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_delete_controllers()
3965 dev_warn(ctrl->ctrl.device, in nvme_fc_delete_controllers()
3967 ctrl->cnum); in nvme_fc_delete_controllers()
3968 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_delete_controllers()