Lines matching +full:ctrl +full:-module in drivers/nvme/target/loop.c (Linux NVMe over Fabrics loopback target)

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
8 #include <linux/blk-mq.h>
10 #include <linux/module.h>
37 struct nvme_ctrl ctrl; member
42 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) in to_loop_ctrl() argument
44 return container_of(ctrl, struct nvme_loop_ctrl, ctrl); in to_loop_ctrl()
54 struct nvme_loop_ctrl *ctrl; member
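The to_loop_ctrl() helper above is the kernel's container_of() pattern: given a pointer to an embedded struct nvme_ctrl, it recovers the enclosing struct nvme_loop_ctrl by subtracting the member's offset. A minimal user-space sketch of the same pattern; the struct names here are hypothetical stand-ins:

#include <stddef.h>
#include <stdio.h>

/* User-space rendition of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for the embedded/enclosing structs. */
struct inner { int id; };
struct outer { long cookie; struct inner core; };

static struct outer *to_outer(struct inner *i)
{
    return container_of(i, struct outer, core);
}

int main(void)
{
    struct outer o = { .cookie = 42, .core = { .id = 7 } };
    struct inner *i = &o.core;   /* only the embedded struct is passed around */

    printf("cookie = %ld\n", to_outer(i)->cookie);  /* prints 42 */
    return 0;
}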
65 static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
71 return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
78 sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT); in nvme_loop_complete_rq()
87 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_loop_tagset()
88 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_loop_tagset()
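nvme_loop_tagset() encodes the queue-numbering convention used throughout this driver: index 0 is the admin queue with its own tag set, and I/O queue N maps to entry N-1 of the shared I/O tag set. A self-contained sketch of that indexing, with all names invented for illustration:

#include <stdio.h>

#define NR_IO_QUEUES 2

static const char *admin_tags[1]         = { "admin" };
static const char *io_tags[NR_IO_QUEUES] = { "io-0", "io-1" };

static const char *tagset_for(int queue_idx)
{
    if (queue_idx == 0)
        return admin_tags[0];
    return io_tags[queue_idx - 1];   /* shift past the admin queue */
}

int main(void)
{
    for (int q = 0; q <= NR_IO_QUEUES; q++)
        printf("queue %d -> %s\n", q, tagset_for(q));
    return 0;
}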
94 container_of(req->sq, struct nvme_loop_queue, nvme_sq); in nvme_loop_queue_response()
95 struct nvme_completion *cqe = req->cqe; in nvme_loop_queue_response()
104 cqe->command_id))) { in nvme_loop_queue_response()
105 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_loop_queue_response()
106 &cqe->result); in nvme_loop_queue_response()
110 rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id); in nvme_loop_queue_response()
112 dev_err(queue->ctrl->ctrl.device, in nvme_loop_queue_response()
114 cqe->command_id, nvme_loop_queue_idx(queue)); in nvme_loop_queue_response()
118 if (!nvme_try_complete_req(rq, cqe->status, cqe->result)) in nvme_loop_queue_response()
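nvme_loop_queue_response() demultiplexes completions by command_id: the reserved admin-queue slot (NVME_AQ_BLK_MQ_DEPTH) signals an asynchronous event, anything else is resolved to an in-flight request through the tag set, and an unknown id is logged rather than dereferenced. A user-space sketch of the same dispatch, assuming a flat tag table; all names are hypothetical:

#include <stdio.h>

#define AER_CMD_ID  31          /* reserved slot, like NVME_AQ_BLK_MQ_DEPTH */
#define MAX_TAGS    32

struct pending { int in_use; };
static struct pending table[MAX_TAGS];

/* Dispatch one completion by command id. */
static void complete(int cmd_id, int status)
{
    if (cmd_id == AER_CMD_ID) {             /* async event notification */
        printf("async event, status %d\n", status);
        return;
    }
    if (cmd_id < 0 || cmd_id >= MAX_TAGS || !table[cmd_id].in_use) {
        fprintf(stderr, "tag %d not found\n", cmd_id);
        return;
    }
    table[cmd_id].in_use = 0;               /* complete the request */
    printf("request %d done, status %d\n", cmd_id, status);
}

int main(void)
{
    table[3].in_use = 1;
    complete(3, 0);
    complete(AER_CMD_ID, 0);
    complete(9, 0);                          /* unknown-tag error path */
    return 0;
}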
128 iod->req.execute(&iod->req); in nvme_loop_execute_work()
134 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_loop_queue_rq()
135 struct nvme_loop_queue *queue = hctx->driver_data; in nvme_loop_queue_rq()
136 struct request *req = bd->rq; in nvme_loop_queue_rq()
138 bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags); in nvme_loop_queue_rq()
141 if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready)) in nvme_loop_queue_rq()
142 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req); in nvme_loop_queue_rq()
144 ret = nvme_setup_cmd(ns, req, &iod->cmd); in nvme_loop_queue_rq()
149 iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; in nvme_loop_queue_rq()
150 iod->req.port = queue->ctrl->port; in nvme_loop_queue_rq()
151 if (!nvmet_req_init(&iod->req, &queue->nvme_cq, in nvme_loop_queue_rq()
152 &queue->nvme_sq, &nvme_loop_ops)) in nvme_loop_queue_rq()
156 iod->sg_table.sgl = iod->first_sgl; in nvme_loop_queue_rq()
157 if (sg_alloc_table_chained(&iod->sg_table, in nvme_loop_queue_rq()
159 iod->sg_table.sgl, NVME_INLINE_SG_CNT)) { in nvme_loop_queue_rq()
164 iod->req.sg = iod->sg_table.sgl; in nvme_loop_queue_rq()
165 iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); in nvme_loop_queue_rq()
166 iod->req.transfer_len = blk_rq_payload_bytes(req); in nvme_loop_queue_rq()
169 schedule_work(&iod->work); in nvme_loop_queue_rq()
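nvme_loop_queue_rq() checks queue readiness, builds the NVMe command, maps the request's scatterlist into inline SGEs, and then hands execution to a work item so the target side runs in process context rather than in the submission path. A minimal kernel-module sketch of that defer-to-work pattern, using only the standard workqueue API; the module and its names are hypothetical:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_iod {
    struct work_struct work;
    int payload;
};

static struct demo_iod iod;

static void demo_execute(struct work_struct *w)
{
    struct demo_iod *i = container_of(w, struct demo_iod, work);

    pr_info("executing deferred request, payload=%d\n", i->payload);
}

static int __init demo_init(void)
{
    iod.payload = 42;
    INIT_WORK(&iod.work, demo_execute);  /* like nvme_loop_init_iod() */
    schedule_work(&iod.work);            /* like nvme_loop_queue_rq() */
    return 0;
}

static void __exit demo_exit(void)
{
    cancel_work_sync(&iod.work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");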
175 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg); in nvme_loop_submit_async_event() local
176 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
177 struct nvme_loop_iod *iod = &ctrl->async_event_iod; in nvme_loop_submit_async_event()
179 memset(&iod->cmd, 0, sizeof(iod->cmd)); in nvme_loop_submit_async_event()
180 iod->cmd.common.opcode = nvme_admin_async_event; in nvme_loop_submit_async_event()
181 iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_loop_submit_async_event()
182 iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; in nvme_loop_submit_async_event()
184 if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq, in nvme_loop_submit_async_event()
186 dev_err(ctrl->ctrl.device, "failed async event work\n"); in nvme_loop_submit_async_event()
190 schedule_work(&iod->work); in nvme_loop_submit_async_event()
193 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, in nvme_loop_init_iod() argument
196 iod->req.cmd = &iod->cmd; in nvme_loop_init_iod()
197 iod->req.cqe = &iod->cqe; in nvme_loop_init_iod()
198 iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
199 INIT_WORK(&iod->work, nvme_loop_execute_work); in nvme_loop_init_iod()
207 struct nvme_loop_ctrl *ctrl = set->driver_data; in nvme_loop_init_request() local
209 nvme_req(req)->ctrl = &ctrl->ctrl; in nvme_loop_init_request()
210 return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req), in nvme_loop_init_request()
211 (set == &ctrl->tag_set) ? hctx_idx + 1 : 0); in nvme_loop_init_request()
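nvme_loop_init_request() works on the PDU that blk-mq allocates directly behind each struct request, sized by tag_set.cmd_size (here sizeof(struct nvme_loop_iod) plus room for inline scatterlist entries). A user-space sketch of that over-allocation layout, with invented struct names:

#include <stdio.h>
#include <stdlib.h>

/* One allocation holds the generic request plus driver-private data,
 * the way tag_set.cmd_size sizes the PDU behind a struct request. */
struct request { int tag; };
struct my_pdu  { int opcode; };

static void *rq_to_pdu(struct request *rq)
{
    return rq + 1;                 /* PDU starts right after the request */
}

int main(void)
{
    struct request *rq = malloc(sizeof(*rq) + sizeof(struct my_pdu));
    struct my_pdu *pdu;

    if (!rq)
        return 1;
    rq->tag = 5;
    pdu = rq_to_pdu(rq);
    pdu->opcode = 0x0c;
    printf("tag %d, opcode %#x\n", rq->tag, pdu->opcode);
    free(rq);
    return 0;
}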
217 struct nvme_loop_ctrl *ctrl = data; in nvme_loop_init_hctx() local
218 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
220 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); in nvme_loop_init_hctx()
222 hctx->driver_data = queue; in nvme_loop_init_hctx()
229 struct nvme_loop_ctrl *ctrl = data; in nvme_loop_init_admin_hctx() local
230 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
234 hctx->driver_data = queue; in nvme_loop_init_admin_hctx()
252 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) in nvme_loop_destroy_admin_queue() argument
254 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); in nvme_loop_destroy_admin_queue()
255 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
256 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_loop_destroy_admin_queue()
257 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_loop_destroy_admin_queue()
258 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_loop_destroy_admin_queue()
263 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl); in nvme_loop_free_ctrl() local
265 if (list_empty(&ctrl->list)) in nvme_loop_free_ctrl()
269 list_del(&ctrl->list); in nvme_loop_free_ctrl()
272 if (nctrl->tagset) { in nvme_loop_free_ctrl()
273 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_loop_free_ctrl()
274 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_loop_free_ctrl()
276 kfree(ctrl->queues); in nvme_loop_free_ctrl()
277 nvmf_free_options(nctrl->opts); in nvme_loop_free_ctrl()
279 kfree(ctrl); in nvme_loop_free_ctrl()
282 static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) in nvme_loop_destroy_io_queues() argument
286 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_loop_destroy_io_queues()
287 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); in nvme_loop_destroy_io_queues()
288 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); in nvme_loop_destroy_io_queues()
292 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) in nvme_loop_init_io_queues() argument
294 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_loop_init_io_queues()
298 nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); in nvme_loop_init_io_queues()
299 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_loop_init_io_queues()
303 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); in nvme_loop_init_io_queues()
306 ctrl->queues[i].ctrl = ctrl; in nvme_loop_init_io_queues()
307 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); in nvme_loop_init_io_queues()
311 ctrl->ctrl.queue_count++; in nvme_loop_init_io_queues()
317 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_init_io_queues()
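nvme_loop_init_io_queues() uses a counted-initialization idiom: ctrl->ctrl.queue_count is incremented only after each nvmet_sq_init() succeeds, so the error path (and nvme_loop_destroy_io_queues()) unwinds exactly the queues that exist. A sketch of the idiom with a simulated failure; all names are hypothetical:

#include <stdio.h>

#define NR_QUEUES 4

static int queue_count = 1;      /* slot 0 is the admin queue */

static int init_one(int i)
{
    return (i == 3) ? -1 : 0;    /* simulate a failure on queue 3 */
}

static void destroy_io_queues(void)
{
    for (int i = 1; i < queue_count; i++)
        printf("destroying queue %d\n", i);
}

int main(void)
{
    for (int i = 1; i <= NR_QUEUES; i++) {
        if (init_one(i)) {
            destroy_io_queues();  /* unwinds queues 1..queue_count-1 only */
            return 1;
        }
        queue_count++;            /* count a queue only once it is live */
    }
    return 0;
}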
321 static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl) in nvme_loop_connect_io_queues() argument
325 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_loop_connect_io_queues()
326 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); in nvme_loop_connect_io_queues()
329 set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); in nvme_loop_connect_io_queues()
335 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) in nvme_loop_configure_admin_queue() argument
339 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); in nvme_loop_configure_admin_queue()
340 ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops; in nvme_loop_configure_admin_queue()
341 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_loop_configure_admin_queue()
342 ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ in nvme_loop_configure_admin_queue()
343 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_loop_configure_admin_queue()
344 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + in nvme_loop_configure_admin_queue()
346 ctrl->admin_tag_set.driver_data = ctrl; in nvme_loop_configure_admin_queue()
347 ctrl->admin_tag_set.nr_hw_queues = 1; in nvme_loop_configure_admin_queue()
348 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; in nvme_loop_configure_admin_queue()
349 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; in nvme_loop_configure_admin_queue()
351 ctrl->queues[0].ctrl = ctrl; in nvme_loop_configure_admin_queue()
352 error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); in nvme_loop_configure_admin_queue()
355 ctrl->ctrl.queue_count = 1; in nvme_loop_configure_admin_queue()
357 error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); in nvme_loop_configure_admin_queue()
360 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; in nvme_loop_configure_admin_queue()
362 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_loop_configure_admin_queue()
363 if (IS_ERR(ctrl->ctrl.fabrics_q)) { in nvme_loop_configure_admin_queue()
364 error = PTR_ERR(ctrl->ctrl.fabrics_q); in nvme_loop_configure_admin_queue()
368 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_loop_configure_admin_queue()
369 if (IS_ERR(ctrl->ctrl.admin_q)) { in nvme_loop_configure_admin_queue()
370 error = PTR_ERR(ctrl->ctrl.admin_q); in nvme_loop_configure_admin_queue()
374 error = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_loop_configure_admin_queue()
378 set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); in nvme_loop_configure_admin_queue()
380 error = nvme_enable_ctrl(&ctrl->ctrl); in nvme_loop_configure_admin_queue()
384 ctrl->ctrl.max_hw_sectors = in nvme_loop_configure_admin_queue()
385 (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9); in nvme_loop_configure_admin_queue()
387 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_loop_configure_admin_queue()
389 error = nvme_init_identify(&ctrl->ctrl); in nvme_loop_configure_admin_queue()
396 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_loop_configure_admin_queue()
398 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_loop_configure_admin_queue()
400 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_loop_configure_admin_queue()
402 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_configure_admin_queue()
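nvme_loop_configure_admin_queue() acquires its resources strictly in order (nvmet SQ, tag set, fabrics and admin queues, connect, enable, identify) and unwinds them in reverse via goto labels on failure. A self-contained sketch of that goto-unwind idiom, using plain allocations as stand-ins for the queue resources:

#include <stdio.h>
#include <stdlib.h>

/* Each failure jumps to a label that tears down only what was
 * already built, in reverse acquisition order. */
static int setup(void)
{
    char *a, *b, *c;
    int ret = -1;

    a = malloc(16);
    if (!a)
        goto out;
    b = malloc(16);
    if (!b)
        goto out_free_a;
    c = malloc(16);
    if (!c)
        goto out_free_b;

    printf("all resources acquired\n");
    free(c);                       /* demo only: release and report success */
    free(b);
    free(a);
    return 0;

out_free_b:
    free(b);
out_free_a:
    free(a);
out:
    return ret;
}

int main(void) { return setup(); }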
406 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) in nvme_loop_shutdown_ctrl() argument
408 if (ctrl->ctrl.queue_count > 1) { in nvme_loop_shutdown_ctrl()
409 nvme_stop_queues(&ctrl->ctrl); in nvme_loop_shutdown_ctrl()
410 blk_mq_tagset_busy_iter(&ctrl->tag_set, in nvme_loop_shutdown_ctrl()
411 nvme_cancel_request, &ctrl->ctrl); in nvme_loop_shutdown_ctrl()
412 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); in nvme_loop_shutdown_ctrl()
413 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_shutdown_ctrl()
416 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in nvme_loop_shutdown_ctrl()
417 if (ctrl->ctrl.state == NVME_CTRL_LIVE) in nvme_loop_shutdown_ctrl()
418 nvme_shutdown_ctrl(&ctrl->ctrl); in nvme_loop_shutdown_ctrl()
420 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, in nvme_loop_shutdown_ctrl()
421 nvme_cancel_request, &ctrl->ctrl); in nvme_loop_shutdown_ctrl()
422 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); in nvme_loop_shutdown_ctrl()
423 nvme_loop_destroy_admin_queue(ctrl); in nvme_loop_shutdown_ctrl()
426 static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl) in nvme_loop_delete_ctrl_host() argument
428 nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl)); in nvme_loop_delete_ctrl_host()
433 struct nvme_loop_ctrl *ctrl; in nvme_loop_delete_ctrl() local
436 list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) { in nvme_loop_delete_ctrl()
437 if (ctrl->ctrl.cntlid == nctrl->cntlid) in nvme_loop_delete_ctrl()
438 nvme_delete_ctrl(&ctrl->ctrl); in nvme_loop_delete_ctrl()
445 struct nvme_loop_ctrl *ctrl = in nvme_loop_reset_ctrl_work() local
446 container_of(work, struct nvme_loop_ctrl, ctrl.reset_work); in nvme_loop_reset_ctrl_work()
449 nvme_stop_ctrl(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
450 nvme_loop_shutdown_ctrl(ctrl); in nvme_loop_reset_ctrl_work()
452 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_loop_reset_ctrl_work()
458 ret = nvme_loop_configure_admin_queue(ctrl); in nvme_loop_reset_ctrl_work()
462 ret = nvme_loop_init_io_queues(ctrl); in nvme_loop_reset_ctrl_work()
466 ret = nvme_loop_connect_io_queues(ctrl); in nvme_loop_reset_ctrl_work()
470 blk_mq_update_nr_hw_queues(&ctrl->tag_set, in nvme_loop_reset_ctrl_work()
471 ctrl->ctrl.queue_count - 1); in nvme_loop_reset_ctrl_work()
473 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) in nvme_loop_reset_ctrl_work()
476 nvme_start_ctrl(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
481 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_reset_ctrl_work()
483 nvme_loop_destroy_admin_queue(ctrl); in nvme_loop_reset_ctrl_work()
485 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); in nvme_loop_reset_ctrl_work()
486 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
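The reset path stops the controller, tears everything down, and rebuilds it, gating each phase on nvme_change_ctrl_state(): a transition is applied only if it is legal from the current state, and a refused transition is treated as a fatal race. A simplified sketch of such a guarded state machine; the states and rules below are a reduced, hypothetical subset of the real nvme controller states:

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { NEW, CONNECTING, LIVE, DELETING };

static enum ctrl_state state = NEW;

/* Apply the transition only when it is legal from the current state. */
static bool change_state(enum ctrl_state new_state)
{
    bool ok;

    switch (new_state) {
    case CONNECTING:
        ok = (state == NEW || state == LIVE);
        break;
    case LIVE:
        ok = (state == CONNECTING);
        break;
    default:
        ok = true;
        break;
    }
    if (ok)
        state = new_state;
    return ok;
}

int main(void)
{
    printf("NEW->CONNECTING: %d\n", change_state(CONNECTING)); /* 1 */
    printf("CONNECTING->LIVE: %d\n", change_state(LIVE));      /* 1 */
    printf("LIVE->LIVE: %d\n", change_state(LIVE));            /* 0: refused */
    return 0;
}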
491 .module = THIS_MODULE,
502 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) in nvme_loop_create_io_queues() argument
506 ret = nvme_loop_init_io_queues(ctrl); in nvme_loop_create_io_queues()
510 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_loop_create_io_queues()
511 ctrl->tag_set.ops = &nvme_loop_mq_ops; in nvme_loop_create_io_queues()
512 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_loop_create_io_queues()
513 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ in nvme_loop_create_io_queues()
514 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_loop_create_io_queues()
515 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in nvme_loop_create_io_queues()
516 ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + in nvme_loop_create_io_queues()
518 ctrl->tag_set.driver_data = ctrl; in nvme_loop_create_io_queues()
519 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; in nvme_loop_create_io_queues()
520 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; in nvme_loop_create_io_queues()
521 ctrl->ctrl.tagset = &ctrl->tag_set; in nvme_loop_create_io_queues()
523 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); in nvme_loop_create_io_queues()
527 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_loop_create_io_queues()
528 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_loop_create_io_queues()
529 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_loop_create_io_queues()
533 ret = nvme_loop_connect_io_queues(ctrl); in nvme_loop_create_io_queues()
540 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_loop_create_io_queues()
542 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_loop_create_io_queues()
544 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_create_io_queues()
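The I/O tag set mirrors the admin one but reserves a single tag for the fabrics connect command (tag_set.reserved_tags = 1), so that connect can never be starved by regular I/O. The sketch below models that policy with a simple counter; real blk-mq uses per-class bitmaps, so this illustrates the policy, not the mechanism:

#include <stdio.h>

#define DEPTH     4
#define RESERVED  1

static int used;

/* Ordinary I/O draws from the shared pool; a reserved caller may
 * also use the held-back portion. Returns a tag or -1. */
static int get_tag(int reserved)
{
    int limit = reserved ? DEPTH : DEPTH - RESERVED;

    if (used >= limit)
        return -1;               /* pool exhausted for this class */
    return used++;
}

int main(void)
{
    for (int i = 0; i < DEPTH; i++)
        printf("io tag: %d\n", get_tag(0));   /* last attempt fails */
    printf("connect tag: %d\n", get_tag(1));  /* still succeeds */
    return 0;
}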
548 static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl) in nvme_loop_find_port() argument
555 if ((ctrl->opts->mask & NVMF_OPT_TRADDR) && in nvme_loop_find_port()
556 strcmp(ctrl->opts->traddr, p->disc_addr.traddr)) in nvme_loop_find_port()
568 struct nvme_loop_ctrl *ctrl; in nvme_loop_create_ctrl() local
571 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_loop_create_ctrl()
572 if (!ctrl) in nvme_loop_create_ctrl()
573 return ERR_PTR(-ENOMEM); in nvme_loop_create_ctrl()
574 ctrl->ctrl.opts = opts; in nvme_loop_create_ctrl()
575 INIT_LIST_HEAD(&ctrl->list); in nvme_loop_create_ctrl()
577 INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work); in nvme_loop_create_ctrl()
579 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops, in nvme_loop_create_ctrl()
584 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) in nvme_loop_create_ctrl()
587 ret = -ENOMEM; in nvme_loop_create_ctrl()
589 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_loop_create_ctrl()
590 ctrl->ctrl.kato = opts->kato; in nvme_loop_create_ctrl()
591 ctrl->port = nvme_loop_find_port(&ctrl->ctrl); in nvme_loop_create_ctrl()
593 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), in nvme_loop_create_ctrl()
595 if (!ctrl->queues) in nvme_loop_create_ctrl()
598 ret = nvme_loop_configure_admin_queue(ctrl); in nvme_loop_create_ctrl()
602 if (opts->queue_size > ctrl->ctrl.maxcmd) { in nvme_loop_create_ctrl()
604 dev_warn(ctrl->ctrl.device, in nvme_loop_create_ctrl()
605 "queue_size %zu > ctrl maxcmd %u, clamping down\n", in nvme_loop_create_ctrl()
606 opts->queue_size, ctrl->ctrl.maxcmd); in nvme_loop_create_ctrl()
607 opts->queue_size = ctrl->ctrl.maxcmd; in nvme_loop_create_ctrl()
610 if (opts->nr_io_queues) { in nvme_loop_create_ctrl()
611 ret = nvme_loop_create_io_queues(ctrl); in nvme_loop_create_ctrl()
616 nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0); in nvme_loop_create_ctrl()
618 dev_info(ctrl->ctrl.device, in nvme_loop_create_ctrl()
619 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn); in nvme_loop_create_ctrl()
621 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) in nvme_loop_create_ctrl()
625 list_add_tail(&ctrl->list, &nvme_loop_ctrl_list); in nvme_loop_create_ctrl()
628 nvme_start_ctrl(&ctrl->ctrl); in nvme_loop_create_ctrl()
630 return &ctrl->ctrl; in nvme_loop_create_ctrl()
633 nvme_loop_destroy_admin_queue(ctrl); in nvme_loop_create_ctrl()
635 kfree(ctrl->queues); in nvme_loop_create_ctrl()
637 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_loop_create_ctrl()
638 nvme_put_ctrl(&ctrl->ctrl); in nvme_loop_create_ctrl()
641 ret = -EIO; in nvme_loop_create_ctrl()
648 list_add_tail(&port->entry, &nvme_loop_ports); in nvme_loop_add_port()
656 list_del_init(&port->entry); in nvme_loop_remove_port()
679 .module = THIS_MODULE,
701 struct nvme_loop_ctrl *ctrl, *next; in nvme_loop_cleanup_module() local
707 list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list) in nvme_loop_cleanup_module()
708 nvme_delete_ctrl(&ctrl->ctrl); in nvme_loop_cleanup_module()
718 MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
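The MODULE_ALIAS ties the module to its fabrics transport number, so the target core can load it on demand (via request_module()) when a port with trtype 254 is enabled, rather than requiring it to be loaded up front. A minimal sketch of the aliasing pattern; the module itself is hypothetical:

#include <linux/module.h>

/* Hypothetical transport stub: the alias suffix matches the fabrics
 * transport type, letting modprobe resolve a request for transport 254
 * to this module without knowing its file name. */
MODULE_ALIAS("nvmet-transport-254");   /* 254 == NVMF_TRTYPE_LOOP */
MODULE_DESCRIPTION("example transport alias stub");
MODULE_LICENSE("GPL v2");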