Lines Matching +full:ctrl +full:- +full:module
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2017-2018 Western Digital Corporation or its
7 * Copyright (c) 2019-2020, Eideticom Inc.
11 #include <linux/module.h>
25 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_passthru_override_id_ctrl() local
26 struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl; in nvmet_passthru_override_id_ctrl()
40 id->cntlid = cpu_to_le16(ctrl->cntlid); in nvmet_passthru_override_id_ctrl()
41 id->ver = cpu_to_le32(ctrl->subsys->ver); in nvmet_passthru_override_id_ctrl()
48 max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9), in nvmet_passthru_override_id_ctrl()
49 pctrl->max_hw_sectors); in nvmet_passthru_override_id_ctrl()
55 max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9), in nvmet_passthru_override_id_ctrl()
58 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; in nvmet_passthru_override_id_ctrl()
60 id->mdts = ilog2(max_hw_sectors) + 9 - page_shift; in nvmet_passthru_override_id_ctrl()
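The MDTS value here is a power of two in units of the controller's minimum memory page size (CAP.MPSMIN), which is why the code converts max_hw_sectors from 512-byte sectors with the +9 / -page_shift terms. A minimal user-space sketch of the same arithmetic, using hypothetical values (max_hw_sectors of 1024 and a 4 KiB minimum page size):

#include <stdio.h>

/* simple ilog2 for positive values, standing in for the kernel helper */
static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int max_hw_sectors = 1024; /* hypothetical: 512 KiB in 512-byte sectors */
        unsigned int page_shift = 12;       /* hypothetical: CAP.MPSMIN of 0 -> 4 KiB pages */
        unsigned int mdts = ilog2_u32(max_hw_sectors) + 9 - page_shift;

        /* MDTS is 2^mdts minimum-sized pages: here 2^7 * 4 KiB = 512 KiB */
        printf("mdts = %u (%u KiB)\n", mdts, (1u << mdts) * 4);
        return 0;
}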
62 id->acl = 3; in nvmet_passthru_override_id_ctrl()
67 id->aerl = NVMET_ASYNC_EVENTS - 1; in nvmet_passthru_override_id_ctrl()
69 /* emulate kas since most PCIe ctrls don't support kas */ in nvmet_passthru_override_id_ctrl()
70 id->kas = cpu_to_le16(NVMET_KAS); in nvmet_passthru_override_id_ctrl()
73 id->hmpre = 0; in nvmet_passthru_override_id_ctrl()
74 id->hmmin = 0; in nvmet_passthru_override_id_ctrl()
76 id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes); in nvmet_passthru_override_id_ctrl()
77 id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); in nvmet_passthru_override_id_ctrl()
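SQES and CQES pack the maximum (upper nibble) and required (lower nibble) queue entry sizes as powers of two, so the min_t() clamps above cap the advertised encodings at 0x66 (64-byte submission entries) and 0x44 (16-byte completion entries). An illustrative decode of that encoding, outside the kernel:

#include <stdio.h>

int main(void)
{
        unsigned char sqes = (0x6 << 4) | 0x6; /* 2^6 = 64-byte submission entries */
        unsigned char cqes = (0x4 << 4) | 0x4; /* 2^4 = 16-byte completion entries */

        printf("SQE max %u bytes, required %u bytes\n",
               1u << (sqes >> 4), 1u << (sqes & 0xf));
        printf("CQE max %u bytes, required %u bytes\n",
               1u << (cqes >> 4), 1u << (cqes & 0xf));
        return 0;
}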
78 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); in nvmet_passthru_override_id_ctrl()
81 id->fuses = 0; in nvmet_passthru_override_id_ctrl()
83 id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ in nvmet_passthru_override_id_ctrl()
84 if (ctrl->ops->flags & NVMF_KEYED_SGLS) in nvmet_passthru_override_id_ctrl()
85 id->sgls |= cpu_to_le32(1 << 2); in nvmet_passthru_override_id_ctrl()
86 if (req->port->inline_data_size) in nvmet_passthru_override_id_ctrl()
87 id->sgls |= cpu_to_le32(1 << 20); in nvmet_passthru_override_id_ctrl()
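The SGLS word is built up bit by bit: bit 0 advertises SGL support, bit 2 is set when the transport uses keyed SGL data block descriptors (NVMF_KEYED_SGLS), and bit 20 is set when the port accepts inline (in-capsule) data. A small sketch of the same composition with hypothetical inputs and illustrative macro names:

#include <stdio.h>
#include <stdint.h>

#define SGLS_SUPPORTED        (1u << 0)  /* SGLs supported */
#define SGLS_KEYED_DATA_BLOCK (1u << 2)  /* keyed SGL data block descriptor */
#define SGLS_INLINE_DATA      (1u << 20) /* port accepts in-capsule data */

int main(void)
{
        int keyed_sgls = 1;           /* hypothetical: transport sets NVMF_KEYED_SGLS */
        int inline_data_size = 8192;  /* hypothetical port inline_data_size */
        uint32_t sgls = SGLS_SUPPORTED;

        if (keyed_sgls)
                sgls |= SGLS_KEYED_DATA_BLOCK;
        if (inline_data_size)
                sgls |= SGLS_INLINE_DATA;

        printf("sgls = 0x%08x\n", (unsigned int)sgls);
        return 0;
}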
90 * When the passthru controller is set up using the nvme-loop transport it will in nvmet_passthru_override_id_ctrl()
91 * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in in nvmet_passthru_override_id_ctrl()
92 * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl() in nvmet_passthru_override_id_ctrl()
94 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. in nvmet_passthru_override_id_ctrl()
96 memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); in nvmet_passthru_override_id_ctrl()
98 /* use fabric id-ctrl values */ in nvmet_passthru_override_id_ctrl()
99 id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + in nvmet_passthru_override_id_ctrl()
100 req->port->inline_data_size) / 16); in nvmet_passthru_override_id_ctrl()
101 id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); in nvmet_passthru_override_id_ctrl()
103 id->msdbd = ctrl->ops->msdbd; in nvmet_passthru_override_id_ctrl()
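IOCCSZ and IORCSZ are fabrics-specific Identify Controller fields expressed in 16-byte units: the I/O command capsule must hold the 64-byte SQE plus any in-capsule data the port allows, while the response capsule is only the 16-byte CQE. A worked example with a hypothetical 8 KiB inline data size:

#include <stdio.h>

int main(void)
{
        unsigned int sqe_size = 64;           /* sizeof(struct nvme_command) */
        unsigned int cqe_size = 16;           /* sizeof(struct nvme_completion) */
        unsigned int inline_data_size = 8192; /* hypothetical port setting */

        /* capsule sizes are reported in 16-byte units */
        printf("ioccsz = %u\n", (sqe_size + inline_data_size) / 16); /* 516 */
        printf("iorcsz = %u\n", cqe_size / 16);                      /* 1  */
        return 0;
}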
106 id->cmic |= 1 << 1; in nvmet_passthru_override_id_ctrl()
109 id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS); in nvmet_passthru_override_id_ctrl()
132 for (i = 0; i < (id->nlbaf + 1); i++) in nvmet_passthru_override_id_ns()
133 if (id->lbaf[i].ms) in nvmet_passthru_override_id_ns()
134 memset(&id->lbaf[i], 0, sizeof(id->lbaf[i])); in nvmet_passthru_override_id_ns()
136 id->flbas = id->flbas & ~(1 << 4); in nvmet_passthru_override_id_ns()
143 id->mc = 0; in nvmet_passthru_override_id_ns()
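The namespace override hides metadata from the host: every LBA format with a non-zero metadata size is zeroed, the extended-metadata bit in FLBAS (bit 4) is cleared, and MC is set to 0, since the passthru path does not carry per-block metadata. A self-contained sketch of the same loop over a mocked-up LBA format table (field names illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct lba_format {
        uint16_t ms;    /* metadata size in bytes */
        uint8_t  lbads; /* LBA data size as a power of two */
        uint8_t  rp;    /* relative performance */
};

int main(void)
{
        struct lba_format lbaf[2] = {
                { .ms = 0, .lbads = 9  }, /* 512-byte blocks, no metadata */
                { .ms = 8, .lbads = 12 }, /* 4 KiB blocks + 8 bytes metadata */
        };
        int nlbaf = 1; /* zero-based count, i.e. two formats */
        int i;

        /* hide any metadata-capable format, mirroring the override above */
        for (i = 0; i < nlbaf + 1; i++)
                if (lbaf[i].ms)
                        memset(&lbaf[i], 0, sizeof(lbaf[i]));

        for (i = 0; i < nlbaf + 1; i++)
                printf("lbaf[%d]: ms=%u lbads=%u\n", i, lbaf[i].ms, lbaf[i].lbads);
        return 0;
}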
155 struct request *rq = req->p.rq; in nvmet_passthru_execute_cmd_work()
160 status = nvme_req(rq)->status; in nvmet_passthru_execute_cmd_work()
162 req->cmd->common.opcode == nvme_admin_identify) { in nvmet_passthru_execute_cmd_work()
163 switch (req->cmd->identify.cns) { in nvmet_passthru_execute_cmd_work()
173 req->cqe->result = nvme_req(rq)->result; in nvmet_passthru_execute_cmd_work()
181 struct nvmet_req *req = rq->end_io_data; in nvmet_passthru_req_done()
183 req->cqe->result = nvme_req(rq)->result; in nvmet_passthru_req_done()
184 nvmet_req_complete(req, nvme_req(rq)->status); in nvmet_passthru_req_done()
195 if (req->sg_cnt > BIO_MAX_PAGES) in nvmet_passthru_map_sg()
196 return -EINVAL; in nvmet_passthru_map_sg()
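The whole scatterlist has to fit in a single bio, so anything larger than BIO_MAX_PAGES segments is rejected; this is the same limit the id-ctrl override above applies when clamping max_hw_sectors to BIO_MAX_PAGES << (PAGE_SHIFT - 9). A quick back-of-the-envelope check with the usual constants (BIO_MAX_PAGES of 256, 4 KiB pages):

#include <stdio.h>

int main(void)
{
        unsigned int bio_max_pages = 256; /* BIO_MAX_PAGES in this kernel era */
        unsigned int page_size = 4096;    /* hypothetical PAGE_SIZE */

        /* largest transfer that fits in one bio, matching the mdts clamp */
        printf("max passthru transfer: %u KiB (%u sectors)\n",
               bio_max_pages * page_size / 1024,
               bio_max_pages * (page_size / 512));
        return 0;
}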
198 if (req->cmd->common.opcode == nvme_cmd_flush) in nvmet_passthru_map_sg()
200 else if (nvme_is_write(req->cmd)) in nvmet_passthru_map_sg()
203 bio = bio_alloc(GFP_KERNEL, req->sg_cnt); in nvmet_passthru_map_sg()
204 bio->bi_end_io = bio_put; in nvmet_passthru_map_sg()
205 bio->bi_opf = req_op(rq) | op_flags; in nvmet_passthru_map_sg()
207 for_each_sg(req->sg, sg, req->sg_cnt, i) { in nvmet_passthru_map_sg()
208 if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length, in nvmet_passthru_map_sg()
209 sg->offset) < sg->length) { in nvmet_passthru_map_sg()
211 return -EINVAL; in nvmet_passthru_map_sg()
226 struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req); in nvmet_passthru_execute_cmd() local
227 struct request_queue *q = ctrl->admin_q; in nvmet_passthru_execute_cmd()
234 if (likely(req->sq->qid != 0)) { in nvmet_passthru_execute_cmd()
235 u32 nsid = le32_to_cpu(req->cmd->common.nsid); in nvmet_passthru_execute_cmd()
237 ns = nvme_find_get_ns(ctrl, nsid); in nvmet_passthru_execute_cmd()
244 q = ns->queue; in nvmet_passthru_execute_cmd()
247 rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY); in nvmet_passthru_execute_cmd()
253 if (req->sg_cnt) { in nvmet_passthru_execute_cmd()
268 effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode); in nvmet_passthru_execute_cmd()
269 if (req->p.use_workqueue || effects) { in nvmet_passthru_execute_cmd()
270 INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); in nvmet_passthru_execute_cmd()
271 req->p.rq = rq; in nvmet_passthru_execute_cmd()
272 schedule_work(&req->p.work); in nvmet_passthru_execute_cmd()
274 rq->end_io_data = req; in nvmet_passthru_execute_cmd()
275 blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0, in nvmet_passthru_execute_cmd()
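Commands with effects (or those whose identify data must be rewritten) are completed from a workqueue so the target can post-process in process context; everything else is fired asynchronously and finished from nvmet_passthru_req_done() via the request's end_io_data cookie. A stripped-down, user-space sketch of that end_io_data pattern (all names here are illustrative, not kernel APIs):

#include <stdio.h>

struct fake_request {
        void *end_io_data;                     /* opaque cookie, here the nvmet_req */
        void (*end_io)(struct fake_request *);
};

struct fake_nvmet_req {
        int result;
        int status;
};

static void fake_req_done(struct fake_request *rq)
{
        struct fake_nvmet_req *req = rq->end_io_data;

        /* mirror nvmet_passthru_req_done(): copy the result, then complete */
        printf("completing req: result=%d status=%d\n", req->result, req->status);
}

int main(void)
{
        struct fake_nvmet_req req = { .result = 42, .status = 0 };
        struct fake_request rq = { .end_io_data = &req, .end_io = fake_req_done };

        /* a real block layer would invoke this from completion context */
        rq.end_io(&rq);
        return 0;
}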
300 struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req); in nvmet_passthru_set_host_behaviour() local
309 ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, in nvmet_passthru_set_host_behaviour()
331 req->p.use_workqueue = false; in nvmet_setup_passthru_command()
332 req->execute = nvmet_passthru_execute_cmd; in nvmet_setup_passthru_command()
338 /* Reject any commands with non-sgl flags set (i.e. fused commands) */ in nvmet_parse_passthru_io_cmd()
339 if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL) in nvmet_parse_passthru_io_cmd()
342 switch (req->cmd->common.opcode) { in nvmet_parse_passthru_io_cmd()
367 switch (le32_to_cpu(req->cmd->features.fid)) { in nvmet_passthru_get_set_features()
399 * The Pre-Boot Software Load Count doesn't make much in nvmet_passthru_get_set_features()
412 /* Reject any commands with non-sgl flags set (i.e. fused commands) */ in nvmet_parse_passthru_admin_cmd()
413 if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL) in nvmet_parse_passthru_admin_cmd()
419 if (req->cmd->common.opcode >= nvme_admin_vendor_start) in nvmet_parse_passthru_admin_cmd()
422 switch (req->cmd->common.opcode) { in nvmet_parse_passthru_admin_cmd()
424 req->execute = nvmet_execute_async_event; in nvmet_parse_passthru_admin_cmd()
429 * alive to the non-passthru mode. In future please change this in nvmet_parse_passthru_admin_cmd()
432 req->execute = nvmet_execute_keep_alive; in nvmet_parse_passthru_admin_cmd()
435 switch (le32_to_cpu(req->cmd->features.fid)) { in nvmet_parse_passthru_admin_cmd()
440 req->execute = nvmet_execute_set_features; in nvmet_parse_passthru_admin_cmd()
443 req->execute = nvmet_passthru_set_host_behaviour; in nvmet_parse_passthru_admin_cmd()
450 switch (le32_to_cpu(req->cmd->features.fid)) { in nvmet_parse_passthru_admin_cmd()
455 req->execute = nvmet_execute_get_features; in nvmet_parse_passthru_admin_cmd()
462 switch (req->cmd->identify.cns) { in nvmet_parse_passthru_admin_cmd()
464 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
465 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
468 switch (req->cmd->identify.csi) { in nvmet_parse_passthru_admin_cmd()
470 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
471 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
476 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
477 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
480 switch (req->cmd->identify.csi) { in nvmet_parse_passthru_admin_cmd()
482 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
483 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
500 struct nvme_ctrl *ctrl; in nvmet_passthru_ctrl_enable() local
502 int ret = -EINVAL; in nvmet_passthru_ctrl_enable()
505 mutex_lock(&subsys->lock); in nvmet_passthru_ctrl_enable()
506 if (!subsys->passthru_ctrl_path) in nvmet_passthru_ctrl_enable()
508 if (subsys->passthru_ctrl) in nvmet_passthru_ctrl_enable()
511 if (subsys->nr_namespaces) { in nvmet_passthru_ctrl_enable()
516 file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0); in nvmet_passthru_ctrl_enable()
522 ctrl = nvme_ctrl_from_file(file); in nvmet_passthru_ctrl_enable()
523 if (!ctrl) { in nvmet_passthru_ctrl_enable()
525 subsys->passthru_ctrl_path); in nvmet_passthru_ctrl_enable()
530 old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL, in nvmet_passthru_ctrl_enable()
540 subsys->passthru_ctrl = ctrl; in nvmet_passthru_ctrl_enable()
541 subsys->ver = ctrl->vs; in nvmet_passthru_ctrl_enable()
543 if (subsys->ver < NVME_VS(1, 2, 1)) { in nvmet_passthru_ctrl_enable()
545 NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver), in nvmet_passthru_ctrl_enable()
546 NVME_TERTIARY(subsys->ver)); in nvmet_passthru_ctrl_enable()
547 subsys->ver = NVME_VS(1, 2, 1); in nvmet_passthru_ctrl_enable()
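The version word packs the major, minor and tertiary numbers into bits 31:16, 15:8 and 7:0, so controllers reporting something older than 1.2.1 are advertised as 1.2.1 (the revision that introduced the subsystem NQN field this target relies on). A small decode of the encoding, with NVME_VS written as in the kernel's nvme.h:

#include <stdio.h>

#define NVME_VS(major, minor, tertiary) \
        (((major) << 16) | ((minor) << 8) | (tertiary))

int main(void)
{
        unsigned int ver = NVME_VS(1, 2, 1);

        printf("version word 0x%06x -> %u.%u.%u\n", ver,
               ver >> 16, (ver >> 8) & 0xff, ver & 0xff);
        return 0;
}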
549 nvme_get_ctrl(ctrl); in nvmet_passthru_ctrl_enable()
550 __module_get(subsys->passthru_ctrl->ops->module); in nvmet_passthru_ctrl_enable()
556 mutex_unlock(&subsys->lock); in nvmet_passthru_ctrl_enable()
562 if (subsys->passthru_ctrl) { in __nvmet_passthru_ctrl_disable()
563 xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid); in __nvmet_passthru_ctrl_disable()
564 module_put(subsys->passthru_ctrl->ops->module); in __nvmet_passthru_ctrl_disable()
565 nvme_put_ctrl(subsys->passthru_ctrl); in __nvmet_passthru_ctrl_disable()
567 subsys->passthru_ctrl = NULL; in __nvmet_passthru_ctrl_disable()
568 subsys->ver = NVMET_DEFAULT_VS; in __nvmet_passthru_ctrl_disable()
573 mutex_lock(&subsys->lock); in nvmet_passthru_ctrl_disable()
575 mutex_unlock(&subsys->lock); in nvmet_passthru_ctrl_disable()
580 mutex_lock(&subsys->lock); in nvmet_passthru_subsys_free()
582 mutex_unlock(&subsys->lock); in nvmet_passthru_subsys_free()
583 kfree(subsys->passthru_ctrl_path); in nvmet_passthru_subsys_free()