Lines Matching +full:host +full:- +full:id

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2017-2018 Western Digital Corporation or its
7 * Copyright (c) 2019-2020, Eideticom Inc.
13 #include "../host/nvme.h"
29 if (!nvme_multi_css(ctrl->subsys->passthru_ctrl)) in nvmet_passthrough_override_cap()
30 ctrl->cap &= ~(1ULL << 43); in nvmet_passthrough_override_cap()
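/*
 * Note: CAP bit 43 is CSS bit 6 in the NVMe spec, which advertises
 * support for more than one I/O command set; it is cleared here
 * whenever the underlying passthru controller lacks multi-CSS support.
 */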
35 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_passthru_override_id_ctrl()
36 struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl; in nvmet_passthru_override_id_ctrl()
38 struct nvme_id_ctrl *id; in nvmet_passthru_override_id_ctrl() local
42 id = kzalloc(sizeof(*id), GFP_KERNEL); in nvmet_passthru_override_id_ctrl()
43 if (!id) in nvmet_passthru_override_id_ctrl()
46 status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id)); in nvmet_passthru_override_id_ctrl()
50 id->cntlid = cpu_to_le16(ctrl->cntlid); in nvmet_passthru_override_id_ctrl()
51 id->ver = cpu_to_le32(ctrl->subsys->ver); in nvmet_passthru_override_id_ctrl()
55 * which depends on the host's memory fragmentation. To solve this, in nvmet_passthru_override_id_ctrl()
58 max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9), in nvmet_passthru_override_id_ctrl()
59 pctrl->max_hw_sectors); in nvmet_passthru_override_id_ctrl()
65 max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9), in nvmet_passthru_override_id_ctrl()
68 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; in nvmet_passthru_override_id_ctrl()
70 id->mdts = ilog2(max_hw_sectors) + 9 - page_shift; in nvmet_passthru_override_id_ctrl()
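/*
 * Worked example: max_hw_sectors = 2048 (1 MiB in 512-byte sectors) with
 * MPSMIN = 0 (4 KiB pages) gives page_shift = 12, so
 * mdts = ilog2(2048) + 9 - 12 = 8, i.e. 2^8 * 4 KiB = 1 MiB.
 */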
72 id->acl = 3; in nvmet_passthru_override_id_ctrl()
77 id->aerl = NVMET_ASYNC_EVENTS - 1; in nvmet_passthru_override_id_ctrl()
80 id->kas = cpu_to_le16(NVMET_KAS); in nvmet_passthru_override_id_ctrl()
82 /* don't support host memory buffer */ in nvmet_passthru_override_id_ctrl()
83 id->hmpre = 0; in nvmet_passthru_override_id_ctrl()
84 id->hmmin = 0; in nvmet_passthru_override_id_ctrl()
86 id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes); in nvmet_passthru_override_id_ctrl()
87 id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); in nvmet_passthru_override_id_ctrl()
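/*
 * SQES/CQES pack two log2 entry sizes per byte: the maximum size in the
 * high nibble and the required (minimum) size in the low nibble. 0x66
 * caps submission queue entries at 2^6 = 64 bytes and 0x44 caps
 * completion queue entries at 2^4 = 16 bytes, the sizes the fabrics
 * transports exchange.
 */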
88 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); in nvmet_passthru_override_id_ctrl()
91 id->fuses = 0; in nvmet_passthru_override_id_ctrl()
93 id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ in nvmet_passthru_override_id_ctrl()
94 if (ctrl->ops->flags & NVMF_KEYED_SGLS) in nvmet_passthru_override_id_ctrl()
95 id->sgls |= cpu_to_le32(1 << 2); in nvmet_passthru_override_id_ctrl()
96 if (req->port->inline_data_size) in nvmet_passthru_override_id_ctrl()
97 id->sgls |= cpu_to_le32(1 << 20); in nvmet_passthru_override_id_ctrl()
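/*
 * In the SGLS field, bit 0 advertises basic SGL support, bit 2 the keyed
 * SGL Data Block descriptor used by RDMA transports, and bit 20 SGL
 * offset support (per NVMe-oF), which is what enables in-capsule
 * (inline) data.
 */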
100 * When the passthru controller is set up using the nvme-loop transport it will in nvmet_passthru_override_id_ctrl()
102 * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl() in nvmet_passthru_override_id_ctrl()
104 * override the passthru-ctrl subsysnqn with the target ctrl subsysnqn. in nvmet_passthru_override_id_ctrl()
106 memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); in nvmet_passthru_override_id_ctrl()
108 /* use fabric id-ctrl values */ in nvmet_passthru_override_id_ctrl()
109 id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + in nvmet_passthru_override_id_ctrl()
110 req->port->inline_data_size) / 16); in nvmet_passthru_override_id_ctrl()
111 id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); in nvmet_passthru_override_id_ctrl()
113 id->msdbd = ctrl->ops->msdbd; in nvmet_passthru_override_id_ctrl()
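/*
 * Example: sizeof(struct nvme_command) is 64 bytes, so a port with an
 * 8 KiB inline data size yields ioccsz = (64 + 8192) / 16 = 516 units
 * of 16 bytes, while iorcsz = 16 / 16 = 1 for the 16-byte completion.
 */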
116 id->cmic |= 1 << 1; in nvmet_passthru_override_id_ctrl()
119 id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS); in nvmet_passthru_override_id_ctrl()
121 status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl)); in nvmet_passthru_override_id_ctrl()
124 kfree(id); in nvmet_passthru_override_id_ctrl()
131 struct nvme_id_ns *id; in nvmet_passthru_override_id_ns() local
134 id = kzalloc(sizeof(*id), GFP_KERNEL); in nvmet_passthru_override_id_ns()
135 if (!id) in nvmet_passthru_override_id_ns()
138 status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns)); in nvmet_passthru_override_id_ns()
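/*
 * The target passthru path cannot carry per-block metadata, so every
 * LBA format below that declares a metadata size is zeroed, and the
 * extended-LBA bit (bit 4) of FLBAS is cleared to match.
 */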
142 for (i = 0; i < (id->nlbaf + 1); i++) in nvmet_passthru_override_id_ns()
143 if (id->lbaf[i].ms) in nvmet_passthru_override_id_ns()
144 memset(&id->lbaf[i], 0, sizeof(id->lbaf[i])); in nvmet_passthru_override_id_ns()
146 id->flbas = id->flbas & ~(1 << 4); in nvmet_passthru_override_id_ns()
153 id->mc = 0; in nvmet_passthru_override_id_ns()
155 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_passthru_override_id_ns()
158 kfree(id); in nvmet_passthru_override_id_ns()
165 struct request *rq = req->p.rq; in nvmet_passthru_execute_cmd_work()
171 req->cmd->common.opcode == nvme_admin_identify) { in nvmet_passthru_execute_cmd_work()
172 switch (req->cmd->identify.cns) { in nvmet_passthru_execute_cmd_work()
183 req->cqe->result = nvme_req(rq)->result; in nvmet_passthru_execute_cmd_work()
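/*
 * The work handler catches completed Identify commands so the override
 * helpers above can rewrite the returned data before the result is
 * propagated back to the host.
 */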
191 struct nvmet_req *req = rq->end_io_data; in nvmet_passthru_req_done()
193 req->cqe->result = nvme_req(rq)->result; in nvmet_passthru_req_done()
194 nvmet_req_complete(req, nvme_req(rq)->status); in nvmet_passthru_req_done()
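/*
 * nvmet_passthru_map_sg() below turns the target's scatterlist into a
 * bio for the passthru request; requests with more scatterlist entries
 * than BIO_MAX_VECS are rejected rather than split, and small transfers
 * reuse the preallocated inline bio to avoid an allocation.
 */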
204 if (req->sg_cnt > BIO_MAX_VECS) in nvmet_passthru_map_sg()
205 return -EINVAL; in nvmet_passthru_map_sg()
208 bio = &req->p.inline_bio; in nvmet_passthru_map_sg()
209 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); in nvmet_passthru_map_sg()
211 bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt)); in nvmet_passthru_map_sg()
212 bio->bi_end_io = bio_put; in nvmet_passthru_map_sg()
214 bio->bi_opf = req_op(rq); in nvmet_passthru_map_sg()
216 for_each_sg(req->sg, sg, req->sg_cnt, i) { in nvmet_passthru_map_sg()
217 if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length, in nvmet_passthru_map_sg()
218 sg->offset) < sg->length) { in nvmet_passthru_map_sg()
220 return -EINVAL; in nvmet_passthru_map_sg()
224 blk_rq_bio_prep(rq, bio, req->sg_cnt); in nvmet_passthru_map_sg()
231 struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl; in nvmet_passthru_execute_cmd()
232 struct request_queue *q = ctrl->admin_q; in nvmet_passthru_execute_cmd()
240 if (likely(req->sq->qid != 0)) { in nvmet_passthru_execute_cmd()
241 u32 nsid = le32_to_cpu(req->cmd->common.nsid); in nvmet_passthru_execute_cmd()
250 q = ns->queue; in nvmet_passthru_execute_cmd()
251 timeout = nvmet_req_subsys(req)->io_timeout; in nvmet_passthru_execute_cmd()
253 timeout = nvmet_req_subsys(req)->admin_timeout; in nvmet_passthru_execute_cmd()
256 rq = nvme_alloc_request(q, req->cmd, 0); in nvmet_passthru_execute_cmd()
263 rq->timeout = timeout; in nvmet_passthru_execute_cmd()
265 if (req->sg_cnt) { in nvmet_passthru_execute_cmd()
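/*
 * Commands that nvme_command_effects() flags, or that were marked
 * use_workqueue at parse time, are finished from a workqueue where the
 * handler may sleep; everything else is dispatched asynchronously and
 * completed from the nvmet_passthru_req_done() callback.
 */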
280 effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode); in nvmet_passthru_execute_cmd()
281 if (req->p.use_workqueue || effects) { in nvmet_passthru_execute_cmd()
282 INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); in nvmet_passthru_execute_cmd()
283 req->p.rq = rq; in nvmet_passthru_execute_cmd()
284 schedule_work(&req->p.work); in nvmet_passthru_execute_cmd()
286 rq->end_io_data = req; in nvmet_passthru_execute_cmd()
287 blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0, in nvmet_passthru_execute_cmd()
306 * We need to emulate the Set Features (Host Behaviour Support) command:
307 * the behaviour requested by the target's host must match the behaviour
308 * already requested by the passthru device's host, and the command fails otherwise.
312 struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl; in nvmet_passthru_set_host_behaviour()
313 struct nvme_feat_host_behavior *host; in nvmet_passthru_set_host_behaviour() local
317 host = kzalloc(sizeof(*host) * 2, GFP_KERNEL); in nvmet_passthru_set_host_behaviour()
318 if (!host) in nvmet_passthru_set_host_behaviour()
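/*
 * Two copies sit side by side in one allocation: host[0] receives the
 * passthru device's current Host Behavior Support feature and host[1]
 * the value the target's host requested; memcmp() then requires that
 * the two match, failing the command otherwise.
 */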
322 host, sizeof(*host), NULL); in nvmet_passthru_set_host_behaviour()
326 status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host)); in nvmet_passthru_set_host_behaviour()
330 if (memcmp(&host[0], &host[1], sizeof(host[0]))) { in nvmet_passthru_set_host_behaviour()
331 pr_warn("target host has requested different behaviour from the local host\n"); in nvmet_passthru_set_host_behaviour()
336 kfree(host); in nvmet_passthru_set_host_behaviour()
343 req->p.use_workqueue = false; in nvmet_setup_passthru_command()
344 req->execute = nvmet_passthru_execute_cmd; in nvmet_setup_passthru_command()
350 /* Reject any commands with non-SGL flags set (i.e. fused commands) */ in nvmet_parse_passthru_io_cmd()
351 if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL) in nvmet_parse_passthru_io_cmd()
354 switch (req->cmd->common.opcode) { in nvmet_parse_passthru_io_cmd()
379 switch (le32_to_cpu(req->cmd->features.fid)) { in nvmet_passthru_get_set_features()
411 * The Pre-Boot Software Load Count doesn't make much in nvmet_passthru_get_set_features()
424 /* Reject any commands with non-SGL flags set (i.e. fused commands) */ in nvmet_parse_passthru_admin_cmd()
425 if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL) in nvmet_parse_passthru_admin_cmd()
431 if (req->cmd->common.opcode >= nvme_admin_vendor_start) in nvmet_parse_passthru_admin_cmd()
434 switch (req->cmd->common.opcode) { in nvmet_parse_passthru_admin_cmd()
436 req->execute = nvmet_execute_async_event; in nvmet_parse_passthru_admin_cmd()
441 * alive to the non-passthru mode. In the future, please change this in nvmet_parse_passthru_admin_cmd()
444 req->execute = nvmet_execute_keep_alive; in nvmet_parse_passthru_admin_cmd()
447 switch (le32_to_cpu(req->cmd->features.fid)) { in nvmet_parse_passthru_admin_cmd()
452 req->execute = nvmet_execute_set_features; in nvmet_parse_passthru_admin_cmd()
455 req->execute = nvmet_passthru_set_host_behaviour; in nvmet_parse_passthru_admin_cmd()
462 switch (le32_to_cpu(req->cmd->features.fid)) { in nvmet_parse_passthru_admin_cmd()
467 req->execute = nvmet_execute_get_features; in nvmet_parse_passthru_admin_cmd()
474 switch (req->cmd->identify.cns) { in nvmet_parse_passthru_admin_cmd()
476 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
477 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
480 switch (req->cmd->identify.csi) { in nvmet_parse_passthru_admin_cmd()
482 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
483 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
488 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
489 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
492 switch (req->cmd->identify.csi) { in nvmet_parse_passthru_admin_cmd()
494 req->execute = nvmet_passthru_execute_cmd; in nvmet_parse_passthru_admin_cmd()
495 req->p.use_workqueue = true; in nvmet_parse_passthru_admin_cmd()
514 int ret = -EINVAL; in nvmet_passthru_ctrl_enable()
517 mutex_lock(&subsys->lock); in nvmet_passthru_ctrl_enable()
518 if (!subsys->passthru_ctrl_path) in nvmet_passthru_ctrl_enable()
520 if (subsys->passthru_ctrl) in nvmet_passthru_ctrl_enable()
523 if (subsys->nr_namespaces) { in nvmet_passthru_ctrl_enable()
528 file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0); in nvmet_passthru_ctrl_enable()
537 subsys->passthru_ctrl_path); in nvmet_passthru_ctrl_enable()
542 old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL, in nvmet_passthru_ctrl_enable()
552 subsys->passthru_ctrl = ctrl; in nvmet_passthru_ctrl_enable()
553 subsys->ver = ctrl->vs; in nvmet_passthru_ctrl_enable()
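/*
 * NVMe over Fabrics is specified against NVMe 1.2.1 or later, so an
 * older passthru controller version is clamped: the target warns and
 * advertises 1.2.1 instead of the device's native version.
 */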
555 if (subsys->ver < NVME_VS(1, 2, 1)) { in nvmet_passthru_ctrl_enable()
557 NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver), in nvmet_passthru_ctrl_enable()
558 NVME_TERTIARY(subsys->ver)); in nvmet_passthru_ctrl_enable()
559 subsys->ver = NVME_VS(1, 2, 1); in nvmet_passthru_ctrl_enable()
562 __module_get(subsys->passthru_ctrl->ops->module); in nvmet_passthru_ctrl_enable()
568 mutex_unlock(&subsys->lock); in nvmet_passthru_ctrl_enable()
574 if (subsys->passthru_ctrl) { in __nvmet_passthru_ctrl_disable()
575 xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid); in __nvmet_passthru_ctrl_disable()
576 module_put(subsys->passthru_ctrl->ops->module); in __nvmet_passthru_ctrl_disable()
577 nvme_put_ctrl(subsys->passthru_ctrl); in __nvmet_passthru_ctrl_disable()
579 subsys->passthru_ctrl = NULL; in __nvmet_passthru_ctrl_disable()
580 subsys->ver = NVMET_DEFAULT_VS; in __nvmet_passthru_ctrl_disable()
585 mutex_lock(&subsys->lock); in nvmet_passthru_ctrl_disable()
587 mutex_unlock(&subsys->lock); in nvmet_passthru_ctrl_disable()
592 mutex_lock(&subsys->lock); in nvmet_passthru_subsys_free()
594 mutex_unlock(&subsys->lock); in nvmet_passthru_subsys_free()
595 kfree(subsys->passthru_ctrl_path); in nvmet_passthru_subsys_free()