Lines Matching +full:ctrl +full:- +full:len

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
16 u32 len = le16_to_cpu(cmd->get_log_page.numdu); in nvmet_get_log_page_len() local
18 len <<= 16; in nvmet_get_log_page_len()
19 len += le16_to_cpu(cmd->get_log_page.numdl); in nvmet_get_log_page_len()
21 len += 1; in nvmet_get_log_page_len()
22 len *= sizeof(u32); in nvmet_get_log_page_len()
24 return len; in nvmet_get_log_page_len()
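
The numdu/numdl pair above is the split, 0's-based dword count from the Get Log Page SQE: the byte length is (count + 1) * 4. A minimal standalone sketch of the same math (log_page_bytes is a hypothetical name, not from the file):

    /* Hypothetical helper: rebuild the Get Log Page byte count from the
     * split NUMDU/NUMDL fields, which form a 0's-based dword count. */
    static inline u32 log_page_bytes(u16 numdu, u16 numdl)
    {
            u32 numd = ((u32)numdu << 16) | numdl;

            return (numd + 1) * sizeof(u32);        /* dwords to bytes */
    }
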
29 return le64_to_cpu(cmd->get_log_page.lpo); in nvmet_get_log_page_offset()
34 nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); in nvmet_execute_get_log_page_noop()
39 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_page_error() local
45 spin_lock_irqsave(&ctrl->error_lock, flags); in nvmet_execute_get_log_page_error()
46 slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS; in nvmet_execute_get_log_page_error()
49 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot], in nvmet_execute_get_log_page_error()
54 slot = NVMET_ERROR_LOG_SLOTS - 1; in nvmet_execute_get_log_page_error()
56 slot--; in nvmet_execute_get_log_page_error()
59 spin_unlock_irqrestore(&ctrl->error_lock, flags); in nvmet_execute_get_log_page_error()
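
The slots above form a fixed-size ring: err_counter % NVMET_ERROR_LOG_SLOTS locates the most recent entry, and the copy loop walks backwards with wraparound so entries come out newest-first. The traversal pattern in isolation (visit() and nslots are hypothetical stand-ins):

    /* Sketch of the backwards ring walk used above; visit() and
     * nslots are stand-ins, not names from the file. */
    static void walk_newest_first(u64 counter, unsigned int nslots)
    {
            unsigned int slot = counter % nslots;   /* newest entry */
            unsigned int i;

            for (i = 0; i < nslots; i++) {
                    visit(slot);
                    slot = slot ? slot - 1 : nslots - 1;    /* wrap */
            }
    }
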
69 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid); in nvmet_get_smart_log_nsid()
72 le32_to_cpu(req->cmd->get_log_page.nsid)); in nvmet_get_smart_log_nsid()
73 req->error_loc = offsetof(struct nvme_rw_command, nsid); in nvmet_get_smart_log_nsid()
78 if (!ns->bdev) in nvmet_get_smart_log_nsid()
81 host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]); in nvmet_get_smart_log_nsid()
82 data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, in nvmet_get_smart_log_nsid()
84 host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]); in nvmet_get_smart_log_nsid()
85 data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, in nvmet_get_smart_log_nsid()
88 put_unaligned_le64(host_reads, &slog->host_reads[0]); in nvmet_get_smart_log_nsid()
89 put_unaligned_le64(data_units_read, &slog->data_units_read[0]); in nvmet_get_smart_log_nsid()
90 put_unaligned_le64(host_writes, &slog->host_writes[0]); in nvmet_get_smart_log_nsid()
91 put_unaligned_le64(data_units_written, &slog->data_units_written[0]); in nvmet_get_smart_log_nsid()
104 struct nvmet_ctrl *ctrl; in nvmet_get_smart_log_all() local
106 ctrl = req->sq->ctrl; in nvmet_get_smart_log_all()
109 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { in nvmet_get_smart_log_all()
111 if (!ns->bdev) in nvmet_get_smart_log_all()
113 host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]); in nvmet_get_smart_log_all()
115 part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000); in nvmet_get_smart_log_all()
116 host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]); in nvmet_get_smart_log_all()
118 part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000); in nvmet_get_smart_log_all()
123 put_unaligned_le64(host_reads, &slog->host_reads[0]); in nvmet_get_smart_log_all()
124 put_unaligned_le64(data_units_read, &slog->data_units_read[0]); in nvmet_get_smart_log_all()
125 put_unaligned_le64(host_writes, &slog->host_writes[0]); in nvmet_get_smart_log_all()
126 put_unaligned_le64(data_units_written, &slog->data_units_written[0]); in nvmet_get_smart_log_all()
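
In both SMART paths, "data units" follow the NVMe convention: thousands of 512-byte units, rounded up, stored as the low half of a 128-bit little-endian counter (hence the put_unaligned_le64() into byte 0 of each 16-byte field). The conversion on its own:

    /* NVMe SMART data units: thousands of 512-byte units, rounded up.
     * sectors is a 512-byte sector count as read via part_stat_read(). */
    static inline u64 smart_data_units(u64 sectors)
    {
            return DIV_ROUND_UP(sectors, 1000);
    }
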
137 if (req->data_len != sizeof(*log)) in nvmet_execute_get_log_page_smart()
144 if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL)) in nvmet_execute_get_log_page_smart()
151 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags); in nvmet_execute_get_log_page_smart()
152 put_unaligned_le64(req->sq->ctrl->err_counter, in nvmet_execute_get_log_page_smart()
153 &log->num_err_log_entries); in nvmet_execute_get_log_page_smart()
154 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags); in nvmet_execute_get_log_page_smart()
172 log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
173 log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
174 log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
175 log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
176 log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
177 log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
178 log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
180 log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
181 log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
182 log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
183 log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
184 log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0); in nvmet_execute_get_log_cmd_effects_ns()
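
Each effects-log entry above is a 32-bit field whose bit 0 (CSUPP) marks the opcode as supported; the target sets only that bit. A hedged host-side check, assuming the usual struct nvme_effects_log layout with acs[256]/iocs[256] arrays:

    /* Sketch: test CSUPP (bit 0) for an opcode in the effects log. */
    static bool opcode_supported(const struct nvme_effects_log *log,
                                 u8 opcode, bool admin)
    {
            const __le32 *arr = admin ? log->acs : log->iocs;

            return le32_to_cpu(arr[opcode]) & (1 << 0);
    }
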
195 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_changed_ns() local
197 size_t len; in nvmet_execute_get_log_changed_ns() local
199 if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) in nvmet_execute_get_log_changed_ns()
202 mutex_lock(&ctrl->lock); in nvmet_execute_get_log_changed_ns()
203 if (ctrl->nr_changed_ns == U32_MAX) in nvmet_execute_get_log_changed_ns()
204 len = sizeof(__le32); in nvmet_execute_get_log_changed_ns()
206 len = ctrl->nr_changed_ns * sizeof(__le32); in nvmet_execute_get_log_changed_ns()
207 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len); in nvmet_execute_get_log_changed_ns()
209 status = nvmet_zero_sgl(req, len, req->data_len - len); in nvmet_execute_get_log_changed_ns()
210 ctrl->nr_changed_ns = 0; in nvmet_execute_get_log_changed_ns()
212 mutex_unlock(&ctrl->lock); in nvmet_execute_get_log_changed_ns()
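
nr_changed_ns == U32_MAX is the overflow sentinel: when more namespace-change events accumulated than the list holds, only a single dword is returned (0xffffffff per the spec, assuming the list head was set to that sentinel elsewhere in the file), the rest of the buffer is zero-filled via nvmet_zero_sgl(), and the counter resets under ctrl->lock. On the host side the sentinel reads as:

    /* Sketch: host-side view of the log built above;
     * rescan_all_namespaces() is a hypothetical helper. */
    u32 first = le32_to_cpu(list[0]);

    if (first == 0xffffffff)        /* list overflowed on the target */
            rescan_all_namespaces();
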
220 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_format_ana_group() local
224 if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { in nvmet_format_ana_group()
226 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) in nvmet_format_ana_group()
227 if (ns->anagrpid == grpid) in nvmet_format_ana_group()
228 desc->nsids[count++] = cpu_to_le32(ns->nsid); in nvmet_format_ana_group()
232 desc->grpid = cpu_to_le32(grpid); in nvmet_format_ana_group()
233 desc->nnsids = cpu_to_le32(count); in nvmet_format_ana_group()
234 desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt); in nvmet_format_ana_group()
235 desc->state = req->port->ana_state[grpid]; in nvmet_format_ana_group()
236 memset(desc->rsvd17, 0, sizeof(desc->rsvd17)); in nvmet_format_ana_group()
245 size_t len; in nvmet_execute_get_log_page_ana() local
260 len = nvmet_format_ana_group(req, grpid, desc); in nvmet_execute_get_log_page_ana()
261 status = nvmet_copy_to_sgl(req, offset, desc, len); in nvmet_execute_get_log_page_ana()
264 offset += len; in nvmet_execute_get_log_page_ana()
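
The ANA log is a header plus one variable-length descriptor per group: with NVME_ANA_LOG_RGO set in lsp the nsid scan is skipped and count stays 0, so each descriptor shrinks to its header, which is why the copy loop advances offset by the returned length. The length rule in isolation, as a sketch:

    /* Sketch: per-group descriptor length matching the offset
     * accumulation above; rgo mirrors the NVME_ANA_LOG_RGO lsp bit. */
    static size_t ana_desc_len(u32 nnsids, bool rgo)
    {
            return sizeof(struct nvme_ana_group_desc) +
                   (rgo ? 0 : nnsids * sizeof(__le32));
    }
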
287 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_ctrl() local
299 id->vid = 0; in nvmet_execute_identify_ctrl()
300 id->ssvid = 0; in nvmet_execute_identify_ctrl()
302 memset(id->sn, ' ', sizeof(id->sn)); in nvmet_execute_identify_ctrl()
303 bin2hex(id->sn, &ctrl->subsys->serial, in nvmet_execute_identify_ctrl()
304 min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); in nvmet_execute_identify_ctrl()
305 memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' '); in nvmet_execute_identify_ctrl()
306 memcpy_and_pad(id->fr, sizeof(id->fr), in nvmet_execute_identify_ctrl()
309 id->rab = 6; in nvmet_execute_identify_ctrl()
317 id->cmic = (1 << 0) | (1 << 1) | (1 << 3); in nvmet_execute_identify_ctrl()
320 id->mdts = 0; in nvmet_execute_identify_ctrl()
321 id->cntlid = cpu_to_le16(ctrl->cntlid); in nvmet_execute_identify_ctrl()
322 id->ver = cpu_to_le32(ctrl->subsys->ver); in nvmet_execute_identify_ctrl()
325 id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL); in nvmet_execute_identify_ctrl()
326 id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT | in nvmet_execute_identify_ctrl()
329 id->oacs = 0; in nvmet_execute_identify_ctrl()
336 id->acl = 3; in nvmet_execute_identify_ctrl()
338 id->aerl = NVMET_ASYNC_EVENTS - 1; in nvmet_execute_identify_ctrl()
340 /* first slot is read-only, only one slot supported */ in nvmet_execute_identify_ctrl()
341 id->frmw = (1 << 0) | (1 << 1); in nvmet_execute_identify_ctrl()
342 id->lpa = (1 << 0) | (1 << 1) | (1 << 2); in nvmet_execute_identify_ctrl()
343 id->elpe = NVMET_ERROR_LOG_SLOTS - 1; in nvmet_execute_identify_ctrl()
344 id->npss = 0; in nvmet_execute_identify_ctrl()
346 /* We support keep-alive timeout in granularity of seconds */ in nvmet_execute_identify_ctrl()
347 id->kas = cpu_to_le16(NVMET_KAS); in nvmet_execute_identify_ctrl()
349 id->sqes = (0x6 << 4) | 0x6; in nvmet_execute_identify_ctrl()
350 id->cqes = (0x4 << 4) | 0x4; in nvmet_execute_identify_ctrl()
352 /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ in nvmet_execute_identify_ctrl()
353 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); in nvmet_execute_identify_ctrl()
355 id->nn = cpu_to_le32(ctrl->subsys->max_nsid); in nvmet_execute_identify_ctrl()
356 id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); in nvmet_execute_identify_ctrl()
357 id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | in nvmet_execute_identify_ctrl()
361 id->vwc = NVME_CTRL_VWC_PRESENT; in nvmet_execute_identify_ctrl()
367 id->awun = 0; in nvmet_execute_identify_ctrl()
368 id->awupf = 0; in nvmet_execute_identify_ctrl()
370 id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ in nvmet_execute_identify_ctrl()
371 if (ctrl->ops->has_keyed_sgls) in nvmet_execute_identify_ctrl()
372 id->sgls |= cpu_to_le32(1 << 2); in nvmet_execute_identify_ctrl()
373 if (req->port->inline_data_size) in nvmet_execute_identify_ctrl()
374 id->sgls |= cpu_to_le32(1 << 20); in nvmet_execute_identify_ctrl()
376 strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); in nvmet_execute_identify_ctrl()
378 /* Max command capsule size is sqe + single page of in-capsule data */ in nvmet_execute_identify_ctrl()
379 id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + in nvmet_execute_identify_ctrl()
380 req->port->inline_data_size) / 16); in nvmet_execute_identify_ctrl()
382 id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); in nvmet_execute_identify_ctrl()
384 id->msdbd = ctrl->ops->msdbd; in nvmet_execute_identify_ctrl()
386 id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4); in nvmet_execute_identify_ctrl()
387 id->anatt = 10; /* random value */ in nvmet_execute_identify_ctrl()
388 id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS); in nvmet_execute_identify_ctrl()
389 id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS); in nvmet_execute_identify_ctrl()
395 id->psd[0].max_power = cpu_to_le16(0x9c4); in nvmet_execute_identify_ctrl()
396 id->psd[0].entry_lat = cpu_to_le32(0x10); in nvmet_execute_identify_ctrl()
397 id->psd[0].exit_lat = cpu_to_le32(0x4); in nvmet_execute_identify_ctrl()
399 id->nwpc = 1 << 0; /* write protect and no write protect */ in nvmet_execute_identify_ctrl()
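
A few encodings above are easy to misread: sn is built by hex-encoding the subsystem serial into a space-padded ASCII field, and sqes/cqes pack log2 entry sizes as (max << 4) | required, so (0x6 << 4) | 0x6 advertises exactly 64-byte SQEs and (0x4 << 4) | 0x4 exactly 16-byte CQEs. Decoding sketch:

    /* Sketch: decoding the SQES/CQES nibble pairs set above. */
    unsigned int sqe_max = 1u << (id->sqes >> 4);   /* 64 bytes */
    unsigned int sqe_req = 1u << (id->sqes & 0xf);  /* 64 bytes */
    unsigned int cqe_max = 1u << (id->cqes >> 4);   /* 16 bytes */
    unsigned int cqe_req = 1u << (id->cqes & 0xf);  /* 16 bytes */
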
414 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { in nvmet_execute_identify_ns()
415 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_ns()
427 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); in nvmet_execute_identify_ns()
435 id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift); in nvmet_execute_identify_ns()
436 switch (req->port->ana_state[ns->anagrpid]) { in nvmet_execute_identify_ns()
441 id->nuse = id->nsze; in nvmet_execute_identify_ns()
445 if (ns->bdev) in nvmet_execute_identify_ns()
446 nvmet_bdev_set_limits(ns->bdev, id); in nvmet_execute_identify_ns()
452 id->nlbaf = 0; in nvmet_execute_identify_ns()
453 id->flbas = 0; in nvmet_execute_identify_ns()
459 id->nmic = (1 << 0); in nvmet_execute_identify_ns()
460 id->anagrpid = cpu_to_le32(ns->anagrpid); in nvmet_execute_identify_ns()
462 memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid)); in nvmet_execute_identify_ns()
464 id->lbaf[0].ds = ns->blksize_shift; in nvmet_execute_identify_ns()
466 if (ns->readonly) in nvmet_execute_identify_ns()
467 id->nsattr |= (1 << 0); in nvmet_execute_identify_ns()
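
The geometry math above: ns->size is a byte count, so shifting by blksize_shift yields the LBA count reported in nsze/ncap, and lbaf[0].ds carries the same shift as the log2 LBA data size. In isolation:

    /* Sketch: namespace geometry as reported above. */
    u64 nsze = size_bytes >> blksize_shift; /* bytes -> LBA count */
    u32 lba_bytes = 1u << blksize_shift;    /* ds is log2(LBA size) */
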
479 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_nslist() local
481 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid); in nvmet_execute_identify_nslist()
493 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { in nvmet_execute_identify_nslist()
494 if (ns->nsid <= min_nsid) in nvmet_execute_identify_nslist()
496 list[i++] = cpu_to_le32(ns->nsid); in nvmet_execute_identify_nslist()
509 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len, in nvmet_copy_ns_identifier() argument
514 .nidl = len, in nvmet_copy_ns_identifier()
523 status = nvmet_copy_to_sgl(req, *off, id, len); in nvmet_copy_ns_identifier()
526 *off += len; in nvmet_copy_ns_identifier()
537 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); in nvmet_execute_identify_desclist()
539 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_desclist()
544 if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) { in nvmet_execute_identify_desclist()
547 &ns->uuid, &off); in nvmet_execute_identify_desclist()
551 if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) { in nvmet_execute_identify_desclist()
554 &ns->nguid, &off); in nvmet_execute_identify_desclist()
559 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, in nvmet_execute_identify_desclist()
560 off) != NVME_IDENTIFY_DATA_SIZE - off) in nvmet_execute_identify_desclist()
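
nvmet_copy_ns_identifier() emits one (type, length, payload) descriptor per non-zero identifier, bumping *off past header and payload; zero-filling the rest of the 4KB Identify buffer both clears stale data and terminates the list with a zero NIDT. The on-wire entry layout, sketched (field names follow the NVMe descriptor; exact padding assumed):

    /* Sketch: one Namespace Identification Descriptor (Identify CNS 03h). */
    struct ns_id_desc {
            u8 nidt;        /* identifier type (UUID, NGUID, ...) */
            u8 nidl;        /* payload length in bytes */
            u8 rsvd[2];
            /* nidl bytes of identifier follow */
    };
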
585 if (req->ns->file) in nvmet_write_protect_flush_sync()
591 pr_err("write protect flush failed nsid: %u\n", req->ns->nsid); in nvmet_write_protect_flush_sync()
597 u32 write_protect = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_write_protect()
598 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_set_feat_write_protect()
601 req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid); in nvmet_set_feat_write_protect()
602 if (unlikely(!req->ns)) { in nvmet_set_feat_write_protect()
603 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_set_feat_write_protect()
607 mutex_lock(&subsys->lock); in nvmet_set_feat_write_protect()
610 req->ns->readonly = true; in nvmet_set_feat_write_protect()
613 req->ns->readonly = false; in nvmet_set_feat_write_protect()
616 req->ns->readonly = false; in nvmet_set_feat_write_protect()
624 nvmet_ns_changed(subsys, req->ns->nsid); in nvmet_set_feat_write_protect()
625 mutex_unlock(&subsys->lock); in nvmet_set_feat_write_protect()
631 u32 val32 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_kato()
633 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); in nvmet_set_feat_kato()
635 nvmet_set_result(req, req->sq->ctrl->kato); in nvmet_set_feat_kato()
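
KATO arrives in cdw11 in milliseconds; the target stores whole seconds, rounding up, and the matching Get Features path further down reports kato * 1000. The round-trip, sketched with an arbitrary value:

    /* Sketch: the ms <-> s round-trip implied above. */
    u32 kato_ms = 41000;                            /* host request */
    u32 kato_s = DIV_ROUND_UP(kato_ms, 1000);       /* stored: 41 */
    u32 reported = kato_s * 1000;                   /* Get Features */
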
642 u32 val32 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_async_event()
645 req->error_loc = offsetof(struct nvme_common_command, cdw11); in nvmet_set_feat_async_event()
649 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); in nvmet_set_feat_async_event()
657 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_execute_set_features()
658 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); in nvmet_execute_set_features()
664 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); in nvmet_execute_set_features()
679 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_execute_set_features()
689 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_get_feat_write_protect()
692 req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid); in nvmet_get_feat_write_protect()
693 if (!req->ns) { in nvmet_get_feat_write_protect()
694 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_get_feat_write_protect()
697 mutex_lock(&subsys->lock); in nvmet_get_feat_write_protect()
698 if (req->ns->readonly == true) in nvmet_get_feat_write_protect()
703 mutex_unlock(&subsys->lock); in nvmet_get_feat_write_protect()
710 nvmet_set_result(req, req->sq->ctrl->kato * 1000); in nvmet_get_feat_kato()
715 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); in nvmet_get_feat_async_event()
720 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_execute_get_features()
721 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); in nvmet_execute_get_features()
754 (subsys->max_qid-1) | ((subsys->max_qid-1) << 16)); in nvmet_execute_get_features()
760 /* need 128-bit host identifier flag */ in nvmet_execute_get_features()
761 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { in nvmet_execute_get_features()
762 req->error_loc = in nvmet_execute_get_features()
768 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, in nvmet_execute_get_features()
769 sizeof(req->sq->ctrl->hostid)); in nvmet_execute_get_features()
775 req->error_loc = in nvmet_execute_get_features()
786 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_async_event() local
788 mutex_lock(&ctrl->lock); in nvmet_execute_async_event()
789 if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { in nvmet_execute_async_event()
790 mutex_unlock(&ctrl->lock); in nvmet_execute_async_event()
794 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; in nvmet_execute_async_event()
795 mutex_unlock(&ctrl->lock); in nvmet_execute_async_event()
797 schedule_work(&ctrl->async_event_work); in nvmet_execute_async_event()
802 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_keep_alive() local
804 pr_debug("ctrl %d update keep-alive timer for %d secs\n", in nvmet_execute_keep_alive()
805 ctrl->cntlid, ctrl->kato); in nvmet_execute_keep_alive()
807 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_execute_keep_alive()
813 struct nvme_command *cmd = req->cmd; in nvmet_parse_admin_cmd()
820 switch (cmd->common.opcode) { in nvmet_parse_admin_cmd()
822 req->data_len = nvmet_get_log_page_len(cmd); in nvmet_parse_admin_cmd()
824 switch (cmd->get_log_page.lid) { in nvmet_parse_admin_cmd()
826 req->execute = nvmet_execute_get_log_page_error; in nvmet_parse_admin_cmd()
829 req->execute = nvmet_execute_get_log_page_smart; in nvmet_parse_admin_cmd()
838 req->execute = nvmet_execute_get_log_page_noop; in nvmet_parse_admin_cmd()
841 req->execute = nvmet_execute_get_log_changed_ns; in nvmet_parse_admin_cmd()
844 req->execute = nvmet_execute_get_log_cmd_effects_ns; in nvmet_parse_admin_cmd()
847 req->execute = nvmet_execute_get_log_page_ana; in nvmet_parse_admin_cmd()
852 req->data_len = NVME_IDENTIFY_DATA_SIZE; in nvmet_parse_admin_cmd()
853 switch (cmd->identify.cns) { in nvmet_parse_admin_cmd()
855 req->execute = nvmet_execute_identify_ns; in nvmet_parse_admin_cmd()
858 req->execute = nvmet_execute_identify_ctrl; in nvmet_parse_admin_cmd()
861 req->execute = nvmet_execute_identify_nslist; in nvmet_parse_admin_cmd()
864 req->execute = nvmet_execute_identify_desclist; in nvmet_parse_admin_cmd()
869 req->execute = nvmet_execute_abort; in nvmet_parse_admin_cmd()
870 req->data_len = 0; in nvmet_parse_admin_cmd()
873 req->execute = nvmet_execute_set_features; in nvmet_parse_admin_cmd()
874 req->data_len = 0; in nvmet_parse_admin_cmd()
877 req->execute = nvmet_execute_get_features; in nvmet_parse_admin_cmd()
878 req->data_len = 0; in nvmet_parse_admin_cmd()
881 req->execute = nvmet_execute_async_event; in nvmet_parse_admin_cmd()
882 req->data_len = 0; in nvmet_parse_admin_cmd()
885 req->execute = nvmet_execute_keep_alive; in nvmet_parse_admin_cmd()
886 req->data_len = 0; in nvmet_parse_admin_cmd()
890 pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, in nvmet_parse_admin_cmd()
891 req->sq->qid); in nvmet_parse_admin_cmd()
892 req->error_loc = offsetof(struct nvme_common_command, opcode); in nvmet_parse_admin_cmd()
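
nvmet_parse_admin_cmd() only selects a handler and the expected transfer length; execution happens later, once the transport has data in place. A hedged sketch of that split from a caller's perspective (simplified; real transports add queueing and SGL setup):

    /* Sketch: parse-then-execute flow, simplified. */
    status = nvmet_parse_admin_cmd(req);
    if (status)
            nvmet_req_complete(req, status);
    else
            req->execute(req);      /* handler chosen during parse */
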