Lines matching +full:ctrl +full:- +full:len in drivers/nvme/target/admin-cmd.c (Linux NVMe target, admin command handling); non-matching lines are elided and marked /* ... */.
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}
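NUMDU and NUMDL split a 0's-based dword count across two 16-bit fields. A stand-alone sketch of the same math (plain C, hypothetical host values, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Stand-alone check of the NUMD math above. */
static uint32_t log_page_len(uint16_t numdu, uint16_t numdl)
{
        uint32_t len = numdu;

        len <<= 16;
        len += numdl;
        len += 1;               /* NUMD is a 0's based dword count */
        return len * sizeof(uint32_t);
}

int main(void)
{
        /* Hypothetical host request: NUMD = 1023 (0's based) -> one 4 KiB page */
        printf("%u\n", log_page_len(0, 1023));  /* prints 4096 */
        return 0;
}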
/* nvmet_feat_data_len() */
        return sizeof(req->sq->ctrl->hostid);

/* nvmet_get_log_page_offset() */
        return le64_to_cpu(cmd->get_log_page.lpo);

/* nvmet_execute_get_log_page_noop() */
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
/* nvmet_execute_get_log_page_error() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        /* ... */
        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
        /* ... */
                if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot)))
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
        /* ... */
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
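The error-log walk starts at the most recently written slot (err_counter modulo the ring size) and steps backwards, wrapping at zero. A stand-alone sketch of that walk, with a hypothetical slot count and counter:

#include <stdio.h>

#define SLOTS 8 /* stands in for NVMET_ERROR_LOG_SLOTS */

int main(void)
{
        unsigned long long err_counter = 11;    /* hypothetical: 11 errors so far */
        unsigned long long slot = err_counter % SLOTS;
        int i;

        /* Same walk as the loop above: newest entry first, wrapping backwards. */
        for (i = 0; i < SLOTS; i++) {
                printf("%llu ", slot);
                if (slot == 0)
                        slot = SLOTS - 1;
                else
                        slot--;
        }
        printf("\n");   /* prints: 3 2 1 0 7 6 5 4 */
        return 0;
}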
/* nvmet_get_smart_log_nsid() */
        /* we don't have the right data for file backed ns */
        if (!req->ns->bdev)
                return NVME_SC_SUCCESS;

        host_reads = part_stat_read(req->ns->bdev, ios[READ]);
        data_units_read =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
        host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
        data_units_written =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
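SMART data units are thousands of 512-byte sectors, rounded up. A quick stand-alone check of the DIV_ROUND_UP conversion used above (hypothetical sector count):

#include <stdio.h>

/* DIV_ROUND_UP as defined in the kernel (linux/math.h). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Hypothetical: 1,000,001 sectors read -> 1001 data units. */
        unsigned long long sectors = 1000001;

        printf("%llu\n", DIV_ROUND_UP(sectors, 1000)); /* prints 1001 */
        return 0;
}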
/* nvmet_get_smart_log_all() */
        struct nvmet_ctrl *ctrl;
        /* ... */
        ctrl = req->sq->ctrl;
        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[WRITE]), 1000);
        }

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
/* nvmet_execute_get_log_page_smart() */
        if (req->transfer_len != sizeof(*log))
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        /* ... */
        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
/* nvmet_get_cmd_effects_nvm() */
        log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

        log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
/* nvmet_get_cmd_effects_zns() */
        log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
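Each entry of the commands-supported-and-effects log is a 32-bit field; the two helpers above set only bit 0 (CSUPP, command supported) and leave the effect bits clear. A stand-alone decode of such an entry, with bit names taken from the NVMe spec:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t entry = 1 << 0;        /* as written by the handlers above */

        printf("CSUPP=%u LBCC=%u NCC=%u NIC=%u CCC=%u\n",
               entry & 1,               /* bit 0: command supported */
               (entry >> 1) & 1,        /* bit 1: logical block content change */
               (entry >> 2) & 1,        /* bit 2: namespace capability change */
               (entry >> 3) & 1,        /* bit 3: namespace inventory change */
               (entry >> 4) & 1);       /* bit 4: controller capability change */
        return 0;
}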
/* nvmet_execute_get_log_cmd_effects_ns() */
        switch (req->cmd->get_log_page.csi) {
        /* ... */
        }
/* nvmet_execute_get_log_changed_ns() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        size_t len;

        if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);

        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (status == 0)
                status = nvmet_zero_sgl(req, len, req->transfer_len - len);
        ctrl->nr_changed_ns = 0;
        /* ... */
        mutex_unlock(&ctrl->lock);
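Once the changed-namespace list overflows, nr_changed_ns is pinned at U32_MAX and the log degenerates to a single entry, hence the one-dword len. A stand-alone sketch of that length rule (UINT32_MAX standing in for the kernel's U32_MAX):

#include <stdint.h>
#include <stdio.h>

static uint32_t changed_log_len(uint32_t nr_changed_ns)
{
        if (nr_changed_ns == UINT32_MAX)
                return sizeof(uint32_t);        /* one sentinel entry */
        return nr_changed_ns * sizeof(uint32_t);
}

int main(void)
{
        printf("%u %u\n", changed_log_len(3), changed_log_len(UINT32_MAX));
        /* prints: 12 4 */
        return 0;
}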
/* nvmet_format_ana_group() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        /* ... */
        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
/* nvmet_execute_get_log_page_ana() */
        size_t len;
        /* ... */
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
        /* ... */
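The ANA log is a header followed by one descriptor per group, each trailed by its NSID list unless the host set the RGO (return groups only) bit. A stand-alone size sketch, with stand-in sizes for the header and descriptor (the real structs live in include/linux/nvme.h):

#include <stdio.h>
#include <stdint.h>

/* Stand-in sizes, assumed here for illustration only. */
#define ANA_HDR_SIZE    16
#define ANA_DESC_SIZE   32

static size_t ana_log_size(unsigned int ngrps, unsigned int total_nsids, int rgo)
{
        /* With the RGO bit set, descriptors carry no NSID list. */
        return ANA_HDR_SIZE + ngrps * ANA_DESC_SIZE +
               (rgo ? 0 : total_nsids * sizeof(uint32_t));
}

int main(void)
{
        printf("%zu\n", ana_log_size(2, 5, 0)); /* 16 + 64 + 20 = 100 */
        return 0;
}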
/* nvmet_execute_get_log_page() */
        if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
                return;

        switch (req->cmd->get_log_page.lid) {
        /* ... */
        default:
                pr_debug("unhandled lid %d on qid %d\n",
                         req->cmd->get_log_page.lid, req->sq->qid);
                req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
                /* ... */
        }
/* nvmet_execute_identify_ctrl() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_subsys *subsys = ctrl->subsys;
        /* ... */
        if (!subsys->subsys_discovered) {
                mutex_lock(&subsys->lock);
                subsys->subsys_discovered = true;
                mutex_unlock(&subsys->lock);
        }
        /* ... */
        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
        memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
                       strlen(subsys->model_number), ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->rab = 6;

        if (nvmet_is_disc_subsys(ctrl->subsys))
                id->cntrltype = NVME_CTRL_DISC;
        else
                id->cntrltype = NVME_CTRL_IO;

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
                NVME_CTRL_CMIC_ANA;

        /* Limit MDTS according to transport capability */
        if (ctrl->ops->get_mdts)
                id->mdts = ctrl->ops->get_mdts(ctrl);
        else
                id->mdts = 0;

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                NVME_CTRL_ATTR_TBKAS);

        id->oacs = 0;
        /* ... */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        id->vwc = NVME_CTRL_VWC_PRESENT;
        /* ... */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /*
         * Max command capsule size is sqe + in-capsule data size.
         * Disable in-capsule data for Metadata capable controllers.
         */
        cmd_capsule_size = sizeof(struct nvme_command);
        if (!ctrl->pi_support)
                cmd_capsule_size += req->port->inline_data_size;
        id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
        /* ... */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */
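SQES and CQES pack log2 entry sizes, required size in the low nibble and maximum in the high nibble. Decoding the values set above in stand-alone C:

#include <stdio.h>

int main(void)
{
        unsigned char sqes = (0x6 << 4) | 0x6;
        unsigned char cqes = (0x4 << 4) | 0x4;

        printf("SQE: min %u, max %u bytes\n", 1u << (sqes & 0xf), 1u << (sqes >> 4));
        printf("CQE: min %u, max %u bytes\n", 1u << (cqes & 0xf), 1u << (cqes >> 4));
        /* SQE: min 64, max 64 bytes; CQE: min 16, max 16 bytes */
        return 0;
}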
/* nvmet_execute_identify_ns() */
        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }
        /* ... */
        if (nvmet_ns_revalidate(req->ns)) {
                mutex_lock(&req->ns->subsys->lock);
                nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
                mutex_unlock(&req->ns->subsys->lock);
        }

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze =
                cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
        switch (req->port->ana_state[req->ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        if (req->ns->bdev)
                nvmet_bdev_set_limits(req->ns->bdev, id);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;
        /* ... */
        id->nmic = NVME_NS_NMIC_SHARED;
        id->anagrpid = cpu_to_le32(req->ns->anagrpid);

        memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = req->ns->blksize_shift;

        if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
                id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
                          NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
                          NVME_NS_DPC_PI_TYPE3;
                id->mc = NVME_MC_EXTENDED_LBA;
                id->dps = req->ns->pi_type;
                id->flbas = NVME_NS_FLBAS_META_EXT;
                id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
        }

        if (req->ns->readonly)
                id->nsattr |= (1 << 0);
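NSZE and NCAP are reported as the backing size in logical blocks, i.e. size >> blksize_shift. A stand-alone check with a hypothetical 1 GiB namespace and 4 KiB blocks:

#include <stdio.h>

int main(void)
{
        unsigned long long size = 1ULL << 30;   /* 1 GiB backing device */
        unsigned int blksize_shift = 12;        /* 4096-byte logical blocks */

        printf("%llu blocks\n", size >> blksize_shift); /* 262144 blocks */
        return 0;
}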
/* nvmet_execute_identify_nslist() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        /* ... */
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        /* ... */
        xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
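The active-NSID list returns only NSIDs strictly greater than the NSID carried in the Identify command, which is how hosts paginate long namespace lists. A stand-alone sketch with hypothetical NSIDs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t nsids[] = { 1, 2, 5, 9 };      /* hypothetical active namespaces */
        uint32_t min_nsid = 2;                  /* NSID from the Identify command */
        size_t i;

        for (i = 0; i < sizeof(nsids) / sizeof(nsids[0]); i++)
                if (nsids[i] > min_nsid)
                        printf("%u ", nsids[i]);        /* prints: 5 9 */
        printf("\n");
        return 0;
}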
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        /* ... */
        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;
        /* ... */
}
/* nvmet_execute_identify_desclist() */
        if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &req->ns->uuid, &off);
                /* ... */
        }
        if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &req->ns->nguid, &off);
                /* ... */
        }

        status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
                                          NVME_NIDT_CSI_LEN,
                                          &req->ns->csi, &off);
        /* ... */
        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
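Each NS identification descriptor is a 4-byte header (NIDT, NIDL, reserved) followed by NIDL payload bytes; the rest of the 4 KiB Identify buffer is zero-filled. Offset math for the three descriptors emitted above, in stand-alone C:

#include <stdio.h>

int main(void)
{
        unsigned off = 0;

        off += 4 + 16;  /* NVME_NIDT_UUID,  NIDL = 16 */
        off += 4 + 16;  /* NVME_NIDT_NGUID, NIDL = 16 */
        off += 4 + 1;   /* NVME_NIDT_CSI,   NIDL = 1  */
        printf("zero-fill from offset %u to 4096\n", off);      /* offset 45 */
        return 0;
}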
/* nvmet_handle_identify_desclist() */
        switch (req->cmd->identify.csi) {
        /* ... */
        }
/* nvmet_execute_identify() */
        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_NS:
                switch (req->cmd->identify.csi) {
                /* ... */
                }
                break;
        /* ... the NVME_ID_CNS_CS_NS, NVME_ID_CNS_CTRL, NVME_ID_CNS_CS_CTRL
         * and NVME_ID_CNS_NS_ACTIVE_LIST cases each dispatch on
         * req->cmd->identify.csi the same way ... */
        }
/* nvmet_write_protect_flush_sync() */
        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
/* nvmet_set_feat_write_protect() */
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        /* ... */
        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
/* nvmet_set_feat_kato() */
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        nvmet_stop_keep_alive_timer(req->sq->ctrl);
        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
        nvmet_start_keep_alive_timer(req->sq->ctrl);

        nvmet_set_result(req, req->sq->ctrl->kato);
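KATO arrives from the host in milliseconds; the target stores whole seconds, rounding up, and the Get Features path reports kato * 1000 back. A stand-alone check of the rounding:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Hypothetical host value: 2500 ms becomes 3 s,
         * echoed back to the host as 3000 ms. */
        unsigned int val32 = 2500;

        printf("%u s\n", DIV_ROUND_UP(val32, 1000));    /* 3 s */
        return 0;
}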
/* nvmet_set_feat_async_event() */
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
/* nvmet_execute_set_features() */
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        /* ... */
        case NVME_FEAT_NUM_QUEUES:
                /* ... */
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        /* ... */
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
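Set Features / Number of Queues returns 0's-based queue counts packed as NSQA (bits 15:0) and NCQA (bits 31:16); this target always grants max_qid - 1 of each. A stand-alone sketch with a hypothetical max_qid:

#include <stdio.h>

int main(void)
{
        unsigned int max_qid = 128;     /* hypothetical transport limit */
        unsigned int result = (max_qid - 1) | ((max_qid - 1) << 16);

        /* Decode back to 1-based counts: 128 SQs and 128 CQs allowed. */
        printf("NSQA=%u NCQA=%u\n", (result & 0xffff) + 1, (result >> 16) + 1);
        return 0;
}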
/* nvmet_get_feat_write_protect() */
        mutex_lock(&subsys->lock);
        if (req->ns->readonly == true)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);
/* nvmet_get_feat_kato() */
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);

/* nvmet_get_feat_async_event() */
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
/* nvmet_execute_get_features() */
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        /* ... */
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        /* ... */
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                           sizeof(req->sq->ctrl->hostid));
                break;
        /* ... */
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
/* nvmet_execute_async_event() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        /* ... */
        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        queue_work(nvmet_wq, &ctrl->async_event_work);
/* nvmet_execute_keep_alive() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        /* ... */
        if (!ctrl->kato) {
                status = NVME_SC_KA_TIMEOUT_INVALID;
                goto out;
        }

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                 ctrl->cntlid, ctrl->kato);
        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
/* nvmet_parse_admin_cmd() */
        struct nvme_command *cmd = req->cmd;
        /* ... */
        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->execute = nvmet_execute_get_log_page;
                return 0;
        case nvme_admin_identify:
                req->execute = nvmet_execute_identify;
                return 0;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                return 0;
        }