Lines matching full:ctrl — NVMe host core driver (drivers/nvme/host/core.c)
93 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
126 static void nvme_queue_scan(struct nvme_ctrl *ctrl) in nvme_queue_scan() argument
131 if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) in nvme_queue_scan()
132 queue_work(nvme_wq, &ctrl->scan_work); in nvme_queue_scan()
141 int nvme_try_sched_reset(struct nvme_ctrl *ctrl) in nvme_try_sched_reset() argument
143 if (ctrl->state != NVME_CTRL_RESETTING) in nvme_try_sched_reset()
145 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) in nvme_try_sched_reset()
151 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) in nvme_reset_ctrl() argument
153 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_reset_ctrl()
155 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) in nvme_reset_ctrl()
161 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) in nvme_reset_ctrl_sync() argument
165 ret = nvme_reset_ctrl(ctrl); in nvme_reset_ctrl_sync()
167 flush_work(&ctrl->reset_work); in nvme_reset_ctrl_sync()
168 if (ctrl->state != NVME_CTRL_LIVE) in nvme_reset_ctrl_sync()
176 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_do_delete_ctrl() argument
178 dev_info(ctrl->device, in nvme_do_delete_ctrl()
179 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); in nvme_do_delete_ctrl()
181 flush_work(&ctrl->reset_work); in nvme_do_delete_ctrl()
182 nvme_stop_ctrl(ctrl); in nvme_do_delete_ctrl()
183 nvme_remove_namespaces(ctrl); in nvme_do_delete_ctrl()
184 ctrl->ops->delete_ctrl(ctrl); in nvme_do_delete_ctrl()
185 nvme_uninit_ctrl(ctrl); in nvme_do_delete_ctrl()
190 struct nvme_ctrl *ctrl = in nvme_delete_ctrl_work() local
193 nvme_do_delete_ctrl(ctrl); in nvme_delete_ctrl_work()
196 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_delete_ctrl() argument
198 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) in nvme_delete_ctrl()
200 if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) in nvme_delete_ctrl()
206 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) in nvme_delete_ctrl_sync() argument
212 nvme_get_ctrl(ctrl); in nvme_delete_ctrl_sync()
213 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) in nvme_delete_ctrl_sync()
214 nvme_do_delete_ctrl(ctrl); in nvme_delete_ctrl_sync()
215 nvme_put_ctrl(ctrl); in nvme_delete_ctrl_sync()
269 delay = ns->ctrl->crdt[crd - 1] * 100; in nvme_retry_req()
322 if (nvme_req(req)->ctrl->kas) in nvme_complete_rq()
323 nvme_req(req)->ctrl->comp_seen = true; in nvme_complete_rq()
354 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, in nvme_change_ctrl_state() argument
361 spin_lock_irqsave(&ctrl->lock, flags); in nvme_change_ctrl_state()
363 old_state = ctrl->state; in nvme_change_ctrl_state()
431 ctrl->state = new_state; in nvme_change_ctrl_state()
432 wake_up_all(&ctrl->state_wq); in nvme_change_ctrl_state()
435 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_change_ctrl_state()
436 if (changed && ctrl->state == NVME_CTRL_LIVE) in nvme_change_ctrl_state()
437 nvme_kick_requeue_lists(ctrl); in nvme_change_ctrl_state()
445 static bool nvme_state_terminal(struct nvme_ctrl *ctrl) in nvme_state_terminal() argument
447 switch (ctrl->state) { in nvme_state_terminal()
458 WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); in nvme_state_terminal()
467 bool nvme_wait_reset(struct nvme_ctrl *ctrl) in nvme_wait_reset() argument
469 wait_event(ctrl->state_wq, in nvme_wait_reset()
470 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || in nvme_wait_reset()
471 nvme_state_terminal(ctrl)); in nvme_wait_reset()
472 return ctrl->state == NVME_CTRL_RESETTING; in nvme_wait_reset()
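
The lines above (nvme_change_ctrl_state, nvme_state_terminal, nvme_wait_reset) outline the controller state machine: transitions are validated under ctrl->lock, a successful change wakes everyone sleeping on state_wq, and nvme_wait_reset() sleeps until it either wins the transition to RESETTING or the controller reaches a terminal state. Below is a minimal user-space model of that pattern; the reduced state set, the pthread primitives, and the transition rules shown are illustrative assumptions, not the kernel's actual tables.

    #include <pthread.h>
    #include <stdbool.h>

    enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING, CTRL_DEAD };

    struct ctrl {
        pthread_mutex_t lock;       /* models ctrl->lock */
        pthread_cond_t state_wq;    /* models ctrl->state_wq */
        enum ctrl_state state;
    };

    /* Validate and apply a transition; caller holds c->lock. */
    static bool ctrl_change_state_locked(struct ctrl *c, enum ctrl_state new_state)
    {
        bool changed;

        switch (new_state) {
        case CTRL_RESETTING:        /* only a new or live controller may start a reset */
            changed = (c->state == CTRL_NEW || c->state == CTRL_LIVE);
            break;
        case CTRL_DELETING:         /* anything but a dead controller can be deleted */
            changed = (c->state != CTRL_DEAD);
            break;
        default:
            changed = true;
            break;
        }
        if (changed) {
            c->state = new_state;
            pthread_cond_broadcast(&c->state_wq);   /* models wake_up_all() */
        }
        return changed;
    }

    static bool ctrl_change_state(struct ctrl *c, enum ctrl_state new_state)
    {
        bool changed;

        pthread_mutex_lock(&c->lock);
        changed = ctrl_change_state_locked(c, new_state);
        pthread_mutex_unlock(&c->lock);
        return changed;
    }

    static bool ctrl_state_terminal(const struct ctrl *c)
    {
        return c->state == CTRL_DELETING || c->state == CTRL_DEAD;
    }

    /* Block until we own the RESETTING state or the controller is going away. */
    static bool ctrl_wait_reset(struct ctrl *c)
    {
        bool resetting;

        pthread_mutex_lock(&c->lock);
        while (!(resetting = ctrl_change_state_locked(c, CTRL_RESETTING)) &&
               !ctrl_state_terminal(c))
            pthread_cond_wait(&c->state_wq, &c->lock);
        pthread_mutex_unlock(&c->lock);
        return resetting;
    }

The reset entry points listed earlier build on the same primitive: nvme_reset_ctrl() first wins the transition to RESETTING and then queues reset_work on nvme_reset_wq, and the _sync variant additionally flushes that work and reports success only if the controller came back LIVE.
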
502 nvme_put_ctrl(ns->ctrl); in nvme_free_ns()
544 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) in nvme_toggle_streams() argument
557 return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); in nvme_toggle_streams()
560 static int nvme_disable_streams(struct nvme_ctrl *ctrl) in nvme_disable_streams() argument
562 return nvme_toggle_streams(ctrl, false); in nvme_disable_streams()
565 static int nvme_enable_streams(struct nvme_ctrl *ctrl) in nvme_enable_streams() argument
567 return nvme_toggle_streams(ctrl, true); in nvme_enable_streams()
570 static int nvme_get_stream_params(struct nvme_ctrl *ctrl, in nvme_get_stream_params() argument
584 return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); in nvme_get_stream_params()
587 static int nvme_configure_directives(struct nvme_ctrl *ctrl) in nvme_configure_directives() argument
592 if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) in nvme_configure_directives()
597 ret = nvme_enable_streams(ctrl); in nvme_configure_directives()
601 ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); in nvme_configure_directives()
605 ctrl->nssa = le16_to_cpu(s.nssa); in nvme_configure_directives()
606 if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { in nvme_configure_directives()
607 dev_info(ctrl->device, "too few streams (%u) available\n", in nvme_configure_directives()
608 ctrl->nssa); in nvme_configure_directives()
612 ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); in nvme_configure_directives()
613 dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); in nvme_configure_directives()
617 nvme_disable_streams(ctrl); in nvme_configure_directives()
625 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, in nvme_assign_write_stream() argument
635 if (WARN_ON_ONCE(streamid > ctrl->nr_streams)) in nvme_assign_write_stream()
682 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) in nvme_setup_discard()
685 range = page_address(ns->ctrl->discard_page); in nvme_setup_discard()
701 if (virt_to_page(range) == ns->ctrl->discard_page) in nvme_setup_discard()
702 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); in nvme_setup_discard()
724 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) in nvme_setup_write_zeroes()
741 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_setup_rw() local
758 if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) in nvme_setup_rw()
759 nvme_assign_write_stream(ctrl, req, &control, &dsmgmt); in nvme_setup_rw()
800 if (page == ns->ctrl->discard_page) in nvme_cleanup_cmd()
801 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); in nvme_cleanup_cmd()
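
The nvme_setup_discard()/nvme_cleanup_cmd() lines above show an allocate-or-borrow pattern: if the discard range array cannot be allocated, the request borrows the controller's single preallocated discard_page, claimed with test_and_set_bit_lock() and released with clear_bit_unlock() during cleanup. A hedged user-space sketch of that pattern, with hypothetical helper names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct discard_ctx {
        atomic_flag page_busy;  /* models ctrl->discard_page_busy; init with ATOMIC_FLAG_INIT */
        void *fallback_page;    /* preallocated once, models ctrl->discard_page */
    };

    /* Try a normal allocation first; on failure borrow the reserved page. */
    static void *get_range_buffer(struct discard_ctx *c, size_t size)
    {
        void *buf = malloc(size);

        if (buf)
            return buf;
        if (atomic_flag_test_and_set(&c->page_busy))
            return NULL;        /* fallback already in use: caller must retry later */
        return c->fallback_page;
    }

    static void put_range_buffer(struct discard_ctx *c, void *buf)
    {
        if (buf == c->fallback_page)
            atomic_flag_clear(&c->page_busy);   /* models clear_bit_unlock() */
        else
            free(buf);
    }
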
984 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) in nvme_command_effects() argument
992 dev_warn(ctrl->device, in nvme_command_effects()
998 if (ctrl->effects) in nvme_command_effects()
999 effects = le32_to_cpu(ctrl->effects->acs[opcode]); in nvme_command_effects()
1006 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_passthru_start() argument
1009 u32 effects = nvme_command_effects(ctrl, ns, opcode); in nvme_passthru_start()
1016 mutex_lock(&ctrl->scan_lock); in nvme_passthru_start()
1017 mutex_lock(&ctrl->subsys->lock); in nvme_passthru_start()
1018 nvme_mpath_start_freeze(ctrl->subsys); in nvme_passthru_start()
1019 nvme_mpath_wait_freeze(ctrl->subsys); in nvme_passthru_start()
1020 nvme_start_freeze(ctrl); in nvme_passthru_start()
1021 nvme_wait_freeze(ctrl); in nvme_passthru_start()
1026 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) in nvme_passthru_end() argument
1029 nvme_unfreeze(ctrl); in nvme_passthru_end()
1030 nvme_mpath_unfreeze(ctrl->subsys); in nvme_passthru_end()
1031 mutex_unlock(&ctrl->subsys->lock); in nvme_passthru_end()
1032 nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); in nvme_passthru_end()
1033 mutex_unlock(&ctrl->scan_lock); in nvme_passthru_end()
1036 nvme_init_identify(ctrl); in nvme_passthru_end()
1038 nvme_queue_scan(ctrl); in nvme_passthru_end()
1039 flush_work(&ctrl->scan_work); in nvme_passthru_end()
1046 struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl; in nvme_execute_passthru_rq() local
1051 effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); in nvme_execute_passthru_rq()
1053 nvme_passthru_end(ctrl, effects); in nvme_execute_passthru_rq()
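
nvme_passthru_start()/nvme_passthru_end() above bracket a user passthrough command: under scan_lock and the subsystem lock the driver freezes I/O (including multipath queues) when the command may disturb other I/O, and on completion it unfreezes, prunes invalid namespaces, re-runs identify, and kicks off a rescan. The sketch below compresses that bracket; the helper names are hypothetical stand-ins, the exact decision bits are an assumption based on the effects-log semantics, and the pruning of stale namespaces is folded into the rescan helper.

    #include <stdint.h>
    #include <stdbool.h>

    /* Bits from the NVMe Commands Supported and Effects log entry. */
    #define EFFECTS_LBCC        (1u << 1)   /* logical block content change */
    #define EFFECTS_NCC         (1u << 2)   /* namespace capability change */
    #define EFFECTS_NIC         (1u << 3)   /* namespace inventory change */
    #define EFFECTS_CCC         (1u << 4)   /* controller capability change */
    #define EFFECTS_CSE_MASK    (7u << 16)  /* command submission/execution limits */

    /* Hypothetical stand-ins for the freeze/identify/scan machinery above. */
    static void freeze_all_io(void)          { }
    static void unfreeze_all_io(void)        { }
    static void reidentify_controller(void)  { }
    static void rescan_namespaces(void)      { }
    static int  issue_passthru(void)         { return 0; }

    static int execute_passthru(uint32_t effects)
    {
        bool quiesce = effects & (EFFECTS_LBCC | EFFECTS_CSE_MASK);
        int ret;

        if (quiesce)
            freeze_all_io();            /* models nvme_passthru_start() */
        ret = issue_passthru();
        if (quiesce)
            unfreeze_all_io();          /* part of nvme_passthru_end() */
        if (effects & EFFECTS_CCC)
            reidentify_controller();    /* models nvme_init_identify() */
        if (effects & (EFFECTS_NIC | EFFECTS_NCC))
            rescan_namespaces();        /* models nvme_queue_scan() + flush */
        return ret;
    }
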
1117 struct nvme_ctrl *ctrl = rq->end_io_data; in nvme_keep_alive_end_io() local
1124 dev_err(ctrl->device, in nvme_keep_alive_end_io()
1130 ctrl->comp_seen = false; in nvme_keep_alive_end_io()
1131 spin_lock_irqsave(&ctrl->lock, flags); in nvme_keep_alive_end_io()
1132 if (ctrl->state == NVME_CTRL_LIVE || in nvme_keep_alive_end_io()
1133 ctrl->state == NVME_CTRL_CONNECTING) in nvme_keep_alive_end_io()
1135 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_keep_alive_end_io()
1137 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvme_keep_alive_end_io()
1140 static int nvme_keep_alive(struct nvme_ctrl *ctrl) in nvme_keep_alive() argument
1144 rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED, in nvme_keep_alive()
1149 rq->timeout = ctrl->kato * HZ; in nvme_keep_alive()
1150 rq->end_io_data = ctrl; in nvme_keep_alive()
1159 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), in nvme_keep_alive_work() local
1161 bool comp_seen = ctrl->comp_seen; in nvme_keep_alive_work()
1163 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { in nvme_keep_alive_work()
1164 dev_dbg(ctrl->device, in nvme_keep_alive_work()
1166 ctrl->comp_seen = false; in nvme_keep_alive_work()
1167 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvme_keep_alive_work()
1171 if (nvme_keep_alive(ctrl)) { in nvme_keep_alive_work()
1173 dev_err(ctrl->device, "keep-alive failed\n"); in nvme_keep_alive_work()
1174 nvme_reset_ctrl(ctrl); in nvme_keep_alive_work()
1179 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) in nvme_start_keep_alive() argument
1181 if (unlikely(ctrl->kato == 0)) in nvme_start_keep_alive()
1184 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvme_start_keep_alive()
1187 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) in nvme_stop_keep_alive() argument
1189 if (unlikely(ctrl->kato == 0)) in nvme_stop_keep_alive()
1192 cancel_delayed_work_sync(&ctrl->ka_work); in nvme_stop_keep_alive()
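
The keep-alive lines above encode the scheduling rules: ka_work re-arms itself every kato seconds, sending is skipped when the controller advertises traffic-based keep-alive (CTRATT TBKAS) and a completion was seen since the last tick, re-arming continues only while the controller is LIVE or CONNECTING, a failed keep-alive escalates to a controller reset, and kato == 0 disables the mechanism entirely. A simplified, loop-based user-space sketch of those rules follows; the real code runs as delayed work on nvme_wq, and comp_seen is cleared in the completion path rather than in the loop.

    #include <stdbool.h>
    #include <unistd.h>

    struct ka_state {
        unsigned int kato;  /* keep-alive timeout granted by the controller, seconds */
        bool tbkas;         /* traffic-based keep-alive attribute (CTRATT bit) */
        bool comp_seen;     /* did any command complete since the last tick? */
        bool connected;     /* models the LIVE/CONNECTING states */
    };

    /* Hypothetical stand-ins for the admin command and the reset escalation. */
    static bool send_keep_alive(struct ka_state *s) { (void)s; return true; }
    static void schedule_reset(struct ka_state *s)  { s->connected = false; }

    static void keep_alive_loop(struct ka_state *s)
    {
        if (s->kato == 0)   /* keep-alive disabled, as in nvme_start_keep_alive() */
            return;

        while (s->connected) {
            sleep(s->kato);             /* models the re-armed delayed work */

            if (s->tbkas && s->comp_seen) {
                /* Regular I/O already proved liveness; skip this round. */
                s->comp_seen = false;
                continue;
            }
            s->comp_seen = false;
            if (!send_keep_alive(s)) {
                schedule_reset(s);      /* models nvme_reset_ctrl() on failure */
                return;
            }
        }
    }
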
1202 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) in nvme_ctrl_limited_cns() argument
1204 if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) in nvme_ctrl_limited_cns()
1205 return ctrl->vs < NVME_VS(1, 2, 0); in nvme_ctrl_limited_cns()
1206 return ctrl->vs < NVME_VS(1, 1, 0); in nvme_ctrl_limited_cns()
1229 static bool nvme_multi_css(struct nvme_ctrl *ctrl) in nvme_multi_css() argument
1231 return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; in nvme_multi_css()
1234 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, in nvme_process_ns_desc() argument
1237 const char *warn_str = "ctrl returned bogus length:"; in nvme_process_ns_desc()
1243 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", in nvme_process_ns_desc()
1251 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", in nvme_process_ns_desc()
1259 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", in nvme_process_ns_desc()
1267 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", in nvme_process_ns_desc()
1280 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, in nvme_identify_ns_descs() argument
1288 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) in nvme_identify_ns_descs()
1290 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) in nvme_identify_ns_descs()
1301 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, in nvme_identify_ns_descs()
1304 dev_warn(ctrl->device, in nvme_identify_ns_descs()
1315 len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen); in nvme_identify_ns_descs()
1322 if (nvme_multi_css(ctrl) && !csi_seen) { in nvme_identify_ns_descs()
1323 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", in nvme_identify_ns_descs()
1333 static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, in nvme_identify_ns() argument
1348 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); in nvme_identify_ns()
1350 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); in nvme_identify_ns()
1358 if (ctrl->vs >= NVME_VS(1, 1, 0) && in nvme_identify_ns()
1361 if (ctrl->vs >= NVME_VS(1, 2, 0) && in nvme_identify_ns()
1409 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) in nvme_set_queue_count() argument
1415 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, in nvme_set_queue_count()
1426 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); in nvme_set_queue_count()
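
nvme_set_queue_count() above issues Set Features (Number of Queues); the completion's dword 0 result packs the number of submission queues allocated in its low 16 bits and the number of completion queues in the high 16 bits, both zero-based, and the driver then caps the requested count at the smaller of the two (that line is not among the matches above). A small decoding sketch; the helper name is made up:

    #include <stdint.h>

    /*
     * Decode the Set Features (Number of Queues) completion result:
     * low 16 bits = submission queues allocated (NSQA), high 16 bits =
     * completion queues allocated (NCQA); both fields are zero-based.
     */
    static unsigned int io_queues_granted(uint32_t result)
    {
        uint32_t nsqa = result & 0xffff;
        uint32_t ncqa = result >> 16;

        return (nsqa < ncqa ? nsqa : ncqa) + 1;
    }
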
1441 static void nvme_enable_aen(struct nvme_ctrl *ctrl) in nvme_enable_aen() argument
1443 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; in nvme_enable_aen()
1449 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, in nvme_enable_aen()
1452 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", in nvme_enable_aen()
1455 queue_work(nvme_wq, &ctrl->async_event_work); in nvme_enable_aen()
1520 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_user_cmd() argument
1552 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd()
1565 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_user_cmd64() argument
1596 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd64()
1653 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_handle_ctrl_ioctl() local
1656 nvme_get_ctrl(ns->ctrl); in nvme_handle_ctrl_ioctl()
1661 ret = nvme_user_cmd(ctrl, NULL, argp); in nvme_handle_ctrl_ioctl()
1664 ret = nvme_user_cmd64(ctrl, NULL, argp); in nvme_handle_ctrl_ioctl()
1667 ret = sed_ioctl(ctrl->opal_dev, cmd, argp); in nvme_handle_ctrl_ioctl()
1670 nvme_put_ctrl(ctrl); in nvme_handle_ctrl_ioctl()
1700 ret = nvme_user_cmd(ns->ctrl, ns, argp); in nvme_ioctl()
1706 ret = nvme_user_cmd64(ns->ctrl, ns, argp); in nvme_ioctl()
1771 if (!try_module_get(ns->ctrl->ops->module)) in nvme_open()
1786 module_put(ns->ctrl->ops->module); in nvme_release()
1835 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_config_discard() local
1839 if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) { in nvme_config_discard()
1844 if (ctrl->nr_streams && ns->sws && ns->sgs) in nvme_config_discard()
1860 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) in nvme_config_discard()
1868 if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || in nvme_config_write_zeroes()
1869 (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) in nvme_config_write_zeroes()
1881 if (ns->ctrl->max_hw_sectors == UINT_MAX) in nvme_config_write_zeroes()
1884 max_blocks = ns->ctrl->max_hw_sectors + 1; in nvme_config_write_zeroes()
1905 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_setup_streams_ns() argument
1911 if (!ctrl->nr_streams) in nvme_setup_streams_ns()
1914 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); in nvme_setup_streams_ns()
1932 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_configure_metadata() local
1945 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) in nvme_configure_metadata()
1947 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_configure_metadata()
1955 if (ctrl->max_integrity_segments) in nvme_configure_metadata()
1974 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, in nvme_set_queue_limits() argument
1977 bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; in nvme_set_queue_limits()
1979 if (ctrl->max_hw_sectors) { in nvme_set_queue_limits()
1981 (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; in nvme_set_queue_limits()
1983 max_segments = min_not_zero(max_segments, ctrl->max_segments); in nvme_set_queue_limits()
1984 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); in nvme_set_queue_limits()
2011 nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt); in nvme_update_disk_info()
2021 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; in nvme_update_disk_info()
2051 ns->ctrl->max_integrity_segments); in nvme_update_disk_info()
2073 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_set_chunk_sectors() local
2076 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && in nvme_set_chunk_sectors()
2077 is_power_of_2(ctrl->max_hw_sectors)) in nvme_set_chunk_sectors()
2078 iob = ctrl->max_hw_sectors; in nvme_set_chunk_sectors()
2109 nvme_set_queue_limits(ns->ctrl, ns->queue); in nvme_update_ns_info()
2252 struct nvme_ctrl *ctrl = data; in nvme_sec_submit() local
2264 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, in nvme_sec_submit()
2309 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) in nvme_wait_ready() argument
2316 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { in nvme_wait_ready()
2326 dev_err(ctrl->device, in nvme_wait_ready()
2342 int nvme_disable_ctrl(struct nvme_ctrl *ctrl) in nvme_disable_ctrl() argument
2346 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; in nvme_disable_ctrl()
2347 ctrl->ctrl_config &= ~NVME_CC_ENABLE; in nvme_disable_ctrl()
2349 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_disable_ctrl()
2353 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) in nvme_disable_ctrl()
2356 return nvme_wait_ready(ctrl, ctrl->cap, false); in nvme_disable_ctrl()
2360 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) in nvme_enable_ctrl() argument
2365 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); in nvme_enable_ctrl()
2367 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); in nvme_enable_ctrl()
2370 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; in nvme_enable_ctrl()
2373 dev_err(ctrl->device, in nvme_enable_ctrl()
2379 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) in nvme_enable_ctrl()
2380 ctrl->ctrl_config = NVME_CC_CSS_CSI; in nvme_enable_ctrl()
2382 ctrl->ctrl_config = NVME_CC_CSS_NVM; in nvme_enable_ctrl()
2383 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; in nvme_enable_ctrl()
2384 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; in nvme_enable_ctrl()
2385 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; in nvme_enable_ctrl()
2386 ctrl->ctrl_config |= NVME_CC_ENABLE; in nvme_enable_ctrl()
2388 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_enable_ctrl()
2391 return nvme_wait_ready(ctrl, ctrl->cap, true); in nvme_enable_ctrl()
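
nvme_enable_ctrl() and nvme_disable_ctrl() above both end in nvme_wait_ready(): after CC.EN is written, CSTS.RDY is polled until it matches the requested state or a timeout derived from CAP.TO (8 bits at offset 24, in 500 ms units) expires. A self-contained polling sketch follows; the register-read callback and the 100 ms poll interval are assumptions for illustration.

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    #define CSTS_RDY    0x1u

    /*
     * Poll CSTS.RDY until it reaches the state requested via CC.EN or the
     * CAP.TO-derived timeout expires. 'read_csts' is a caller-supplied
     * register read; an all-ones value models a device that went away.
     */
    static int wait_ready(uint64_t cap, bool enabled, uint32_t (*read_csts)(void))
    {
        unsigned int to = (cap >> 24) & 0xff;       /* CAP.TO, 500 ms units */
        long remaining_ms = ((long)to + 1) * 500;
        uint32_t want = enabled ? CSTS_RDY : 0;
        const struct timespec poll = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

        while (remaining_ms > 0) {
            uint32_t csts = read_csts();

            if (csts == 0xffffffffu)
                return -1;                          /* device unreachable */
            if ((csts & CSTS_RDY) == want)
                return 0;

            nanosleep(&poll, NULL);                 /* ~100 ms between polls */
            remaining_ms -= 100;
        }
        return -1;                                  /* not ready in time */
    }
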
2395 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) in nvme_shutdown_ctrl() argument
2397 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); in nvme_shutdown_ctrl()
2401 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; in nvme_shutdown_ctrl()
2402 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; in nvme_shutdown_ctrl()
2404 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_shutdown_ctrl()
2408 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { in nvme_shutdown_ctrl()
2416 dev_err(ctrl->device, in nvme_shutdown_ctrl()
2426 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) in nvme_configure_timestamp() argument
2431 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) in nvme_configure_timestamp()
2435 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), in nvme_configure_timestamp()
2438 dev_warn_once(ctrl->device, in nvme_configure_timestamp()
2443 static int nvme_configure_acre(struct nvme_ctrl *ctrl) in nvme_configure_acre() argument
2449 if (!ctrl->crdt[0]) in nvme_configure_acre()
2457 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, in nvme_configure_acre()
2463 static int nvme_configure_apst(struct nvme_ctrl *ctrl) in nvme_configure_apst() argument
2491 if (!ctrl->apsta) in nvme_configure_apst()
2494 if (ctrl->npss > 31) { in nvme_configure_apst()
2495 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); in nvme_configure_apst()
2503 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { in nvme_configure_apst()
2506 dev_dbg(ctrl->device, "APST disabled\n"); in nvme_configure_apst()
2517 for (state = (int)ctrl->npss; state >= 0; state--) { in nvme_configure_apst()
2527 if (state == ctrl->npss && in nvme_configure_apst()
2528 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) in nvme_configure_apst()
2535 if (!(ctrl->psd[state].flags & in nvme_configure_apst()
2540 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); in nvme_configure_apst()
2541 if (exit_latency_us > ctrl->ps_max_latency_us) in nvme_configure_apst()
2546 le32_to_cpu(ctrl->psd[state].entry_lat); in nvme_configure_apst()
2570 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); in nvme_configure_apst()
2572 …dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n… in nvme_configure_apst()
2577 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, in nvme_configure_apst()
2580 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); in nvme_configure_apst()
2588 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_set_latency_tolerance() local
2601 if (ctrl->ps_max_latency_us != latency) { in nvme_set_latency_tolerance()
2602 ctrl->ps_max_latency_us = latency; in nvme_set_latency_tolerance()
2603 nvme_configure_apst(ctrl); in nvme_set_latency_tolerance()
2670 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, in nvme_init_subnqn() argument
2676 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { in nvme_init_subnqn()
2683 if (ctrl->vs >= NVME_VS(1, 2, 1)) in nvme_init_subnqn()
2684 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); in nvme_init_subnqn()
2806 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) in nvme_validate_cntlid() argument
2816 if (tmp->cntlid == ctrl->cntlid) { in nvme_validate_cntlid()
2817 dev_err(ctrl->device, in nvme_validate_cntlid()
2819 ctrl->cntlid, dev_name(tmp->device)); in nvme_validate_cntlid()
2824 (ctrl->opts && ctrl->opts->discovery_nqn)) in nvme_validate_cntlid()
2827 dev_err(ctrl->device, in nvme_validate_cntlid()
2835 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) in nvme_init_subsystem() argument
2849 nvme_init_subnqn(subsys, ctrl, id); in nvme_init_subsystem()
2863 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); in nvme_init_subsystem()
2872 if (!nvme_validate_cntlid(subsys, ctrl, id)) { in nvme_init_subsystem()
2879 dev_err(ctrl->device, in nvme_init_subsystem()
2888 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, in nvme_init_subsystem()
2889 dev_name(ctrl->device)); in nvme_init_subsystem()
2891 dev_err(ctrl->device, in nvme_init_subsystem()
2897 subsys->instance = ctrl->instance; in nvme_init_subsystem()
2898 ctrl->subsys = subsys; in nvme_init_subsystem()
2899 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvme_init_subsystem()
2910 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, in nvme_get_log() argument
2926 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); in nvme_get_log()
2929 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, in nvme_get_effects_log() argument
2932 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); in nvme_get_effects_log()
2942 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, in nvme_get_effects_log()
2949 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); in nvme_get_effects_log()
2960 int nvme_init_identify(struct nvme_ctrl *ctrl) in nvme_init_identify() argument
2967 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); in nvme_init_identify()
2969 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); in nvme_init_identify()
2972 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; in nvme_init_identify()
2973 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); in nvme_init_identify()
2975 if (ctrl->vs >= NVME_VS(1, 1, 0)) in nvme_init_identify()
2976 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); in nvme_init_identify()
2978 ret = nvme_identify_ctrl(ctrl, &id); in nvme_init_identify()
2980 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); in nvme_init_identify()
2985 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); in nvme_init_identify()
2990 if (!(ctrl->ops->flags & NVME_F_FABRICS)) in nvme_init_identify()
2991 ctrl->cntlid = le16_to_cpu(id->cntlid); in nvme_init_identify()
2993 if (!ctrl->identified) { in nvme_init_identify()
2996 ret = nvme_init_subsystem(ctrl, id); in nvme_init_identify()
3010 ctrl->quirks |= core_quirks[i].quirks; in nvme_init_identify()
3014 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { in nvme_init_identify()
3015 …dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at y… in nvme_init_identify()
3016 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; in nvme_init_identify()
3019 ctrl->crdt[0] = le16_to_cpu(id->crdt1); in nvme_init_identify()
3020 ctrl->crdt[1] = le16_to_cpu(id->crdt2); in nvme_init_identify()
3021 ctrl->crdt[2] = le16_to_cpu(id->crdt3); in nvme_init_identify()
3023 ctrl->oacs = le16_to_cpu(id->oacs); in nvme_init_identify()
3024 ctrl->oncs = le16_to_cpu(id->oncs); in nvme_init_identify()
3025 ctrl->mtfa = le16_to_cpu(id->mtfa); in nvme_init_identify()
3026 ctrl->oaes = le32_to_cpu(id->oaes); in nvme_init_identify()
3027 ctrl->wctemp = le16_to_cpu(id->wctemp); in nvme_init_identify()
3028 ctrl->cctemp = le16_to_cpu(id->cctemp); in nvme_init_identify()
3030 atomic_set(&ctrl->abort_limit, id->acl + 1); in nvme_init_identify()
3031 ctrl->vwc = id->vwc; in nvme_init_identify()
3036 ctrl->max_hw_sectors = in nvme_init_identify()
3037 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); in nvme_init_identify()
3039 nvme_set_queue_limits(ctrl, ctrl->admin_q); in nvme_init_identify()
3040 ctrl->sgls = le32_to_cpu(id->sgls); in nvme_init_identify()
3041 ctrl->kas = le16_to_cpu(id->kas); in nvme_init_identify()
3042 ctrl->max_namespaces = le32_to_cpu(id->mnan); in nvme_init_identify()
3043 ctrl->ctratt = le32_to_cpu(id->ctratt); in nvme_init_identify()
3049 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, in nvme_init_identify()
3052 if (ctrl->shutdown_timeout != shutdown_timeout) in nvme_init_identify()
3053 dev_info(ctrl->device, in nvme_init_identify()
3055 ctrl->shutdown_timeout); in nvme_init_identify()
3057 ctrl->shutdown_timeout = shutdown_timeout; in nvme_init_identify()
3059 ctrl->npss = id->npss; in nvme_init_identify()
3060 ctrl->apsta = id->apsta; in nvme_init_identify()
3061 prev_apst_enabled = ctrl->apst_enabled; in nvme_init_identify()
3062 if (ctrl->quirks & NVME_QUIRK_NO_APST) { in nvme_init_identify()
3064 …dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk… in nvme_init_identify()
3065 ctrl->apst_enabled = true; in nvme_init_identify()
3067 ctrl->apst_enabled = false; in nvme_init_identify()
3070 ctrl->apst_enabled = id->apsta; in nvme_init_identify()
3072 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); in nvme_init_identify()
3074 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_init_identify()
3075 ctrl->icdoff = le16_to_cpu(id->icdoff); in nvme_init_identify()
3076 ctrl->ioccsz = le32_to_cpu(id->ioccsz); in nvme_init_identify()
3077 ctrl->iorcsz = le32_to_cpu(id->iorcsz); in nvme_init_identify()
3078 ctrl->maxcmd = le16_to_cpu(id->maxcmd); in nvme_init_identify()
3084 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { in nvme_init_identify()
3085 dev_err(ctrl->device, in nvme_init_identify()
3088 ctrl->cntlid, le16_to_cpu(id->cntlid)); in nvme_init_identify()
3093 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { in nvme_init_identify()
3094 dev_err(ctrl->device, in nvme_init_identify()
3100 ctrl->hmpre = le32_to_cpu(id->hmpre); in nvme_init_identify()
3101 ctrl->hmmin = le32_to_cpu(id->hmmin); in nvme_init_identify()
3102 ctrl->hmminds = le32_to_cpu(id->hmminds); in nvme_init_identify()
3103 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); in nvme_init_identify()
3106 ret = nvme_mpath_init(ctrl, id); in nvme_init_identify()
3112 if (ctrl->apst_enabled && !prev_apst_enabled) in nvme_init_identify()
3113 dev_pm_qos_expose_latency_tolerance(ctrl->device); in nvme_init_identify()
3114 else if (!ctrl->apst_enabled && prev_apst_enabled) in nvme_init_identify()
3115 dev_pm_qos_hide_latency_tolerance(ctrl->device); in nvme_init_identify()
3117 ret = nvme_configure_apst(ctrl); in nvme_init_identify()
3121 ret = nvme_configure_timestamp(ctrl); in nvme_init_identify()
3125 ret = nvme_configure_directives(ctrl); in nvme_init_identify()
3129 ret = nvme_configure_acre(ctrl); in nvme_init_identify()
3133 if (!ctrl->identified) { in nvme_init_identify()
3134 ret = nvme_hwmon_init(ctrl); in nvme_init_identify()
3139 ctrl->identified = true; in nvme_init_identify()
3151 struct nvme_ctrl *ctrl = in nvme_dev_open() local
3154 switch (ctrl->state) { in nvme_dev_open()
3161 nvme_get_ctrl(ctrl); in nvme_dev_open()
3162 if (!try_module_get(ctrl->ops->module)) { in nvme_dev_open()
3163 nvme_put_ctrl(ctrl); in nvme_dev_open()
3167 file->private_data = ctrl; in nvme_dev_open()
3173 struct nvme_ctrl *ctrl = in nvme_dev_release() local
3176 module_put(ctrl->ops->module); in nvme_dev_release()
3177 nvme_put_ctrl(ctrl); in nvme_dev_release()
3181 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) in nvme_dev_user_cmd() argument
3186 down_read(&ctrl->namespaces_rwsem); in nvme_dev_user_cmd()
3187 if (list_empty(&ctrl->namespaces)) { in nvme_dev_user_cmd()
3192 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); in nvme_dev_user_cmd()
3193 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { in nvme_dev_user_cmd()
3194 dev_warn(ctrl->device, in nvme_dev_user_cmd()
3200 dev_warn(ctrl->device, in nvme_dev_user_cmd()
3203 up_read(&ctrl->namespaces_rwsem); in nvme_dev_user_cmd()
3205 ret = nvme_user_cmd(ctrl, ns, argp); in nvme_dev_user_cmd()
3210 up_read(&ctrl->namespaces_rwsem); in nvme_dev_user_cmd()
3217 struct nvme_ctrl *ctrl = file->private_data; in nvme_dev_ioctl() local
3222 return nvme_user_cmd(ctrl, NULL, argp); in nvme_dev_ioctl()
3224 return nvme_user_cmd64(ctrl, NULL, argp); in nvme_dev_ioctl()
3226 return nvme_dev_user_cmd(ctrl, argp); in nvme_dev_ioctl()
3228 dev_warn(ctrl->device, "resetting controller\n"); in nvme_dev_ioctl()
3229 return nvme_reset_ctrl_sync(ctrl); in nvme_dev_ioctl()
3231 return nvme_reset_subsystem(ctrl); in nvme_dev_ioctl()
3233 nvme_queue_scan(ctrl); in nvme_dev_ioctl()
3252 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_reset() local
3255 ret = nvme_reset_ctrl_sync(ctrl); in nvme_sysfs_reset()
3266 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_rescan() local
3268 nvme_queue_scan(ctrl); in nvme_sysfs_rescan()
3388 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) in nvme_ns_id_attrs_are_visible()
3412 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3414 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
3426 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3427 return sprintf(buf, "%d\n", ctrl->field); \
3440 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_delete() local
3443 nvme_delete_ctrl_sync(ctrl); in nvme_sysfs_delete()
3452 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_transport() local
3454 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); in nvme_sysfs_show_transport()
3462 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_state() local
3473 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && in nvme_sysfs_show_state()
3474 state_name[ctrl->state]) in nvme_sysfs_show_state()
3475 return sprintf(buf, "%s\n", state_name[ctrl->state]); in nvme_sysfs_show_state()
3486 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_subsysnqn() local
3488 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); in nvme_sysfs_show_subsysnqn()
3496 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_hostnqn() local
3498 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn); in nvme_sysfs_show_hostnqn()
3506 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_hostid() local
3508 return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id); in nvme_sysfs_show_hostid()
3516 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_address() local
3518 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); in nvme_sysfs_show_address()
3525 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_ctrl_loss_tmo_show() local
3526 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_ctrl_loss_tmo_show()
3528 if (ctrl->opts->max_reconnects == -1) in nvme_ctrl_loss_tmo_show()
3537 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_ctrl_loss_tmo_store() local
3538 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_ctrl_loss_tmo_store()
3558 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_ctrl_reconnect_delay_show() local
3560 if (ctrl->opts->reconnect_delay == -1) in nvme_ctrl_reconnect_delay_show()
3562 return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay); in nvme_ctrl_reconnect_delay_show()
3568 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_ctrl_reconnect_delay_store() local
3576 ctrl->opts->reconnect_delay = v; in nvme_ctrl_reconnect_delay_store()
3608 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_dev_attrs_are_visible() local
3610 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) in nvme_dev_attrs_are_visible()
3612 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) in nvme_dev_attrs_are_visible()
3614 if (a == &dev_attr_hostnqn.attr && !ctrl->opts) in nvme_dev_attrs_are_visible()
3616 if (a == &dev_attr_hostid.attr && !ctrl->opts) in nvme_dev_attrs_are_visible()
3618 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) in nvme_dev_attrs_are_visible()
3620 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) in nvme_dev_attrs_are_visible()
3667 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, in nvme_alloc_ns_head() argument
3681 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); in nvme_alloc_ns_head()
3689 head->subsys = ctrl->subsys; in nvme_alloc_ns_head()
3694 ret = __nvme_check_ids(ctrl->subsys, head); in nvme_alloc_ns_head()
3696 dev_err(ctrl->device, in nvme_alloc_ns_head()
3702 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); in nvme_alloc_ns_head()
3706 head->effects = ctrl->effects; in nvme_alloc_ns_head()
3708 ret = nvme_mpath_alloc_disk(ctrl, head); in nvme_alloc_ns_head()
3712 list_add_tail(&head->entry, &ctrl->subsys->nsheads); in nvme_alloc_ns_head()
3714 kref_get(&ctrl->subsys->ref); in nvme_alloc_ns_head()
3720 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); in nvme_alloc_ns_head()
3732 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_init_ns_head() local
3736 mutex_lock(&ctrl->subsys->lock); in nvme_init_ns_head()
3737 head = nvme_find_ns_head(ctrl->subsys, nsid); in nvme_init_ns_head()
3739 head = nvme_alloc_ns_head(ctrl, nsid, ids); in nvme_init_ns_head()
3748 dev_err(ctrl->device, in nvme_init_ns_head()
3753 dev_err(ctrl->device, in nvme_init_ns_head()
3762 mutex_unlock(&ctrl->subsys->lock); in nvme_init_ns_head()
3768 mutex_unlock(&ctrl->subsys->lock); in nvme_init_ns_head()
3780 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) in nvme_find_get_ns() argument
3784 down_read(&ctrl->namespaces_rwsem); in nvme_find_get_ns()
3785 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_find_get_ns()
3795 up_read(&ctrl->namespaces_rwsem); in nvme_find_get_ns()
3800 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid, in nvme_alloc_ns() argument
3807 int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; in nvme_alloc_ns()
3809 if (nvme_identify_ns(ctrl, nsid, ids, &id)) in nvme_alloc_ns()
3816 ns->queue = blk_mq_init_queue(ctrl->tagset); in nvme_alloc_ns()
3820 if (ctrl->opts && ctrl->opts->data_digest) in nvme_alloc_ns()
3824 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) in nvme_alloc_ns()
3828 ns->ctrl = ctrl; in nvme_alloc_ns()
3834 nvme_set_disk_name(disk_name, ns, ctrl, &flags); in nvme_alloc_ns()
3850 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { in nvme_alloc_ns()
3853 dev_warn(ctrl->device, "LightNVM init failure\n"); in nvme_alloc_ns()
3858 down_write(&ctrl->namespaces_rwsem); in nvme_alloc_ns()
3859 list_add_tail(&ns->list, &ctrl->namespaces); in nvme_alloc_ns()
3860 up_write(&ctrl->namespaces_rwsem); in nvme_alloc_ns()
3862 nvme_get_ctrl(ctrl); in nvme_alloc_ns()
3864 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); in nvme_alloc_ns()
3876 mutex_lock(&ctrl->subsys->lock); in nvme_alloc_ns()
3880 mutex_unlock(&ctrl->subsys->lock); in nvme_alloc_ns()
3898 mutex_lock(&ns->ctrl->subsys->lock); in nvme_ns_remove()
3902 mutex_unlock(&ns->ctrl->subsys->lock); in nvme_ns_remove()
3915 down_write(&ns->ctrl->namespaces_rwsem); in nvme_ns_remove()
3917 up_write(&ns->ctrl->namespaces_rwsem); in nvme_ns_remove()
3923 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) in nvme_ns_remove_by_nsid() argument
3925 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); in nvme_ns_remove_by_nsid()
3941 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id); in nvme_validate_ns()
3947 dev_err(ns->ctrl->device, in nvme_validate_ns()
3969 static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) in nvme_validate_or_alloc_ns() argument
3974 if (nvme_identify_ns_descs(ctrl, nsid, &ids)) in nvme_validate_or_alloc_ns()
3977 ns = nvme_find_get_ns(ctrl, nsid); in nvme_validate_or_alloc_ns()
3986 nvme_alloc_ns(ctrl, nsid, &ids); in nvme_validate_or_alloc_ns()
3990 dev_warn(ctrl->device, in nvme_validate_or_alloc_ns()
3995 nvme_alloc_ns(ctrl, nsid, &ids); in nvme_validate_or_alloc_ns()
3998 dev_warn(ctrl->device, "unknown csi %u for nsid %u\n", in nvme_validate_or_alloc_ns()
4004 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, in nvme_remove_invalid_namespaces() argument
4010 down_write(&ctrl->namespaces_rwsem); in nvme_remove_invalid_namespaces()
4011 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { in nvme_remove_invalid_namespaces()
4015 up_write(&ctrl->namespaces_rwsem); in nvme_remove_invalid_namespaces()
4022 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) in nvme_scan_ns_list() argument
4029 if (nvme_ctrl_limited_cns(ctrl)) in nvme_scan_ns_list()
4043 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, in nvme_scan_ns_list()
4053 nvme_validate_or_alloc_ns(ctrl, nsid); in nvme_scan_ns_list()
4055 nvme_ns_remove_by_nsid(ctrl, prev); in nvme_scan_ns_list()
4059 nvme_remove_invalid_namespaces(ctrl, prev); in nvme_scan_ns_list()
4065 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) in nvme_scan_ns_sequential() argument
4070 if (nvme_identify_ctrl(ctrl, &id)) in nvme_scan_ns_sequential()
4076 nvme_validate_or_alloc_ns(ctrl, i); in nvme_scan_ns_sequential()
4078 nvme_remove_invalid_namespaces(ctrl, nn); in nvme_scan_ns_sequential()
4081 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) in nvme_clear_changed_ns_log() argument
4097 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, in nvme_clear_changed_ns_log()
4100 dev_warn(ctrl->device, in nvme_clear_changed_ns_log()
4108 struct nvme_ctrl *ctrl = in nvme_scan_work() local
4111 /* No tagset on a live ctrl means IO queues could not be created */ in nvme_scan_work()
4112 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) in nvme_scan_work()
4115 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { in nvme_scan_work()
4116 dev_info(ctrl->device, "rescanning namespaces.\n"); in nvme_scan_work()
4117 nvme_clear_changed_ns_log(ctrl); in nvme_scan_work()
4120 mutex_lock(&ctrl->scan_lock); in nvme_scan_work()
4121 if (nvme_scan_ns_list(ctrl) != 0) in nvme_scan_work()
4122 nvme_scan_ns_sequential(ctrl); in nvme_scan_work()
4123 mutex_unlock(&ctrl->scan_lock); in nvme_scan_work()
4125 down_write(&ctrl->namespaces_rwsem); in nvme_scan_work()
4126 list_sort(NULL, &ctrl->namespaces, ns_cmp); in nvme_scan_work()
4127 up_write(&ctrl->namespaces_rwsem); in nvme_scan_work()
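
nvme_scan_work() above prefers the Identify active-namespace-ID-list scan (nvme_scan_ns_list) and falls back to probing NSIDs 1..NN sequentially (nvme_scan_ns_sequential) on controllers with limited CNS support, then removes namespaces the controller no longer reports. Below is a simplified sketch of that strategy; the helpers are hypothetical stand-ins, and the per-gap removal the real list walk performs between reported NSIDs is collapsed into a single cleanup call.

    #include <stddef.h>
    #include <stdint.h>

    #define NS_LIST_ENTRIES 1024    /* one 4 KiB Identify page holds 1024 NSIDs */

    /* Hypothetical stand-ins for the Identify commands and per-namespace work. */
    static int identify_ns_list(uint32_t prev, uint32_t list[NS_LIST_ENTRIES])
    {
        (void)prev; (void)list;
        return -1;                  /* pretend the list form is unsupported */
    }
    static uint32_t identify_ctrl_nn(void) { return 0; }   /* Identify Controller NN */
    static void validate_or_alloc_ns(uint32_t nsid) { (void)nsid; }
    static void remove_stale_namespaces(uint32_t last_reported) { (void)last_reported; }

    /* Preferred path: page through NSIDs greater than 'prev' until a zero entry. */
    static int scan_ns_list(void)
    {
        uint32_t list[NS_LIST_ENTRIES];
        uint32_t prev = 0;

        for (;;) {
            if (identify_ns_list(prev, list) != 0)
                return -1;          /* caller falls back to the sequential scan */
            for (size_t i = 0; i < NS_LIST_ENTRIES; i++) {
                uint32_t nsid = list[i];

                if (nsid == 0) {    /* a zero entry terminates the list */
                    remove_stale_namespaces(prev);
                    return 0;
                }
                validate_or_alloc_ns(nsid);
                prev = nsid;
            }
        }
    }

    /* Fallback for old controllers: probe every possible NSID up to NN. */
    static void scan_ns_sequential(void)
    {
        uint32_t nn = identify_ctrl_nn();

        for (uint32_t nsid = 1; nsid <= nn; nsid++)
            validate_or_alloc_ns(nsid);
        remove_stale_namespaces(nn);
    }

    static void scan_namespaces(void)
    {
        if (scan_ns_list() != 0)
            scan_ns_sequential();
    }
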
4135 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) in nvme_remove_namespaces() argument
4145 nvme_mpath_clear_ctrl_paths(ctrl); in nvme_remove_namespaces()
4148 flush_work(&ctrl->scan_work); in nvme_remove_namespaces()
4156 if (ctrl->state == NVME_CTRL_DEAD) in nvme_remove_namespaces()
4157 nvme_kill_queues(ctrl); in nvme_remove_namespaces()
4160 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); in nvme_remove_namespaces()
4162 down_write(&ctrl->namespaces_rwsem); in nvme_remove_namespaces()
4163 list_splice_init(&ctrl->namespaces, &ns_list); in nvme_remove_namespaces()
4164 up_write(&ctrl->namespaces_rwsem); in nvme_remove_namespaces()
4173 struct nvme_ctrl *ctrl = in nvme_class_uevent() local
4175 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_class_uevent()
4178 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); in nvme_class_uevent()
4198 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) in nvme_aen_uevent() argument
4201 u32 aen_result = ctrl->aen_result; in nvme_aen_uevent()
4203 ctrl->aen_result = 0; in nvme_aen_uevent()
4210 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); in nvme_aen_uevent()
4216 struct nvme_ctrl *ctrl = in nvme_async_event_work() local
4219 nvme_aen_uevent(ctrl); in nvme_async_event_work()
4220 ctrl->ops->submit_async_event(ctrl); in nvme_async_event_work()
4223 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) in nvme_ctrl_pp_status() argument
4228 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) in nvme_ctrl_pp_status()
4234 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); in nvme_ctrl_pp_status()
4237 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) in nvme_get_fw_slot_info() argument
4245 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, in nvme_get_fw_slot_info()
4247 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); in nvme_get_fw_slot_info()
4253 struct nvme_ctrl *ctrl = container_of(work, in nvme_fw_act_work() local
4257 if (ctrl->mtfa) in nvme_fw_act_work()
4259 msecs_to_jiffies(ctrl->mtfa * 100); in nvme_fw_act_work()
4264 nvme_stop_queues(ctrl); in nvme_fw_act_work()
4265 while (nvme_ctrl_pp_status(ctrl)) { in nvme_fw_act_work()
4267 dev_warn(ctrl->device, in nvme_fw_act_work()
4269 nvme_try_sched_reset(ctrl); in nvme_fw_act_work()
4275 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) in nvme_fw_act_work()
4278 nvme_start_queues(ctrl); in nvme_fw_act_work()
4280 nvme_get_fw_slot_info(ctrl); in nvme_fw_act_work()
4283 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) in nvme_handle_aen_notice() argument
4287 trace_nvme_async_event(ctrl, aer_notice_type); in nvme_handle_aen_notice()
4291 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); in nvme_handle_aen_notice()
4292 nvme_queue_scan(ctrl); in nvme_handle_aen_notice()
4300 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_handle_aen_notice()
4301 queue_work(nvme_wq, &ctrl->fw_act_work); in nvme_handle_aen_notice()
4305 if (!ctrl->ana_log_buf) in nvme_handle_aen_notice()
4307 queue_work(nvme_wq, &ctrl->ana_work); in nvme_handle_aen_notice()
4311 ctrl->aen_result = result; in nvme_handle_aen_notice()
4314 dev_warn(ctrl->device, "async event result %08x\n", result); in nvme_handle_aen_notice()
4318 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, in nvme_complete_async_event() argument
4329 nvme_handle_aen_notice(ctrl, result); in nvme_complete_async_event()
4335 trace_nvme_async_event(ctrl, aer_type); in nvme_complete_async_event()
4336 ctrl->aen_result = result; in nvme_complete_async_event()
4341 queue_work(nvme_wq, &ctrl->async_event_work); in nvme_complete_async_event()
4345 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) in nvme_stop_ctrl() argument
4347 nvme_mpath_stop(ctrl); in nvme_stop_ctrl()
4348 nvme_stop_keep_alive(ctrl); in nvme_stop_ctrl()
4349 flush_work(&ctrl->async_event_work); in nvme_stop_ctrl()
4350 cancel_work_sync(&ctrl->fw_act_work); in nvme_stop_ctrl()
4354 void nvme_start_ctrl(struct nvme_ctrl *ctrl) in nvme_start_ctrl() argument
4356 nvme_start_keep_alive(ctrl); in nvme_start_ctrl()
4358 nvme_enable_aen(ctrl); in nvme_start_ctrl()
4360 if (ctrl->queue_count > 1) { in nvme_start_ctrl()
4361 nvme_queue_scan(ctrl); in nvme_start_ctrl()
4362 nvme_start_queues(ctrl); in nvme_start_ctrl()
4367 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) in nvme_uninit_ctrl() argument
4369 nvme_fault_inject_fini(&ctrl->fault_inject); in nvme_uninit_ctrl()
4370 dev_pm_qos_hide_latency_tolerance(ctrl->device); in nvme_uninit_ctrl()
4371 cdev_device_del(&ctrl->cdev, ctrl->device); in nvme_uninit_ctrl()
4372 nvme_put_ctrl(ctrl); in nvme_uninit_ctrl()
4376 static void nvme_free_cels(struct nvme_ctrl *ctrl) in nvme_free_cels() argument
4381 xa_for_each (&ctrl->cels, i, cel) { in nvme_free_cels()
4382 xa_erase(&ctrl->cels, i); in nvme_free_cels()
4386 xa_destroy(&ctrl->cels); in nvme_free_cels()
4391 struct nvme_ctrl *ctrl = in nvme_free_ctrl() local
4393 struct nvme_subsystem *subsys = ctrl->subsys; in nvme_free_ctrl()
4395 if (!subsys || ctrl->instance != subsys->instance) in nvme_free_ctrl()
4396 ida_simple_remove(&nvme_instance_ida, ctrl->instance); in nvme_free_ctrl()
4398 nvme_free_cels(ctrl); in nvme_free_ctrl()
4399 nvme_mpath_uninit(ctrl); in nvme_free_ctrl()
4400 __free_page(ctrl->discard_page); in nvme_free_ctrl()
4404 list_del(&ctrl->subsys_entry); in nvme_free_ctrl()
4405 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); in nvme_free_ctrl()
4409 ctrl->ops->free_ctrl(ctrl); in nvme_free_ctrl()
4420 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, in nvme_init_ctrl() argument
4425 ctrl->state = NVME_CTRL_NEW; in nvme_init_ctrl()
4426 spin_lock_init(&ctrl->lock); in nvme_init_ctrl()
4427 mutex_init(&ctrl->scan_lock); in nvme_init_ctrl()
4428 INIT_LIST_HEAD(&ctrl->namespaces); in nvme_init_ctrl()
4429 xa_init(&ctrl->cels); in nvme_init_ctrl()
4430 init_rwsem(&ctrl->namespaces_rwsem); in nvme_init_ctrl()
4431 ctrl->dev = dev; in nvme_init_ctrl()
4432 ctrl->ops = ops; in nvme_init_ctrl()
4433 ctrl->quirks = quirks; in nvme_init_ctrl()
4434 ctrl->numa_node = NUMA_NO_NODE; in nvme_init_ctrl()
4435 INIT_WORK(&ctrl->scan_work, nvme_scan_work); in nvme_init_ctrl()
4436 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); in nvme_init_ctrl()
4437 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); in nvme_init_ctrl()
4438 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); in nvme_init_ctrl()
4439 init_waitqueue_head(&ctrl->state_wq); in nvme_init_ctrl()
4441 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); in nvme_init_ctrl()
4442 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); in nvme_init_ctrl()
4443 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; in nvme_init_ctrl()
4447 ctrl->discard_page = alloc_page(GFP_KERNEL); in nvme_init_ctrl()
4448 if (!ctrl->discard_page) { in nvme_init_ctrl()
4456 ctrl->instance = ret; in nvme_init_ctrl()
4458 device_initialize(&ctrl->ctrl_device); in nvme_init_ctrl()
4459 ctrl->device = &ctrl->ctrl_device; in nvme_init_ctrl()
4460 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); in nvme_init_ctrl()
4461 ctrl->device->class = nvme_class; in nvme_init_ctrl()
4462 ctrl->device->parent = ctrl->dev; in nvme_init_ctrl()
4463 ctrl->device->groups = nvme_dev_attr_groups; in nvme_init_ctrl()
4464 ctrl->device->release = nvme_free_ctrl; in nvme_init_ctrl()
4465 dev_set_drvdata(ctrl->device, ctrl); in nvme_init_ctrl()
4466 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); in nvme_init_ctrl()
4470 nvme_get_ctrl(ctrl); in nvme_init_ctrl()
4471 cdev_init(&ctrl->cdev, &nvme_dev_fops); in nvme_init_ctrl()
4472 ctrl->cdev.owner = ops->module; in nvme_init_ctrl()
4473 ret = cdev_device_add(&ctrl->cdev, ctrl->device); in nvme_init_ctrl()
4481 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; in nvme_init_ctrl()
4482 dev_pm_qos_update_user_latency_tolerance(ctrl->device, in nvme_init_ctrl()
4485 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); in nvme_init_ctrl()
4489 nvme_put_ctrl(ctrl); in nvme_init_ctrl()
4490 kfree_const(ctrl->device->kobj.name); in nvme_init_ctrl()
4492 ida_simple_remove(&nvme_instance_ida, ctrl->instance); in nvme_init_ctrl()
4494 if (ctrl->discard_page) in nvme_init_ctrl()
4495 __free_page(ctrl->discard_page); in nvme_init_ctrl()
4502 * @ctrl: the dead controller that needs to end
4507 void nvme_kill_queues(struct nvme_ctrl *ctrl) in nvme_kill_queues() argument
4511 down_read(&ctrl->namespaces_rwsem); in nvme_kill_queues()
4514 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) in nvme_kill_queues()
4515 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_kill_queues()
4517 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_kill_queues()
4520 up_read(&ctrl->namespaces_rwsem); in nvme_kill_queues()
4524 void nvme_unfreeze(struct nvme_ctrl *ctrl) in nvme_unfreeze() argument
4528 down_read(&ctrl->namespaces_rwsem); in nvme_unfreeze()
4529 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_unfreeze()
4531 up_read(&ctrl->namespaces_rwsem); in nvme_unfreeze()
4535 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) in nvme_wait_freeze_timeout() argument
4539 down_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze_timeout()
4540 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_wait_freeze_timeout()
4545 up_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze_timeout()
4550 void nvme_wait_freeze(struct nvme_ctrl *ctrl) in nvme_wait_freeze() argument
4554 down_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze()
4555 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_wait_freeze()
4557 up_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze()
4561 void nvme_start_freeze(struct nvme_ctrl *ctrl) in nvme_start_freeze() argument
4565 down_read(&ctrl->namespaces_rwsem); in nvme_start_freeze()
4566 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_start_freeze()
4568 up_read(&ctrl->namespaces_rwsem); in nvme_start_freeze()
4572 void nvme_stop_queues(struct nvme_ctrl *ctrl) in nvme_stop_queues() argument
4576 down_read(&ctrl->namespaces_rwsem); in nvme_stop_queues()
4577 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_stop_queues()
4579 up_read(&ctrl->namespaces_rwsem); in nvme_stop_queues()
4583 void nvme_start_queues(struct nvme_ctrl *ctrl) in nvme_start_queues() argument
4587 down_read(&ctrl->namespaces_rwsem); in nvme_start_queues()
4588 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_start_queues()
4590 up_read(&ctrl->namespaces_rwsem); in nvme_start_queues()
4594 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) in nvme_sync_io_queues() argument
4598 down_read(&ctrl->namespaces_rwsem); in nvme_sync_io_queues()
4599 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_sync_io_queues()
4601 up_read(&ctrl->namespaces_rwsem); in nvme_sync_io_queues()
4605 void nvme_sync_queues(struct nvme_ctrl *ctrl) in nvme_sync_queues() argument
4607 nvme_sync_io_queues(ctrl); in nvme_sync_queues()
4608 if (ctrl->admin_q) in nvme_sync_queues()
4609 blk_sync_queue(ctrl->admin_q); in nvme_sync_queues()