Lines Matching refs: ctrl
94 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
114 static void nvme_queue_scan(struct nvme_ctrl *ctrl) in nvme_queue_scan() argument
119 if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) in nvme_queue_scan()
120 queue_work(nvme_wq, &ctrl->scan_work); in nvme_queue_scan()
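
The fragments at 114-120 show that a namespace scan is only queued while the controller is LIVE and has an I/O tagset. A minimal reconstruction of the whole function; the comment is an assumption filling a line the listing elides:

/* Reconstructed from the fragments above; elided lines are assumptions. */
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/* Only queue scan work when admin and I/O queues are both usable. */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}
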
129 int nvme_try_sched_reset(struct nvme_ctrl *ctrl) in nvme_try_sched_reset() argument
131 if (ctrl->state != NVME_CTRL_RESETTING) in nvme_try_sched_reset()
133 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) in nvme_try_sched_reset()
139 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) in nvme_reset_ctrl() argument
141 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_reset_ctrl()
143 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) in nvme_reset_ctrl()
149 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) in nvme_reset_ctrl_sync() argument
153 ret = nvme_reset_ctrl(ctrl); in nvme_reset_ctrl_sync()
155 flush_work(&ctrl->reset_work); in nvme_reset_ctrl_sync()
156 if (ctrl->state != NVME_CTRL_LIVE) in nvme_reset_ctrl_sync()
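
Lines 139-156 pair an asynchronous reset (state transition plus work queueing) with a synchronous wrapper that flushes the work and checks the resulting state. A sketch of both; the specific error codes are assumptions, since the listing elides the return statements:

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;	/* assumed error code */
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;	/* assumed: a reset is already queued */
	return 0;
}

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = nvme_reset_ctrl(ctrl);

	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;	/* assumed error code */
	}
	return ret;
}
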
164 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_do_delete_ctrl() argument
166 dev_info(ctrl->device, in nvme_do_delete_ctrl()
167 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); in nvme_do_delete_ctrl()
169 flush_work(&ctrl->reset_work); in nvme_do_delete_ctrl()
170 nvme_stop_ctrl(ctrl); in nvme_do_delete_ctrl()
171 nvme_remove_namespaces(ctrl); in nvme_do_delete_ctrl()
172 ctrl->ops->delete_ctrl(ctrl); in nvme_do_delete_ctrl()
173 nvme_uninit_ctrl(ctrl); in nvme_do_delete_ctrl()
174 nvme_put_ctrl(ctrl); in nvme_do_delete_ctrl()
179 struct nvme_ctrl *ctrl = in nvme_delete_ctrl_work() local
182 nvme_do_delete_ctrl(ctrl); in nvme_delete_ctrl_work()
185 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_delete_ctrl() argument
187 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) in nvme_delete_ctrl()
189 if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) in nvme_delete_ctrl()
195 static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) in nvme_delete_ctrl_sync() argument
203 nvme_get_ctrl(ctrl); in nvme_delete_ctrl_sync()
204 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) in nvme_delete_ctrl_sync()
207 nvme_do_delete_ctrl(ctrl); in nvme_delete_ctrl_sync()
208 nvme_put_ctrl(ctrl); in nvme_delete_ctrl_sync()
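
nvme_delete_ctrl_sync() (lines 195-208) takes an extra reference before tearing the controller down, because nvme_do_delete_ctrl() drops the final reference via nvme_put_ctrl(). A reconstruction with the elided locals assumed:

static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Hold a reference until nvme_do_delete_ctrl() completes, since
	 * deletion could otherwise free the controller underneath us.
	 */
	nvme_get_ctrl(ctrl);
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		ret = -EBUSY;	/* assumed error code */
	if (!ret)
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
	return ret;
}
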
273 delay = ns->ctrl->crdt[crd - 1] * 100; in nvme_retry_req()
286 if (nvme_req(req)->ctrl->kas) in nvme_complete_rq()
287 nvme_req(req)->ctrl->comp_seen = true; in nvme_complete_rq()
322 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, in nvme_change_ctrl_state() argument
329 spin_lock_irqsave(&ctrl->lock, flags); in nvme_change_ctrl_state()
331 old_state = ctrl->state; in nvme_change_ctrl_state()
389 ctrl->state = new_state; in nvme_change_ctrl_state()
390 wake_up_all(&ctrl->state_wq); in nvme_change_ctrl_state()
393 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_change_ctrl_state()
394 if (changed && ctrl->state == NVME_CTRL_LIVE) in nvme_change_ctrl_state()
395 nvme_kick_requeue_lists(ctrl); in nvme_change_ctrl_state()
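
The state machine core (lines 322-395) validates transitions under ctrl->lock, wakes any waiters on state_wq, and kicks the multipath requeue lists once the controller goes LIVE. A skeleton; the per-transition table (lines 332-388, elided by this listing) is left as a comment:

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	old_state = ctrl->state;
	/* switch on new_state elided: only a fixed set of
	 * old_state -> new_state edges sets changed = true. */
	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
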
403 static bool nvme_state_terminal(struct nvme_ctrl *ctrl) in nvme_state_terminal() argument
405 switch (ctrl->state) { in nvme_state_terminal()
415 WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); in nvme_state_terminal()
424 bool nvme_wait_reset(struct nvme_ctrl *ctrl) in nvme_wait_reset() argument
426 wait_event(ctrl->state_wq, in nvme_wait_reset()
427 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || in nvme_wait_reset()
428 nvme_state_terminal(ctrl)); in nvme_wait_reset()
429 return ctrl->state == NVME_CTRL_RESETTING; in nvme_wait_reset()
460 nvme_put_ctrl(ns->ctrl); in nvme_free_ns()
501 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) in nvme_toggle_streams() argument
514 return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); in nvme_toggle_streams()
517 static int nvme_disable_streams(struct nvme_ctrl *ctrl) in nvme_disable_streams() argument
519 return nvme_toggle_streams(ctrl, false); in nvme_disable_streams()
522 static int nvme_enable_streams(struct nvme_ctrl *ctrl) in nvme_enable_streams() argument
524 return nvme_toggle_streams(ctrl, true); in nvme_enable_streams()
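
nvme_toggle_streams() (lines 501-514) builds a Directive Send admin command; the directive setup lines are elided by the listing, so the field assignments below are assumptions based on the streams directive definitions in include/linux/nvme.h:

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	/* Assumed directive setup (not shown in the listing). */
	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}
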
527 static int nvme_get_stream_params(struct nvme_ctrl *ctrl, in nvme_get_stream_params() argument
541 return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); in nvme_get_stream_params()
544 static int nvme_configure_directives(struct nvme_ctrl *ctrl) in nvme_configure_directives() argument
549 if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) in nvme_configure_directives()
554 ret = nvme_enable_streams(ctrl); in nvme_configure_directives()
558 ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); in nvme_configure_directives()
562 ctrl->nssa = le16_to_cpu(s.nssa); in nvme_configure_directives()
563 if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { in nvme_configure_directives()
564 dev_info(ctrl->device, "too few streams (%u) available\n", in nvme_configure_directives()
565 ctrl->nssa); in nvme_configure_directives()
566 nvme_disable_streams(ctrl); in nvme_configure_directives()
570 ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); in nvme_configure_directives()
571 dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); in nvme_configure_directives()
579 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, in nvme_assign_write_stream() argument
589 if (WARN_ON_ONCE(streamid > ctrl->nr_streams)) in nvme_assign_write_stream()
622 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) in nvme_setup_discard()
625 range = page_address(ns->ctrl->discard_page); in nvme_setup_discard()
641 if (virt_to_page(range) == ns->ctrl->discard_page) in nvme_setup_discard()
642 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); in nvme_setup_discard()
664 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) in nvme_setup_write_zeroes()
680 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_setup_rw() local
697 if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) in nvme_setup_rw()
698 nvme_assign_write_stream(ctrl, req, &control, &dsmgmt); in nvme_setup_rw()
737 if (page == ns->ctrl->discard_page) in nvme_cleanup_cmd()
738 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); in nvme_cleanup_cmd()
949 struct nvme_ctrl *ctrl = rq->end_io_data; in nvme_keep_alive_end_io() local
956 dev_err(ctrl->device, in nvme_keep_alive_end_io()
962 ctrl->comp_seen = false; in nvme_keep_alive_end_io()
963 spin_lock_irqsave(&ctrl->lock, flags); in nvme_keep_alive_end_io()
964 if (ctrl->state == NVME_CTRL_LIVE || in nvme_keep_alive_end_io()
965 ctrl->state == NVME_CTRL_CONNECTING) in nvme_keep_alive_end_io()
967 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_keep_alive_end_io()
969 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); in nvme_keep_alive_end_io()
972 static int nvme_keep_alive(struct nvme_ctrl *ctrl) in nvme_keep_alive() argument
976 rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED, in nvme_keep_alive()
981 rq->timeout = ctrl->kato * HZ; in nvme_keep_alive()
982 rq->end_io_data = ctrl; in nvme_keep_alive()
991 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), in nvme_keep_alive_work() local
993 bool comp_seen = ctrl->comp_seen; in nvme_keep_alive_work()
995 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { in nvme_keep_alive_work()
996 dev_dbg(ctrl->device, in nvme_keep_alive_work()
998 ctrl->comp_seen = false; in nvme_keep_alive_work()
999 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); in nvme_keep_alive_work()
1003 if (nvme_keep_alive(ctrl)) { in nvme_keep_alive_work()
1005 dev_err(ctrl->device, "keep-alive failed\n"); in nvme_keep_alive_work()
1006 nvme_reset_ctrl(ctrl); in nvme_keep_alive_work()
1011 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) in nvme_start_keep_alive() argument
1013 if (unlikely(ctrl->kato == 0)) in nvme_start_keep_alive()
1016 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); in nvme_start_keep_alive()
1019 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) in nvme_stop_keep_alive() argument
1021 if (unlikely(ctrl->kato == 0)) in nvme_stop_keep_alive()
1024 cancel_delayed_work_sync(&ctrl->ka_work); in nvme_stop_keep_alive()
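
The keep-alive path (lines 949-1024) allocates a reserved request from the admin queue, so a keep-alive can be sent even under memory pressure, and reschedules itself from the completion handler every kato seconds. A sketch of the submission side; the error check and the asynchronous execution call are assumptions where the listing elides them:

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))	/* assumed error handling */
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	/* assumed async submission; completion runs nvme_keep_alive_end_io() */
	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
	return 0;
}
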
1048 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, in nvme_identify_ns_descs() argument
1065 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, in nvme_identify_ns_descs()
1079 dev_warn(ctrl->device, in nvme_identify_ns_descs()
1089 dev_warn(ctrl->device, in nvme_identify_ns_descs()
1099 dev_warn(ctrl->device, in nvme_identify_ns_descs()
1131 static int nvme_identify_ns(struct nvme_ctrl *ctrl, in nvme_identify_ns() argument
1146 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); in nvme_identify_ns()
1148 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); in nvme_identify_ns()
1192 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) in nvme_set_queue_count() argument
1198 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, in nvme_set_queue_count()
1209 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); in nvme_set_queue_count()
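
nvme_set_queue_count() (lines 1192-1209) asks for I/O queues via the Number of Queues feature and tolerates command failure so a degraded controller can still expose its admin queue. A reconstruction; the q_count packing and the result decoding are assumptions based on the feature's SQ/CQ split:

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	/* Assumed: requested count - 1 packed into both halves (SQs/CQs). */
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	if (status > 0) {
		/* Degraded controller: keep admin access, report no I/O queues. */
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		/* Lower 16 bits: SQs allocated; upper 16 bits: CQs allocated. */
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}
	return 0;
}
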
1224 static void nvme_enable_aen(struct nvme_ctrl *ctrl) in nvme_enable_aen() argument
1226 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; in nvme_enable_aen()
1232 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, in nvme_enable_aen()
1235 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", in nvme_enable_aen()
1238 queue_work(nvme_wq, &ctrl->async_event_work); in nvme_enable_aen()
1305 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_passthru_start() argument
1311 if (ctrl->effects) in nvme_passthru_start()
1312 effects = le32_to_cpu(ctrl->effects->iocs[opcode]); in nvme_passthru_start()
1314 dev_warn(ctrl->device, in nvme_passthru_start()
1320 if (ctrl->effects) in nvme_passthru_start()
1321 effects = le32_to_cpu(ctrl->effects->acs[opcode]); in nvme_passthru_start()
1329 mutex_lock(&ctrl->scan_lock); in nvme_passthru_start()
1330 mutex_lock(&ctrl->subsys->lock); in nvme_passthru_start()
1331 nvme_mpath_start_freeze(ctrl->subsys); in nvme_passthru_start()
1332 nvme_mpath_wait_freeze(ctrl->subsys); in nvme_passthru_start()
1333 nvme_start_freeze(ctrl); in nvme_passthru_start()
1334 nvme_wait_freeze(ctrl); in nvme_passthru_start()
1339 static void nvme_update_formats(struct nvme_ctrl *ctrl) in nvme_update_formats() argument
1343 down_read(&ctrl->namespaces_rwsem); in nvme_update_formats()
1344 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_update_formats()
1347 up_read(&ctrl->namespaces_rwsem); in nvme_update_formats()
1350 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) in nvme_passthru_end() argument
1358 nvme_update_formats(ctrl); in nvme_passthru_end()
1360 nvme_unfreeze(ctrl); in nvme_passthru_end()
1361 nvme_mpath_unfreeze(ctrl->subsys); in nvme_passthru_end()
1362 mutex_unlock(&ctrl->subsys->lock); in nvme_passthru_end()
1363 nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); in nvme_passthru_end()
1364 mutex_unlock(&ctrl->scan_lock); in nvme_passthru_end()
1367 nvme_init_identify(ctrl); in nvme_passthru_end()
1369 nvme_queue_scan(ctrl); in nvme_passthru_end()
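
nvme_passthru_start()/nvme_passthru_end() (lines 1305-1369) bracket passthrough commands whose effects log reports side effects: namespaces are frozen before the command, then revalidated, unfrozen, and rescanned afterwards, releasing the locks in reverse order. A sketch of the end side; the effects-mask conditionals are assumptions where the listing shows only the calls:

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes before unfreezing, so a changed logical
	 * block size cannot corrupt in-flight I/O.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
		nvme_queue_scan(ctrl);
}
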
1372 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_user_cmd() argument
1405 effects = nvme_passthru_start(ctrl, ns, cmd.opcode); in nvme_user_cmd()
1406 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd()
1410 nvme_passthru_end(ctrl, effects); in nvme_user_cmd()
1420 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_user_cmd64() argument
1452 effects = nvme_passthru_start(ctrl, ns, cmd.opcode); in nvme_user_cmd64()
1453 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd64()
1457 nvme_passthru_end(ctrl, effects); in nvme_user_cmd64()
1511 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_handle_ctrl_ioctl() local
1514 nvme_get_ctrl(ns->ctrl); in nvme_handle_ctrl_ioctl()
1519 ret = nvme_user_cmd(ctrl, NULL, argp); in nvme_handle_ctrl_ioctl()
1522 ret = nvme_user_cmd64(ctrl, NULL, argp); in nvme_handle_ctrl_ioctl()
1525 ret = sed_ioctl(ctrl->opal_dev, cmd, argp); in nvme_handle_ctrl_ioctl()
1528 nvme_put_ctrl(ctrl); in nvme_handle_ctrl_ioctl()
1558 ret = nvme_user_cmd(ns->ctrl, ns, argp); in nvme_ioctl()
1564 ret = nvme_user_cmd64(ns->ctrl, ns, argp); in nvme_ioctl()
1588 if (!try_module_get(ns->ctrl->ops->module)) in nvme_open()
1603 module_put(ns->ctrl->ops->module); in nvme_release()
1656 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_config_discard() local
1660 if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) { in nvme_config_discard()
1665 if (ctrl->nr_streams && ns->sws && ns->sgs) in nvme_config_discard()
1681 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) in nvme_config_discard()
1690 if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || in nvme_config_write_zeroes()
1691 (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) in nvme_config_write_zeroes()
1703 if (ns->ctrl->max_hw_sectors == UINT_MAX) in nvme_config_write_zeroes()
1706 max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9; in nvme_config_write_zeroes()
1711 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, in nvme_report_ns_ids() argument
1718 if (ctrl->vs >= NVME_VS(1, 1, 0)) in nvme_report_ns_ids()
1720 if (ctrl->vs >= NVME_VS(1, 2, 0)) in nvme_report_ns_ids()
1722 if (ctrl->vs >= NVME_VS(1, 3, 0)) { in nvme_report_ns_ids()
1726 ret = nvme_identify_ns_descs(ctrl, nsid, ids); in nvme_report_ns_ids()
1728 dev_warn(ctrl->device, in nvme_report_ns_ids()
1771 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; in nvme_update_disk_info()
1795 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) in nvme_update_disk_info()
1849 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_revalidate_disk() local
1859 ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id); in nvme_revalidate_disk()
1869 ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); in nvme_revalidate_disk()
1874 dev_err(ctrl->device, in nvme_revalidate_disk()
1997 struct nvme_ctrl *ctrl = data; in nvme_sec_submit() local
2009 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, in nvme_sec_submit()
2052 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) in nvme_wait_ready() argument
2059 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { in nvme_wait_ready()
2069 dev_err(ctrl->device, in nvme_wait_ready()
2085 int nvme_disable_ctrl(struct nvme_ctrl *ctrl) in nvme_disable_ctrl() argument
2089 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; in nvme_disable_ctrl()
2090 ctrl->ctrl_config &= ~NVME_CC_ENABLE; in nvme_disable_ctrl()
2092 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_disable_ctrl()
2096 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) in nvme_disable_ctrl()
2099 return nvme_wait_ready(ctrl, ctrl->cap, false); in nvme_disable_ctrl()
2103 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) in nvme_enable_ctrl() argument
2113 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); in nvme_enable_ctrl()
2115 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); in nvme_enable_ctrl()
2118 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; in nvme_enable_ctrl()
2121 dev_err(ctrl->device, in nvme_enable_ctrl()
2127 ctrl->page_size = 1 << page_shift; in nvme_enable_ctrl()
2129 ctrl->ctrl_config = NVME_CC_CSS_NVM; in nvme_enable_ctrl()
2130 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; in nvme_enable_ctrl()
2131 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; in nvme_enable_ctrl()
2132 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; in nvme_enable_ctrl()
2133 ctrl->ctrl_config |= NVME_CC_ENABLE; in nvme_enable_ctrl()
2135 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_enable_ctrl()
2138 return nvme_wait_ready(ctrl, ctrl->cap, true); in nvme_enable_ctrl()
2142 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) in nvme_shutdown_ctrl() argument
2144 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); in nvme_shutdown_ctrl()
2148 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; in nvme_shutdown_ctrl()
2149 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; in nvme_shutdown_ctrl()
2151 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_shutdown_ctrl()
2155 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { in nvme_shutdown_ctrl()
2163 dev_err(ctrl->device, in nvme_shutdown_ctrl()
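
nvme_shutdown_ctrl() (lines 2142-2163) requests a normal shutdown via CC.SHN and then polls CSTS.SHST until the device reports completion or shutdown_timeout expires. A reconstruction; the SHST completion check and poll interval are assumptions filling lines the listing elides:

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);	/* assumed poll interval */
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}
	return ret;
}
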
2173 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, in nvme_set_queue_limits() argument
2178 if (ctrl->max_hw_sectors) { in nvme_set_queue_limits()
2180 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; in nvme_set_queue_limits()
2182 max_segments = min_not_zero(max_segments, ctrl->max_segments); in nvme_set_queue_limits()
2183 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); in nvme_set_queue_limits()
2186 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && in nvme_set_queue_limits()
2187 is_power_of_2(ctrl->max_hw_sectors)) in nvme_set_queue_limits()
2188 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); in nvme_set_queue_limits()
2189 blk_queue_virt_boundary(q, ctrl->page_size - 1); in nvme_set_queue_limits()
2190 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) in nvme_set_queue_limits()
2195 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) in nvme_configure_timestamp() argument
2200 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) in nvme_configure_timestamp()
2204 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), in nvme_configure_timestamp()
2207 dev_warn_once(ctrl->device, in nvme_configure_timestamp()
2212 static int nvme_configure_acre(struct nvme_ctrl *ctrl) in nvme_configure_acre() argument
2218 if (!ctrl->crdt[0]) in nvme_configure_acre()
2226 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, in nvme_configure_acre()
2232 static int nvme_configure_apst(struct nvme_ctrl *ctrl) in nvme_configure_apst() argument
2260 if (!ctrl->apsta) in nvme_configure_apst()
2263 if (ctrl->npss > 31) { in nvme_configure_apst()
2264 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); in nvme_configure_apst()
2272 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { in nvme_configure_apst()
2275 dev_dbg(ctrl->device, "APST disabled\n"); in nvme_configure_apst()
2286 for (state = (int)ctrl->npss; state >= 0; state--) { in nvme_configure_apst()
2296 if (state == ctrl->npss && in nvme_configure_apst()
2297 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) in nvme_configure_apst()
2304 if (!(ctrl->psd[state].flags & in nvme_configure_apst()
2309 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); in nvme_configure_apst()
2310 if (exit_latency_us > ctrl->ps_max_latency_us) in nvme_configure_apst()
2315 le32_to_cpu(ctrl->psd[state].entry_lat); in nvme_configure_apst()
2339 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); in nvme_configure_apst()
2341 …dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n… in nvme_configure_apst()
2346 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, in nvme_configure_apst()
2349 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); in nvme_configure_apst()
2357 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_set_latency_tolerance() local
2370 if (ctrl->ps_max_latency_us != latency) { in nvme_set_latency_tolerance()
2371 ctrl->ps_max_latency_us = latency; in nvme_set_latency_tolerance()
2372 nvme_configure_apst(ctrl); in nvme_set_latency_tolerance()
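
nvme_set_latency_tolerance() (lines 2357-2372) is the PM QoS callback: a new latency tolerance from userspace is translated into ps_max_latency_us, and APST is reprogrammed only when the value actually changed. A sketch; the mapping of the PM QoS sentinel values is an assumption:

static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;	/* assumed: no constraint, allow any state */
		break;
	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}
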
2449 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, in nvme_init_subnqn() argument
2455 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { in nvme_init_subnqn()
2462 if (ctrl->vs >= NVME_VS(1, 2, 1)) in nvme_init_subnqn()
2463 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); in nvme_init_subnqn()
2585 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) in nvme_validate_cntlid() argument
2596 if (tmp->cntlid == ctrl->cntlid) { in nvme_validate_cntlid()
2597 dev_err(ctrl->device, in nvme_validate_cntlid()
2599 ctrl->cntlid, dev_name(tmp->device)); in nvme_validate_cntlid()
2604 (ctrl->opts && ctrl->opts->discovery_nqn)) in nvme_validate_cntlid()
2607 dev_err(ctrl->device, in nvme_validate_cntlid()
2615 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) in nvme_init_subsystem() argument
2629 nvme_init_subnqn(subsys, ctrl, id); in nvme_init_subsystem()
2643 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); in nvme_init_subsystem()
2652 if (!nvme_validate_cntlid(subsys, ctrl, id)) { in nvme_init_subsystem()
2659 dev_err(ctrl->device, in nvme_init_subsystem()
2668 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, in nvme_init_subsystem()
2669 dev_name(ctrl->device)); in nvme_init_subsystem()
2671 dev_err(ctrl->device, in nvme_init_subsystem()
2677 subsys->instance = ctrl->instance; in nvme_init_subsystem()
2678 ctrl->subsys = subsys; in nvme_init_subsystem()
2679 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvme_init_subsystem()
2690 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, in nvme_get_log() argument
2705 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); in nvme_get_log()
2708 static int nvme_get_effects_log(struct nvme_ctrl *ctrl) in nvme_get_effects_log() argument
2712 if (!ctrl->effects) in nvme_get_effects_log()
2713 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); in nvme_get_effects_log()
2715 if (!ctrl->effects) in nvme_get_effects_log()
2718 ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, in nvme_get_effects_log()
2719 ctrl->effects, sizeof(*ctrl->effects), 0); in nvme_get_effects_log()
2721 kfree(ctrl->effects); in nvme_get_effects_log()
2722 ctrl->effects = NULL; in nvme_get_effects_log()
2732 int nvme_init_identify(struct nvme_ctrl *ctrl) in nvme_init_identify() argument
2739 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); in nvme_init_identify()
2741 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); in nvme_init_identify()
2744 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; in nvme_init_identify()
2745 ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); in nvme_init_identify()
2747 if (ctrl->vs >= NVME_VS(1, 1, 0)) in nvme_init_identify()
2748 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); in nvme_init_identify()
2750 ret = nvme_identify_ctrl(ctrl, &id); in nvme_init_identify()
2752 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); in nvme_init_identify()
2757 ret = nvme_get_effects_log(ctrl); in nvme_init_identify()
2762 if (!(ctrl->ops->flags & NVME_F_FABRICS)) in nvme_init_identify()
2763 ctrl->cntlid = le16_to_cpu(id->cntlid); in nvme_init_identify()
2765 if (!ctrl->identified) { in nvme_init_identify()
2768 ret = nvme_init_subsystem(ctrl, id); in nvme_init_identify()
2782 ctrl->quirks |= core_quirks[i].quirks; in nvme_init_identify()
2786 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { in nvme_init_identify()
2787 …dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at y… in nvme_init_identify()
2788 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; in nvme_init_identify()
2791 ctrl->crdt[0] = le16_to_cpu(id->crdt1); in nvme_init_identify()
2792 ctrl->crdt[1] = le16_to_cpu(id->crdt2); in nvme_init_identify()
2793 ctrl->crdt[2] = le16_to_cpu(id->crdt3); in nvme_init_identify()
2795 ctrl->oacs = le16_to_cpu(id->oacs); in nvme_init_identify()
2796 ctrl->oncs = le16_to_cpu(id->oncs); in nvme_init_identify()
2797 ctrl->mtfa = le16_to_cpu(id->mtfa); in nvme_init_identify()
2798 ctrl->oaes = le32_to_cpu(id->oaes); in nvme_init_identify()
2799 atomic_set(&ctrl->abort_limit, id->acl + 1); in nvme_init_identify()
2800 ctrl->vwc = id->vwc; in nvme_init_identify()
2805 ctrl->max_hw_sectors = in nvme_init_identify()
2806 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); in nvme_init_identify()
2808 nvme_set_queue_limits(ctrl, ctrl->admin_q); in nvme_init_identify()
2809 ctrl->sgls = le32_to_cpu(id->sgls); in nvme_init_identify()
2810 ctrl->kas = le16_to_cpu(id->kas); in nvme_init_identify()
2811 ctrl->max_namespaces = le32_to_cpu(id->mnan); in nvme_init_identify()
2812 ctrl->ctratt = le32_to_cpu(id->ctratt); in nvme_init_identify()
2818 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, in nvme_init_identify()
2821 if (ctrl->shutdown_timeout != shutdown_timeout) in nvme_init_identify()
2822 dev_info(ctrl->device, in nvme_init_identify()
2824 ctrl->shutdown_timeout); in nvme_init_identify()
2826 ctrl->shutdown_timeout = shutdown_timeout; in nvme_init_identify()
2828 ctrl->npss = id->npss; in nvme_init_identify()
2829 ctrl->apsta = id->apsta; in nvme_init_identify()
2830 prev_apst_enabled = ctrl->apst_enabled; in nvme_init_identify()
2831 if (ctrl->quirks & NVME_QUIRK_NO_APST) { in nvme_init_identify()
2833 …dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk… in nvme_init_identify()
2834 ctrl->apst_enabled = true; in nvme_init_identify()
2836 ctrl->apst_enabled = false; in nvme_init_identify()
2839 ctrl->apst_enabled = id->apsta; in nvme_init_identify()
2841 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); in nvme_init_identify()
2843 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_init_identify()
2844 ctrl->icdoff = le16_to_cpu(id->icdoff); in nvme_init_identify()
2845 ctrl->ioccsz = le32_to_cpu(id->ioccsz); in nvme_init_identify()
2846 ctrl->iorcsz = le32_to_cpu(id->iorcsz); in nvme_init_identify()
2847 ctrl->maxcmd = le16_to_cpu(id->maxcmd); in nvme_init_identify()
2853 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { in nvme_init_identify()
2858 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { in nvme_init_identify()
2859 dev_err(ctrl->device, in nvme_init_identify()
2865 ctrl->hmpre = le32_to_cpu(id->hmpre); in nvme_init_identify()
2866 ctrl->hmmin = le32_to_cpu(id->hmmin); in nvme_init_identify()
2867 ctrl->hmminds = le32_to_cpu(id->hmminds); in nvme_init_identify()
2868 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); in nvme_init_identify()
2871 ret = nvme_mpath_init(ctrl, id); in nvme_init_identify()
2877 if (ctrl->apst_enabled && !prev_apst_enabled) in nvme_init_identify()
2878 dev_pm_qos_expose_latency_tolerance(ctrl->device); in nvme_init_identify()
2879 else if (!ctrl->apst_enabled && prev_apst_enabled) in nvme_init_identify()
2880 dev_pm_qos_hide_latency_tolerance(ctrl->device); in nvme_init_identify()
2882 ret = nvme_configure_apst(ctrl); in nvme_init_identify()
2886 ret = nvme_configure_timestamp(ctrl); in nvme_init_identify()
2890 ret = nvme_configure_directives(ctrl); in nvme_init_identify()
2894 ret = nvme_configure_acre(ctrl); in nvme_init_identify()
2898 ctrl->identified = true; in nvme_init_identify()
2910 struct nvme_ctrl *ctrl = in nvme_dev_open() local
2913 switch (ctrl->state) { in nvme_dev_open()
2920 file->private_data = ctrl; in nvme_dev_open()
2924 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) in nvme_dev_user_cmd() argument
2929 down_read(&ctrl->namespaces_rwsem); in nvme_dev_user_cmd()
2930 if (list_empty(&ctrl->namespaces)) { in nvme_dev_user_cmd()
2935 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); in nvme_dev_user_cmd()
2936 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { in nvme_dev_user_cmd()
2937 dev_warn(ctrl->device, in nvme_dev_user_cmd()
2943 dev_warn(ctrl->device, in nvme_dev_user_cmd()
2946 up_read(&ctrl->namespaces_rwsem); in nvme_dev_user_cmd()
2948 ret = nvme_user_cmd(ctrl, ns, argp); in nvme_dev_user_cmd()
2953 up_read(&ctrl->namespaces_rwsem); in nvme_dev_user_cmd()
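
nvme_dev_user_cmd() (lines 2924-2953) services the I/O-command ioctl on the controller character device; it only works when exactly one namespace exists, and it pins that namespace with a reference before dropping the rwsem. A reconstruction; the warning text and error codes are assumptions:

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;	/* assumed error code */
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"IO ioctl not supported with multiple namespaces\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	kref_get(&ns->kref);	/* pin the namespace across the command */
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}
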
2960 struct nvme_ctrl *ctrl = file->private_data; in nvme_dev_ioctl() local
2965 return nvme_user_cmd(ctrl, NULL, argp); in nvme_dev_ioctl()
2967 return nvme_user_cmd64(ctrl, NULL, argp); in nvme_dev_ioctl()
2969 return nvme_dev_user_cmd(ctrl, argp); in nvme_dev_ioctl()
2971 dev_warn(ctrl->device, "resetting controller\n"); in nvme_dev_ioctl()
2972 return nvme_reset_ctrl_sync(ctrl); in nvme_dev_ioctl()
2974 return nvme_reset_subsystem(ctrl); in nvme_dev_ioctl()
2976 nvme_queue_scan(ctrl); in nvme_dev_ioctl()
2994 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_reset() local
2997 ret = nvme_reset_ctrl_sync(ctrl); in nvme_sysfs_reset()
3008 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_rescan() local
3010 nvme_queue_scan(ctrl); in nvme_sysfs_rescan()
3130 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) in nvme_ns_id_attrs_are_visible()
3154 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3156 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
3168 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3169 return sprintf(buf, "%d\n", ctrl->field); \
3182 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_delete() local
3185 nvme_delete_ctrl_sync(ctrl); in nvme_sysfs_delete()
3194 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_transport() local
3196 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); in nvme_sysfs_show_transport()
3204 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_state() local
3214 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && in nvme_sysfs_show_state()
3215 state_name[ctrl->state]) in nvme_sysfs_show_state()
3216 return sprintf(buf, "%s\n", state_name[ctrl->state]); in nvme_sysfs_show_state()
3227 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_subsysnqn() local
3229 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); in nvme_sysfs_show_subsysnqn()
3237 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_sysfs_show_address() local
3239 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); in nvme_sysfs_show_address()
3265 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); in nvme_dev_attrs_are_visible() local
3267 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) in nvme_dev_attrs_are_visible()
3269 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) in nvme_dev_attrs_are_visible()
3317 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, in nvme_alloc_ns_head() argument
3331 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); in nvme_alloc_ns_head()
3339 head->subsys = ctrl->subsys; in nvme_alloc_ns_head()
3343 ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids); in nvme_alloc_ns_head()
3347 ret = __nvme_check_ids(ctrl->subsys, head); in nvme_alloc_ns_head()
3349 dev_err(ctrl->device, in nvme_alloc_ns_head()
3354 ret = nvme_mpath_alloc_disk(ctrl, head); in nvme_alloc_ns_head()
3358 list_add_tail(&head->entry, &ctrl->subsys->nsheads); in nvme_alloc_ns_head()
3360 kref_get(&ctrl->subsys->ref); in nvme_alloc_ns_head()
3366 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); in nvme_alloc_ns_head()
3378 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_init_ns_head() local
3383 mutex_lock(&ctrl->subsys->lock); in nvme_init_ns_head()
3385 head = __nvme_find_ns_head(ctrl->subsys, nsid); in nvme_init_ns_head()
3387 head = nvme_alloc_ns_head(ctrl, nsid, id); in nvme_init_ns_head()
3395 ret = nvme_report_ns_ids(ctrl, nsid, id, &ids); in nvme_init_ns_head()
3400 dev_err(ctrl->device, in nvme_init_ns_head()
3412 mutex_unlock(&ctrl->subsys->lock); in nvme_init_ns_head()
3426 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) in nvme_find_get_ns() argument
3430 down_read(&ctrl->namespaces_rwsem); in nvme_find_get_ns()
3431 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_find_get_ns()
3441 up_read(&ctrl->namespaces_rwsem); in nvme_find_get_ns()
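
nvme_find_get_ns() (lines 3426-3441) walks the namespace list under the read lock and returns a referenced namespace, or NULL. A sketch; the kref handling and the sorted-list early exit are assumptions where the listing elides the loop body:

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!kref_get_unless_zero(&ns->kref))
				continue;	/* namespace is being torn down */
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;	/* assumed: list sorted by nsid, no match possible */
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}
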
3445 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) in nvme_setup_streams_ns() argument
3450 if (!ctrl->nr_streams) in nvme_setup_streams_ns()
3453 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); in nvme_setup_streams_ns()
3471 static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) in nvme_alloc_ns() argument
3477 int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; in nvme_alloc_ns()
3483 ns->queue = blk_mq_init_queue(ctrl->tagset); in nvme_alloc_ns()
3489 if (ctrl->opts && ctrl->opts->data_digest) in nvme_alloc_ns()
3494 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) in nvme_alloc_ns()
3498 ns->ctrl = ctrl; in nvme_alloc_ns()
3504 nvme_set_queue_limits(ctrl, ns->queue); in nvme_alloc_ns()
3506 ret = nvme_identify_ns(ctrl, nsid, &id); in nvme_alloc_ns()
3518 nvme_setup_streams_ns(ctrl, ns); in nvme_alloc_ns()
3519 nvme_set_disk_name(disk_name, ns, ctrl, &flags); in nvme_alloc_ns()
3536 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { in nvme_alloc_ns()
3539 dev_warn(ctrl->device, "LightNVM init failure\n"); in nvme_alloc_ns()
3544 down_write(&ctrl->namespaces_rwsem); in nvme_alloc_ns()
3545 list_add_tail(&ns->list, &ctrl->namespaces); in nvme_alloc_ns()
3546 up_write(&ctrl->namespaces_rwsem); in nvme_alloc_ns()
3548 nvme_get_ctrl(ctrl); in nvme_alloc_ns()
3550 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); in nvme_alloc_ns()
3560 mutex_lock(&ctrl->subsys->lock); in nvme_alloc_ns()
3562 mutex_unlock(&ctrl->subsys->lock); in nvme_alloc_ns()
3582 mutex_lock(&ns->ctrl->subsys->lock); in nvme_ns_remove()
3584 mutex_unlock(&ns->ctrl->subsys->lock); in nvme_ns_remove()
3596 down_write(&ns->ctrl->namespaces_rwsem); in nvme_ns_remove()
3598 up_write(&ns->ctrl->namespaces_rwsem); in nvme_ns_remove()
3604 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) in nvme_validate_ns() argument
3608 ns = nvme_find_get_ns(ctrl, nsid); in nvme_validate_ns()
3614 nvme_alloc_ns(ctrl, nsid); in nvme_validate_ns()
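
nvme_validate_ns() (lines 3604-3614) is the per-nsid scan step: revalidate an existing namespace, removing it if revalidation fails, or allocate a new one. A reconstruction; the revalidate-and-remove branch is an assumption where the listing elides it:

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);	/* assumed failure handling */
		nvme_put_ns(ns);
	} else {
		nvme_alloc_ns(ctrl, nsid);
	}
}
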
3617 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, in nvme_remove_invalid_namespaces() argument
3623 down_write(&ctrl->namespaces_rwsem); in nvme_remove_invalid_namespaces()
3624 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { in nvme_remove_invalid_namespaces()
3628 up_write(&ctrl->namespaces_rwsem); in nvme_remove_invalid_namespaces()
3635 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) in nvme_scan_ns_list() argument
3648 ret = nvme_identify_ns_list(ctrl, prev, ns_list); in nvme_scan_ns_list()
3657 nvme_validate_ns(ctrl, nsid); in nvme_scan_ns_list()
3660 ns = nvme_find_get_ns(ctrl, prev); in nvme_scan_ns_list()
3670 nvme_remove_invalid_namespaces(ctrl, prev); in nvme_scan_ns_list()
3676 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) in nvme_scan_ns_sequential() argument
3681 nvme_validate_ns(ctrl, i); in nvme_scan_ns_sequential()
3683 nvme_remove_invalid_namespaces(ctrl, nn); in nvme_scan_ns_sequential()
3686 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) in nvme_clear_changed_ns_log() argument
3702 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, in nvme_clear_changed_ns_log()
3705 dev_warn(ctrl->device, in nvme_clear_changed_ns_log()
3713 struct nvme_ctrl *ctrl = in nvme_scan_work() local
3719 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) in nvme_scan_work()
3722 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { in nvme_scan_work()
3723 dev_info(ctrl->device, "rescanning namespaces.\n"); in nvme_scan_work()
3724 nvme_clear_changed_ns_log(ctrl); in nvme_scan_work()
3727 if (nvme_identify_ctrl(ctrl, &id)) in nvme_scan_work()
3730 mutex_lock(&ctrl->scan_lock); in nvme_scan_work()
3732 if (ctrl->vs >= NVME_VS(1, 1, 0) && in nvme_scan_work()
3733 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { in nvme_scan_work()
3734 if (!nvme_scan_ns_list(ctrl, nn)) in nvme_scan_work()
3737 nvme_scan_ns_sequential(ctrl, nn); in nvme_scan_work()
3739 mutex_unlock(&ctrl->scan_lock); in nvme_scan_work()
3741 down_write(&ctrl->namespaces_rwsem); in nvme_scan_work()
3742 list_sort(NULL, &ctrl->namespaces, ns_cmp); in nvme_scan_work()
3743 up_write(&ctrl->namespaces_rwsem); in nvme_scan_work()
3751 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) in nvme_remove_namespaces() argument
3761 nvme_mpath_clear_ctrl_paths(ctrl); in nvme_remove_namespaces()
3764 flush_work(&ctrl->scan_work); in nvme_remove_namespaces()
3772 if (ctrl->state == NVME_CTRL_DEAD) in nvme_remove_namespaces()
3773 nvme_kill_queues(ctrl); in nvme_remove_namespaces()
3775 down_write(&ctrl->namespaces_rwsem); in nvme_remove_namespaces()
3776 list_splice_init(&ctrl->namespaces, &ns_list); in nvme_remove_namespaces()
3777 up_write(&ctrl->namespaces_rwsem); in nvme_remove_namespaces()
3786 struct nvme_ctrl *ctrl = in nvme_class_uevent() local
3788 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_class_uevent()
3791 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); in nvme_class_uevent()
3811 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) in nvme_aen_uevent() argument
3814 u32 aen_result = ctrl->aen_result; in nvme_aen_uevent()
3816 ctrl->aen_result = 0; in nvme_aen_uevent()
3823 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); in nvme_aen_uevent()
3829 struct nvme_ctrl *ctrl = in nvme_async_event_work() local
3832 nvme_aen_uevent(ctrl); in nvme_async_event_work()
3833 ctrl->ops->submit_async_event(ctrl); in nvme_async_event_work()
3836 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) in nvme_ctrl_pp_status() argument
3841 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) in nvme_ctrl_pp_status()
3847 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); in nvme_ctrl_pp_status()
3850 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) in nvme_get_fw_slot_info() argument
3858 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log, in nvme_get_fw_slot_info()
3860 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); in nvme_get_fw_slot_info()
3866 struct nvme_ctrl *ctrl = container_of(work, in nvme_fw_act_work() local
3870 if (ctrl->mtfa) in nvme_fw_act_work()
3872 msecs_to_jiffies(ctrl->mtfa * 100); in nvme_fw_act_work()
3877 nvme_stop_queues(ctrl); in nvme_fw_act_work()
3878 while (nvme_ctrl_pp_status(ctrl)) { in nvme_fw_act_work()
3880 dev_warn(ctrl->device, in nvme_fw_act_work()
3882 nvme_try_sched_reset(ctrl); in nvme_fw_act_work()
3888 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) in nvme_fw_act_work()
3891 nvme_start_queues(ctrl); in nvme_fw_act_work()
3893 nvme_get_fw_slot_info(ctrl); in nvme_fw_act_work()
3896 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) in nvme_handle_aen_notice() argument
3900 trace_nvme_async_event(ctrl, aer_notice_type); in nvme_handle_aen_notice()
3904 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); in nvme_handle_aen_notice()
3905 nvme_queue_scan(ctrl); in nvme_handle_aen_notice()
3913 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_handle_aen_notice()
3914 queue_work(nvme_wq, &ctrl->fw_act_work); in nvme_handle_aen_notice()
3918 if (!ctrl->ana_log_buf) in nvme_handle_aen_notice()
3920 queue_work(nvme_wq, &ctrl->ana_work); in nvme_handle_aen_notice()
3924 ctrl->aen_result = result; in nvme_handle_aen_notice()
3927 dev_warn(ctrl->device, "async event result %08x\n", result); in nvme_handle_aen_notice()
3931 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, in nvme_complete_async_event() argument
3942 nvme_handle_aen_notice(ctrl, result); in nvme_complete_async_event()
3948 trace_nvme_async_event(ctrl, aer_type); in nvme_complete_async_event()
3949 ctrl->aen_result = result; in nvme_complete_async_event()
3954 queue_work(nvme_wq, &ctrl->async_event_work); in nvme_complete_async_event()
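
nvme_complete_async_event() (lines 3931-3954) demultiplexes an AEN completion by its type field, then rearms event reporting by queueing async_event_work, which resubmits an AER command. A sketch; the type extraction and the switch arms not shown in the listing are assumptions:

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = result & 0x07;	/* assumed AEN type mask */

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
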
3958 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) in nvme_stop_ctrl() argument
3960 nvme_mpath_stop(ctrl); in nvme_stop_ctrl()
3961 nvme_stop_keep_alive(ctrl); in nvme_stop_ctrl()
3962 flush_work(&ctrl->async_event_work); in nvme_stop_ctrl()
3963 cancel_work_sync(&ctrl->fw_act_work); in nvme_stop_ctrl()
3967 void nvme_start_ctrl(struct nvme_ctrl *ctrl) in nvme_start_ctrl() argument
3969 if (ctrl->kato) in nvme_start_ctrl()
3970 nvme_start_keep_alive(ctrl); in nvme_start_ctrl()
3972 nvme_enable_aen(ctrl); in nvme_start_ctrl()
3974 if (ctrl->queue_count > 1) { in nvme_start_ctrl()
3975 nvme_queue_scan(ctrl); in nvme_start_ctrl()
3976 nvme_start_queues(ctrl); in nvme_start_ctrl()
3981 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) in nvme_uninit_ctrl() argument
3983 nvme_fault_inject_fini(&ctrl->fault_inject); in nvme_uninit_ctrl()
3984 dev_pm_qos_hide_latency_tolerance(ctrl->device); in nvme_uninit_ctrl()
3985 cdev_device_del(&ctrl->cdev, ctrl->device); in nvme_uninit_ctrl()
3991 struct nvme_ctrl *ctrl = in nvme_free_ctrl() local
3993 struct nvme_subsystem *subsys = ctrl->subsys; in nvme_free_ctrl()
3995 if (subsys && ctrl->instance != subsys->instance) in nvme_free_ctrl()
3996 ida_simple_remove(&nvme_instance_ida, ctrl->instance); in nvme_free_ctrl()
3998 kfree(ctrl->effects); in nvme_free_ctrl()
3999 nvme_mpath_uninit(ctrl); in nvme_free_ctrl()
4000 __free_page(ctrl->discard_page); in nvme_free_ctrl()
4004 list_del(&ctrl->subsys_entry); in nvme_free_ctrl()
4005 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); in nvme_free_ctrl()
4009 ctrl->ops->free_ctrl(ctrl); in nvme_free_ctrl()
4020 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, in nvme_init_ctrl() argument
4025 ctrl->state = NVME_CTRL_NEW; in nvme_init_ctrl()
4026 spin_lock_init(&ctrl->lock); in nvme_init_ctrl()
4027 mutex_init(&ctrl->scan_lock); in nvme_init_ctrl()
4028 INIT_LIST_HEAD(&ctrl->namespaces); in nvme_init_ctrl()
4029 init_rwsem(&ctrl->namespaces_rwsem); in nvme_init_ctrl()
4030 ctrl->dev = dev; in nvme_init_ctrl()
4031 ctrl->ops = ops; in nvme_init_ctrl()
4032 ctrl->quirks = quirks; in nvme_init_ctrl()
4033 INIT_WORK(&ctrl->scan_work, nvme_scan_work); in nvme_init_ctrl()
4034 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); in nvme_init_ctrl()
4035 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); in nvme_init_ctrl()
4036 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); in nvme_init_ctrl()
4037 init_waitqueue_head(&ctrl->state_wq); in nvme_init_ctrl()
4039 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); in nvme_init_ctrl()
4040 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); in nvme_init_ctrl()
4041 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; in nvme_init_ctrl()
4045 ctrl->discard_page = alloc_page(GFP_KERNEL); in nvme_init_ctrl()
4046 if (!ctrl->discard_page) { in nvme_init_ctrl()
4054 ctrl->instance = ret; in nvme_init_ctrl()
4056 device_initialize(&ctrl->ctrl_device); in nvme_init_ctrl()
4057 ctrl->device = &ctrl->ctrl_device; in nvme_init_ctrl()
4058 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); in nvme_init_ctrl()
4059 ctrl->device->class = nvme_class; in nvme_init_ctrl()
4060 ctrl->device->parent = ctrl->dev; in nvme_init_ctrl()
4061 ctrl->device->groups = nvme_dev_attr_groups; in nvme_init_ctrl()
4062 ctrl->device->release = nvme_free_ctrl; in nvme_init_ctrl()
4063 dev_set_drvdata(ctrl->device, ctrl); in nvme_init_ctrl()
4064 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); in nvme_init_ctrl()
4068 cdev_init(&ctrl->cdev, &nvme_dev_fops); in nvme_init_ctrl()
4069 ctrl->cdev.owner = ops->module; in nvme_init_ctrl()
4070 ret = cdev_device_add(&ctrl->cdev, ctrl->device); in nvme_init_ctrl()
4078 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; in nvme_init_ctrl()
4079 dev_pm_qos_update_user_latency_tolerance(ctrl->device, in nvme_init_ctrl()
4082 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); in nvme_init_ctrl()
4086 kfree_const(ctrl->device->kobj.name); in nvme_init_ctrl()
4088 ida_simple_remove(&nvme_instance_ida, ctrl->instance); in nvme_init_ctrl()
4090 if (ctrl->discard_page) in nvme_init_ctrl()
4091 __free_page(ctrl->discard_page); in nvme_init_ctrl()
4103 void nvme_kill_queues(struct nvme_ctrl *ctrl) in nvme_kill_queues() argument
4107 down_read(&ctrl->namespaces_rwsem); in nvme_kill_queues()
4110 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) in nvme_kill_queues()
4111 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_kill_queues()
4113 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_kill_queues()
4116 up_read(&ctrl->namespaces_rwsem); in nvme_kill_queues()
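
nvme_kill_queues() (lines 4103-4116) is the dead-controller path: it unquiesces the admin queue so stuck requests can fail fast, then marks every namespace queue as dying. A reconstruction; nvme_set_queue_dying() as the per-namespace action is an assumption, since that call does not reference ctrl and is therefore absent from this listing:

void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce so dispatch is not blocked on a dead device. */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);	/* assumed per-ns helper */

	up_read(&ctrl->namespaces_rwsem);
}
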
4120 void nvme_unfreeze(struct nvme_ctrl *ctrl) in nvme_unfreeze() argument
4124 down_read(&ctrl->namespaces_rwsem); in nvme_unfreeze()
4125 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_unfreeze()
4127 up_read(&ctrl->namespaces_rwsem); in nvme_unfreeze()
4131 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) in nvme_wait_freeze_timeout() argument
4135 down_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze_timeout()
4136 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_wait_freeze_timeout()
4141 up_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze_timeout()
4145 void nvme_wait_freeze(struct nvme_ctrl *ctrl) in nvme_wait_freeze() argument
4149 down_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze()
4150 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_wait_freeze()
4152 up_read(&ctrl->namespaces_rwsem); in nvme_wait_freeze()
4156 void nvme_start_freeze(struct nvme_ctrl *ctrl) in nvme_start_freeze() argument
4160 down_read(&ctrl->namespaces_rwsem); in nvme_start_freeze()
4161 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_start_freeze()
4163 up_read(&ctrl->namespaces_rwsem); in nvme_start_freeze()
4167 void nvme_stop_queues(struct nvme_ctrl *ctrl) in nvme_stop_queues() argument
4171 down_read(&ctrl->namespaces_rwsem); in nvme_stop_queues()
4172 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_stop_queues()
4174 up_read(&ctrl->namespaces_rwsem); in nvme_stop_queues()
4178 void nvme_start_queues(struct nvme_ctrl *ctrl) in nvme_start_queues() argument
4182 down_read(&ctrl->namespaces_rwsem); in nvme_start_queues()
4183 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_start_queues()
4185 up_read(&ctrl->namespaces_rwsem); in nvme_start_queues()
4190 void nvme_sync_queues(struct nvme_ctrl *ctrl) in nvme_sync_queues() argument
4194 down_read(&ctrl->namespaces_rwsem); in nvme_sync_queues()
4195 list_for_each_entry(ns, &ctrl->namespaces, list) in nvme_sync_queues()
4197 up_read(&ctrl->namespaces_rwsem); in nvme_sync_queues()
4199 if (ctrl->admin_q) in nvme_sync_queues()
4200 blk_sync_queue(ctrl->admin_q); in nvme_sync_queues()
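
nvme_sync_queues() (lines 4190-4200) follows the same walk-all-namespaces pattern as the freeze/start/stop helpers above, and additionally syncs the admin queue. A reconstruction; blk_sync_queue(ns->queue) as the loop body is an assumption, since that line does not reference ctrl:

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);	/* assumed loop body */
	up_read(&ctrl->namespaces_rwsem);

	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
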