Lines matching +full:ctrl +full:- +full:module (drivers/nvme/host/nvme.h)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/t10-pi.h>
	/*
	 * Use non-standard 128-byte SQEs.
	 */
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
	struct nvme_ctrl *ctrl;		/* member of struct nvme_request */
static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
 * @NVME_CTRL_DEAD:	Controller is non-present/unresponsive during
 *			shutdown or removal. In this case we forcibly
 *			fail all inflight I/O.
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}
	struct nvme_ctrl *ctrl;		/* member of struct nvme_ns */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
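/*
 * Illustrative sketch (not part of this header): how a transport might wire
 * up its nvme_ctrl_ops.  All demo_* names are hypothetical; the real
 * instances live in the pci/rdma/tcp/fc hosts under drivers/nvme/host/.
 */
static int demo_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = 0;	/* a real transport reads its register space here */
	return 0;
}

static void demo_free_ctrl(struct nvme_ctrl *ctrl)
{
	/* release the transport-private controller structure here */
}

static const struct nvme_ctrl_ops demo_ctrl_ops = {
	.name		= "demo",
	.module		= THIS_MODULE,
	.reg_read32	= demo_reg_read32,
	.free_ctrl	= demo_free_ctrl,
};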
static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
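/*
 * Hedged usage sketch: on receiving a CQE, a transport maps the wire command
 * ID back to a request with nvme_find_rq(), which returns NULL (with the
 * dev_err above) when the generation bits are stale.  demo_handle_cqe() and
 * its parameters are illustrative; nvme-tcp and nvme-rdma follow this shape.
 */
static void demo_handle_cqe(struct blk_mq_tags *tags, u16 command_id,
		__le16 status, union nvme_result result)
{
	struct request *req = nvme_find_rq(tags, command_id);

	if (req && !nvme_try_complete_req(req, status, result))
		nvme_complete_rq(req);	/* complete directly, no IPI needed */
}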
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;

	/* writing "NVMe" in ASCII (0x4E564D65) to NSSR triggers the reset */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
/*
 * Convert a 512 B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512 B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a byte length to NVMe's 0-based number of dwords.
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
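/*
 * Worked example, assuming a hypothetical namespace formatted with 4 KiB
 * logical blocks (ns->lba_shift == 12, SECTOR_SHIFT == 9):
 *
 *	nvme_sect_to_lba(ns, 8)  == 1		8 x 512 B sectors = 1 LBA
 *	nvme_lba_to_sect(ns, 1)  == 8
 *	nvme_bytes_to_numd(4096) == 1023	4096 / 4 dwords, minus 1 for
 *						the 0-based encoding
 */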
/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request directly.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}
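/*
 * Hedged sketch: code that stashes a controller pointer beyond the current
 * call chain pairs nvme_get_ctrl() with nvme_put_ctrl().  struct demo_work
 * and the demo_* helpers are hypothetical.
 */
struct demo_work {
	struct nvme_ctrl *ctrl;
};

static void demo_work_init(struct demo_work *w, struct nvme_ctrl *ctrl)
{
	nvme_get_ctrl(ctrl);	/* pin ctrl->device while we hold the pointer */
	w->ctrl = ctrl;
}

static void demo_work_fini(struct demo_work *w)
{
	nvme_put_ctrl(w->ctrl);	/* the last put releases the controller */
	w->ctrl = NULL;
}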
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
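/*
 * Hedged sketch of how the freeze/quiesce helpers above are typically
 * ordered around queue reconfiguration (loosely modeled on nvme-pci's reset
 * path); demo_reconfigure() is illustrative only.
 */
static void demo_reconfigure(struct nvme_ctrl *ctrl)
{
	nvme_start_freeze(ctrl);	/* stop new requests from entering */
	nvme_stop_queues(ctrl);		/* quiesce the I/O queues */
	nvme_sync_io_queues(ctrl);	/* let timeout handlers settle */

	/* ... tear down and re-create the hardware queues here ... */

	nvme_start_queues(ctrl);	/* unquiesce */
	nvme_unfreeze(ctrl);		/* admit new requests again */
}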
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}
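/*
 * Hedged sketch: the fabrics hosts call nvme_check_ready() from their
 * ->queue_rq() before touching the wire, failing fast when the controller
 * cannot make progress.  demo_queue_rq() is illustrative.
 */
static blk_status_t demo_queue_rq(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (!nvme_check_ready(ctrl, rq, queue_live))
		return nvme_fail_nonready_command(ctrl, rq);

	/* ... map data and post the command to the transport ... */
	return BLK_STS_OK;
}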
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
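/*
 * Hedged example: fetching the SMART / health log with nvme_get_log(), much
 * as drivers/nvme/host/hwmon.c does; error handling is trimmed and the
 * demo_read_smart() wrapper is hypothetical.
 */
static int demo_read_smart(struct nvme_ctrl *ctrl, struct nvme_smart_log *log)
{
	return nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
			    NVME_CSI_NVM, log, sizeof(*log), 0);
}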
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
/* Stub versions used when CONFIG_NVME_MULTIPATH is disabled: */
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}
#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	/* Identify Controller SGLS bits 1:0 are nonzero when SGLs are supported */
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
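/*
 * Hedged sketch: a transport could consult this helper when deciding between
 * SGL and PRP data mapping; the heuristic below is illustrative only
 * (nvme-pci applies its own, more detailed rules).
 */
static bool demo_prefer_sgl(struct nvme_ctrl *ctrl, struct request *req)
{
	return nvme_ctrl_sgl_supported(ctrl) &&
	       blk_rq_nr_phys_segments(req) > 1;
}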
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		u8 opcode);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
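/*
 * Hedged sketch: nvme_multi_css() reflects whether CC.CSS was programmed to
 * the "all supported I/O command sets" (CSI) value at controller enable time,
 * roughly as below; demo_select_css() is illustrative of what the enable path
 * does when the controller advertises CSI support.
 */
static void demo_select_css(struct nvme_ctrl *ctrl)
{
	ctrl->ctrl_config &= ~NVME_CC_CSS_MASK;
	ctrl->ctrl_config |= NVME_CC_CSS_CSI;	/* nvme_multi_css() now true */
}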