Lines Matching full:hba
109 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, in ufshcd_dump_regs() argument
127 regs[pos / 4] = ufshcd_readl(hba, offset + pos); in ufshcd_dump_regs()
247 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
249 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
251 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
252 static void ufshcd_hba_exit(struct ufs_hba *hba);
253 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
254 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
255 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
256 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
257 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
258 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
259 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
260 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
262 static int ufshcd_change_power_mode(struct ufs_hba *hba,
264 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
265 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
266 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
268 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
269 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
271 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
272 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
274 static inline void ufshcd_enable_irq(struct ufs_hba *hba) in ufshcd_enable_irq() argument
276 if (!hba->is_irq_enabled) { in ufshcd_enable_irq()
277 enable_irq(hba->irq); in ufshcd_enable_irq()
278 hba->is_irq_enabled = true; in ufshcd_enable_irq()
282 static inline void ufshcd_disable_irq(struct ufs_hba *hba) in ufshcd_disable_irq() argument
284 if (hba->is_irq_enabled) { in ufshcd_disable_irq()
285 disable_irq(hba->irq); in ufshcd_disable_irq()
286 hba->is_irq_enabled = false; in ufshcd_disable_irq()
290 static void ufshcd_configure_wb(struct ufs_hba *hba) in ufshcd_configure_wb() argument
292 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_configure_wb()
295 ufshcd_wb_toggle(hba, true); in ufshcd_configure_wb()
297 ufshcd_wb_toggle_buf_flush_during_h8(hba, true); in ufshcd_configure_wb()
299 if (ufshcd_is_wb_buf_flush_allowed(hba)) in ufshcd_configure_wb()
300 ufshcd_wb_toggle_buf_flush(hba, true); in ufshcd_configure_wb()
303 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) in ufshcd_scsi_unblock_requests() argument
305 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) in ufshcd_scsi_unblock_requests()
306 scsi_unblock_requests(hba->host); in ufshcd_scsi_unblock_requests()
309 static void ufshcd_scsi_block_requests(struct ufs_hba *hba) in ufshcd_scsi_block_requests() argument
311 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) in ufshcd_scsi_block_requests()
312 scsi_block_requests(hba->host); in ufshcd_scsi_block_requests()
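The two helpers above gate scsi_block_requests()/scsi_unblock_requests() behind the scsi_block_reqs_cnt atomic counter, so the SCSI midlayer is only touched on the 0->1 and 1->0 transitions. A minimal illustrative fragment (editor's sketch, not a line from ufshcd.c) of how nested calls behave:

	/*
	 * Illustrative only: nested block/unblock pairs are safe because the
	 * midlayer is called only on the 0->1 and 1->0 counter transitions.
	 */
	ufshcd_scsi_block_requests(hba);	/* 0 -> 1: scsi_block_requests()   */
	ufshcd_scsi_block_requests(hba);	/* 1 -> 2: counter bump only       */
	ufshcd_scsi_unblock_requests(hba);	/* 2 -> 1: host stays blocked      */
	ufshcd_scsi_unblock_requests(hba);	/* 1 -> 0: scsi_unblock_requests() */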
315 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_cmd_upiu_trace() argument
318 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; in ufshcd_add_cmd_upiu_trace()
327 header = &hba->lrb[tag].ucd_rsp_ptr->header; in ufshcd_add_cmd_upiu_trace()
329 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb, in ufshcd_add_cmd_upiu_trace()
333 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, in ufshcd_add_query_upiu_trace() argument
340 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header, in ufshcd_add_query_upiu_trace()
344 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_tm_upiu_trace() argument
347 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; in ufshcd_add_tm_upiu_trace()
353 trace_ufshcd_upiu(dev_name(hba->dev), str_t, in ufshcd_add_tm_upiu_trace()
358 trace_ufshcd_upiu(dev_name(hba->dev), str_t, in ufshcd_add_tm_upiu_trace()
364 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, in ufshcd_add_uic_command_trace() argument
376 cmd = ufshcd_readl(hba, REG_UIC_COMMAND); in ufshcd_add_uic_command_trace()
378 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd, in ufshcd_add_uic_command_trace()
379 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), in ufshcd_add_uic_command_trace()
380 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), in ufshcd_add_uic_command_trace()
381 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); in ufshcd_add_uic_command_trace()
384 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_command_trace() argument
390 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_add_command_trace()
399 ufshcd_add_cmd_upiu_trace(hba, tag, str_t); in ufshcd_add_command_trace()
422 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_add_command_trace()
423 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_add_command_trace()
424 trace_ufshcd_command(dev_name(hba->dev), str_t, tag, in ufshcd_add_command_trace()
428 static void ufshcd_print_clk_freqs(struct ufs_hba *hba) in ufshcd_print_clk_freqs() argument
431 struct list_head *head = &hba->clk_list_head; in ufshcd_print_clk_freqs()
439 dev_err(hba->dev, "clk: %s, rate: %u\n", in ufshcd_print_clk_freqs()
444 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, in ufshcd_print_evt() argument
454 e = &hba->ufs_stats.event[id]; in ufshcd_print_evt()
461 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, in ufshcd_print_evt()
467 dev_err(hba->dev, "No record of %s\n", err_name); in ufshcd_print_evt()
469 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt); in ufshcd_print_evt()
472 static void ufshcd_print_evt_hist(struct ufs_hba *hba) in ufshcd_print_evt_hist() argument
474 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_print_evt_hist()
476 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); in ufshcd_print_evt_hist()
477 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); in ufshcd_print_evt_hist()
478 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); in ufshcd_print_evt_hist()
479 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); in ufshcd_print_evt_hist()
480 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); in ufshcd_print_evt_hist()
481 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_print_evt_hist()
483 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); in ufshcd_print_evt_hist()
484 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, in ufshcd_print_evt_hist()
486 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); in ufshcd_print_evt_hist()
487 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, in ufshcd_print_evt_hist()
489 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); in ufshcd_print_evt_hist()
490 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); in ufshcd_print_evt_hist()
491 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); in ufshcd_print_evt_hist()
493 ufshcd_vops_dbg_register_dump(hba); in ufshcd_print_evt_hist()
497 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) in ufshcd_print_trs() argument
503 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_print_trs()
504 lrbp = &hba->lrb[tag]; in ufshcd_print_trs()
506 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", in ufshcd_print_trs()
508 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", in ufshcd_print_trs()
510 dev_err(hba->dev, in ufshcd_print_trs()
516 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
520 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
527 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_print_trs()
530 dev_err(hba->dev, in ufshcd_print_trs()
541 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_print_tmrs() argument
545 for_each_set_bit(tag, &bitmap, hba->nutmrs) { in ufshcd_print_tmrs()
546 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; in ufshcd_print_tmrs()
548 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); in ufshcd_print_tmrs()
553 static void ufshcd_print_host_state(struct ufs_hba *hba) in ufshcd_print_host_state() argument
555 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; in ufshcd_print_host_state()
557 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); in ufshcd_print_host_state()
558 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", in ufshcd_print_host_state()
559 hba->outstanding_reqs, hba->outstanding_tasks); in ufshcd_print_host_state()
560 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", in ufshcd_print_host_state()
561 hba->saved_err, hba->saved_uic_err); in ufshcd_print_host_state()
562 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", in ufshcd_print_host_state()
563 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_print_host_state()
564 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", in ufshcd_print_host_state()
565 hba->pm_op_in_progress, hba->is_sys_suspended); in ufshcd_print_host_state()
566 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", in ufshcd_print_host_state()
567 hba->auto_bkops_enabled, hba->host->host_self_blocked); in ufshcd_print_host_state()
568 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); in ufshcd_print_host_state()
569 dev_err(hba->dev, in ufshcd_print_host_state()
571 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000), in ufshcd_print_host_state()
572 hba->ufs_stats.hibern8_exit_cnt); in ufshcd_print_host_state()
573 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", in ufshcd_print_host_state()
574 div_u64(hba->ufs_stats.last_intr_ts, 1000), in ufshcd_print_host_state()
575 hba->ufs_stats.last_intr_status); in ufshcd_print_host_state()
576 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", in ufshcd_print_host_state()
577 hba->eh_flags, hba->req_abort_count); in ufshcd_print_host_state()
578 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", in ufshcd_print_host_state()
579 hba->ufs_version, hba->capabilities, hba->caps); in ufshcd_print_host_state()
580 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, in ufshcd_print_host_state()
581 hba->dev_quirks); in ufshcd_print_host_state()
583 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", in ufshcd_print_host_state()
586 ufshcd_print_clk_freqs(hba); in ufshcd_print_host_state()
590 * ufshcd_print_pwr_info - print power params as saved in hba
592 * @hba: per-adapter instance
594 static void ufshcd_print_pwr_info(struct ufs_hba *hba) in ufshcd_print_pwr_info() argument
611 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", in ufshcd_print_pwr_info()
613 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, in ufshcd_print_pwr_info()
614 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, in ufshcd_print_pwr_info()
615 names[hba->pwr_info.pwr_rx], in ufshcd_print_pwr_info()
616 names[hba->pwr_info.pwr_tx], in ufshcd_print_pwr_info()
617 hba->pwr_info.hs_rate); in ufshcd_print_pwr_info()
620 static void ufshcd_device_reset(struct ufs_hba *hba) in ufshcd_device_reset() argument
624 err = ufshcd_vops_device_reset(hba); in ufshcd_device_reset()
627 ufshcd_set_ufs_dev_active(hba); in ufshcd_device_reset()
628 if (ufshcd_is_wb_allowed(hba)) { in ufshcd_device_reset()
629 hba->dev_info.wb_enabled = false; in ufshcd_device_reset()
630 hba->dev_info.wb_buf_flush_enabled = false; in ufshcd_device_reset()
634 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); in ufshcd_device_reset()
651 * @hba: per-adapter instance
661 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, in ufshcd_wait_for_register() argument
671 while ((ufshcd_readl(hba, reg) & mask) != val) { in ufshcd_wait_for_register()
674 if ((ufshcd_readl(hba, reg) & mask) != val) in ufshcd_wait_for_register()
685 * @hba: Pointer to adapter instance
689 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) in ufshcd_get_intr_mask() argument
691 if (hba->ufs_version == ufshci_version(1, 0)) in ufshcd_get_intr_mask()
693 if (hba->ufs_version <= ufshci_version(2, 0)) in ufshcd_get_intr_mask()
700 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
701 * @hba: Pointer to adapter instance
705 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) in ufshcd_get_ufs_version() argument
709 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) in ufshcd_get_ufs_version()
710 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); in ufshcd_get_ufs_version()
712 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); in ufshcd_get_ufs_version()
728 * @hba: pointer to adapter instance
732 static inline bool ufshcd_is_device_present(struct ufs_hba *hba) in ufshcd_is_device_present() argument
734 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT; in ufshcd_is_device_present()
751 * @hba: per adapter instance
754 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) in ufshcd_utrl_clear() argument
756 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utrl_clear()
771 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR); in ufshcd_utrl_clear()
776 * @hba: per adapter instance
779 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utmrl_clear() argument
781 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utmrl_clear()
782 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
784 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
800 * @hba: Pointer to adapter instance
805 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) in ufshcd_get_uic_cmd_result() argument
807 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & in ufshcd_get_uic_cmd_result()
813 * @hba: Pointer to adapter instance
818 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) in ufshcd_get_dme_attr_val() argument
820 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); in ufshcd_get_dme_attr_val()
877 * @hba: per adapter instance
880 ufshcd_reset_intr_aggr(struct ufs_hba *hba) in ufshcd_reset_intr_aggr() argument
882 ufshcd_writel(hba, INT_AGGR_ENABLE | in ufshcd_reset_intr_aggr()
889 * @hba: per adapter instance
894 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) in ufshcd_config_intr_aggr() argument
896 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | in ufshcd_config_intr_aggr()
904 * @hba: per adapter instance
906 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) in ufshcd_disable_intr_aggr() argument
908 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); in ufshcd_disable_intr_aggr()
915 * @hba: per adapter instance
917 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) in ufshcd_enable_run_stop_reg() argument
919 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
921 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
927 * @hba: per adapter instance
929 static inline void ufshcd_hba_start(struct ufs_hba *hba) in ufshcd_hba_start() argument
933 if (ufshcd_crypto_enable(hba)) in ufshcd_hba_start()
936 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); in ufshcd_hba_start()
941 * @hba: per adapter instance
945 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) in ufshcd_is_hba_active() argument
947 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE; in ufshcd_is_hba_active()
950 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) in ufshcd_get_local_unipro_ver() argument
953 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_get_local_unipro_ver()
960 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) in ufshcd_is_unipro_pa_params_tuning_req() argument
971 return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6; in ufshcd_is_unipro_pa_params_tuning_req()
976 * @hba: per adapter instance
982 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) in ufshcd_set_clk_freq() argument
986 struct list_head *head = &hba->clk_list_head; in ufshcd_set_clk_freq()
999 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
1004 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
1017 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
1022 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
1029 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, in ufshcd_set_clk_freq()
1039 * @hba: per adapter instance
1045 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_clks() argument
1050 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); in ufshcd_scale_clks()
1054 ret = ufshcd_set_clk_freq(hba, scale_up); in ufshcd_scale_clks()
1058 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); in ufshcd_scale_clks()
1060 ufshcd_set_clk_freq(hba, !scale_up); in ufshcd_scale_clks()
1063 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_scale_clks()
1071 * @hba: per adapter instance
1076 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, in ufshcd_is_devfreq_scaling_required() argument
1080 struct list_head *head = &hba->clk_list_head; in ufshcd_is_devfreq_scaling_required()
1109 static u32 ufshcd_pending_cmds(struct ufs_hba *hba) in ufshcd_pending_cmds() argument
1114 lockdep_assert_held(hba->host->host_lock); in ufshcd_pending_cmds()
1115 __shost_for_each_device(sdev, hba->host) in ufshcd_pending_cmds()
1121 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, in ufshcd_wait_for_doorbell_clr() argument
1131 ufshcd_hold(hba, false); in ufshcd_wait_for_doorbell_clr()
1132 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1139 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { in ufshcd_wait_for_doorbell_clr()
1144 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_wait_for_doorbell_clr()
1145 tr_pending = ufshcd_pending_cmds(hba); in ufshcd_wait_for_doorbell_clr()
1153 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1165 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1169 dev_err(hba->dev, in ufshcd_wait_for_doorbell_clr()
1175 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1176 ufshcd_release(hba); in ufshcd_wait_for_doorbell_clr()
1182 * @hba: per adapter instance
1189 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_gear() argument
1195 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1198 memcpy(&new_pwr_info, &hba->pwr_info, in ufshcd_scale_gear()
1201 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || in ufshcd_scale_gear()
1202 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { in ufshcd_scale_gear()
1204 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1205 &hba->pwr_info, in ufshcd_scale_gear()
1209 new_pwr_info.gear_tx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1210 new_pwr_info.gear_rx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1215 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); in ufshcd_scale_gear()
1217 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", in ufshcd_scale_gear()
1219 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, in ufshcd_scale_gear()
1225 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) in ufshcd_clock_scaling_prepare() argument
1233 ufshcd_scsi_block_requests(hba); in ufshcd_clock_scaling_prepare()
1234 down_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1236 if (!hba->clk_scaling.is_allowed || in ufshcd_clock_scaling_prepare()
1237 ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { in ufshcd_clock_scaling_prepare()
1239 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1240 ufshcd_scsi_unblock_requests(hba); in ufshcd_clock_scaling_prepare()
1245 ufshcd_hold(hba, false); in ufshcd_clock_scaling_prepare()
1251 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock) in ufshcd_clock_scaling_unprepare() argument
1254 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1256 up_read(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1257 ufshcd_scsi_unblock_requests(hba); in ufshcd_clock_scaling_unprepare()
1258 ufshcd_release(hba); in ufshcd_clock_scaling_unprepare()
1263 * @hba: per adapter instance
1270 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) in ufshcd_devfreq_scale() argument
1275 ret = ufshcd_clock_scaling_prepare(hba); in ufshcd_devfreq_scale()
1281 ret = ufshcd_scale_gear(hba, false); in ufshcd_devfreq_scale()
1286 ret = ufshcd_scale_clks(hba, scale_up); in ufshcd_devfreq_scale()
1289 ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1295 ret = ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1297 ufshcd_scale_clks(hba, false); in ufshcd_devfreq_scale()
1303 if (ufshcd_enable_wb_if_scaling_up(hba)) { in ufshcd_devfreq_scale()
1304 downgrade_write(&hba->clk_scaling_lock); in ufshcd_devfreq_scale()
1306 ufshcd_wb_toggle(hba, scale_up); in ufshcd_devfreq_scale()
1310 ufshcd_clock_scaling_unprepare(hba, is_writelock); in ufshcd_devfreq_scale()
1316 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_suspend_work() local
1320 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1321 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_suspend_work()
1322 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1325 hba->clk_scaling.is_suspended = true; in ufshcd_clk_scaling_suspend_work()
1326 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1328 __ufshcd_suspend_clkscaling(hba); in ufshcd_clk_scaling_suspend_work()
1333 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_resume_work() local
1337 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1338 if (!hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_resume_work()
1339 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1342 hba->clk_scaling.is_suspended = false; in ufshcd_clk_scaling_resume_work()
1343 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1345 devfreq_resume_device(hba->devfreq); in ufshcd_clk_scaling_resume_work()
1352 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_target() local
1355 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_target()
1359 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_target()
1362 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); in ufshcd_devfreq_target()
1365 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1366 if (ufshcd_eh_in_progress(hba)) { in ufshcd_devfreq_target()
1367 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1371 if (!hba->clk_scaling.active_reqs) in ufshcd_devfreq_target()
1375 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1384 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { in ufshcd_devfreq_target()
1385 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1389 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1392 ret = ufshcd_devfreq_scale(hba, scale_up); in ufshcd_devfreq_target()
1394 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_devfreq_target()
1400 queue_work(hba->clk_scaling.workq, in ufshcd_devfreq_target()
1401 &hba->clk_scaling.suspend_work); in ufshcd_devfreq_target()
1409 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_get_dev_status() local
1410 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_devfreq_get_dev_status()
1412 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_get_dev_status()
1416 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_get_dev_status()
1421 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
1443 if (hba->outstanding_reqs) { in ufshcd_devfreq_get_dev_status()
1450 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
1454 static int ufshcd_devfreq_init(struct ufs_hba *hba) in ufshcd_devfreq_init() argument
1456 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_init()
1466 dev_pm_opp_add(hba->dev, clki->min_freq, 0); in ufshcd_devfreq_init()
1467 dev_pm_opp_add(hba->dev, clki->max_freq, 0); in ufshcd_devfreq_init()
1469 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1470 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1471 devfreq = devfreq_add_device(hba->dev, in ufshcd_devfreq_init()
1472 &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1474 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1477 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); in ufshcd_devfreq_init()
1479 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_init()
1480 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_init()
1484 hba->devfreq = devfreq; in ufshcd_devfreq_init()
1489 static void ufshcd_devfreq_remove(struct ufs_hba *hba) in ufshcd_devfreq_remove() argument
1491 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_remove()
1494 if (!hba->devfreq) in ufshcd_devfreq_remove()
1497 devfreq_remove_device(hba->devfreq); in ufshcd_devfreq_remove()
1498 hba->devfreq = NULL; in ufshcd_devfreq_remove()
1501 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_remove()
1502 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_remove()
1505 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) in __ufshcd_suspend_clkscaling() argument
1509 devfreq_suspend_device(hba->devfreq); in __ufshcd_suspend_clkscaling()
1510 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1511 hba->clk_scaling.window_start_t = 0; in __ufshcd_suspend_clkscaling()
1512 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1515 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) in ufshcd_suspend_clkscaling() argument
1520 cancel_work_sync(&hba->clk_scaling.suspend_work); in ufshcd_suspend_clkscaling()
1521 cancel_work_sync(&hba->clk_scaling.resume_work); in ufshcd_suspend_clkscaling()
1523 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1524 if (!hba->clk_scaling.is_suspended) { in ufshcd_suspend_clkscaling()
1526 hba->clk_scaling.is_suspended = true; in ufshcd_suspend_clkscaling()
1528 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1531 __ufshcd_suspend_clkscaling(hba); in ufshcd_suspend_clkscaling()
1534 static void ufshcd_resume_clkscaling(struct ufs_hba *hba) in ufshcd_resume_clkscaling() argument
1539 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1540 if (hba->clk_scaling.is_suspended) { in ufshcd_resume_clkscaling()
1542 hba->clk_scaling.is_suspended = false; in ufshcd_resume_clkscaling()
1544 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1547 devfreq_resume_device(hba->devfreq); in ufshcd_resume_clkscaling()
1553 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_show() local
1555 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); in ufshcd_clkscale_enable_show()
1561 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_store() local
1568 down(&hba->host_sem); in ufshcd_clkscale_enable_store()
1569 if (!ufshcd_is_user_access_allowed(hba)) { in ufshcd_clkscale_enable_store()
1575 if (value == hba->clk_scaling.is_enabled) in ufshcd_clkscale_enable_store()
1578 ufshcd_rpm_get_sync(hba); in ufshcd_clkscale_enable_store()
1579 ufshcd_hold(hba, false); in ufshcd_clkscale_enable_store()
1581 hba->clk_scaling.is_enabled = value; in ufshcd_clkscale_enable_store()
1584 ufshcd_resume_clkscaling(hba); in ufshcd_clkscale_enable_store()
1586 ufshcd_suspend_clkscaling(hba); in ufshcd_clkscale_enable_store()
1587 err = ufshcd_devfreq_scale(hba, true); in ufshcd_clkscale_enable_store()
1589 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", in ufshcd_clkscale_enable_store()
1593 ufshcd_release(hba); in ufshcd_clkscale_enable_store()
1594 ufshcd_rpm_put_sync(hba); in ufshcd_clkscale_enable_store()
1596 up(&hba->host_sem); in ufshcd_clkscale_enable_store()
1600 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_scaling_sysfs() argument
1602 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; in ufshcd_init_clk_scaling_sysfs()
1603 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; in ufshcd_init_clk_scaling_sysfs()
1604 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); in ufshcd_init_clk_scaling_sysfs()
1605 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; in ufshcd_init_clk_scaling_sysfs()
1606 hba->clk_scaling.enable_attr.attr.mode = 0644; in ufshcd_init_clk_scaling_sysfs()
1607 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) in ufshcd_init_clk_scaling_sysfs()
1608 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); in ufshcd_init_clk_scaling_sysfs()
1611 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_scaling_sysfs() argument
1613 if (hba->clk_scaling.enable_attr.attr.name) in ufshcd_remove_clk_scaling_sysfs()
1614 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); in ufshcd_remove_clk_scaling_sysfs()
1617 static void ufshcd_init_clk_scaling(struct ufs_hba *hba) in ufshcd_init_clk_scaling() argument
1621 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_init_clk_scaling()
1624 if (!hba->clk_scaling.min_gear) in ufshcd_init_clk_scaling()
1625 hba->clk_scaling.min_gear = UFS_HS_G1; in ufshcd_init_clk_scaling()
1627 INIT_WORK(&hba->clk_scaling.suspend_work, in ufshcd_init_clk_scaling()
1629 INIT_WORK(&hba->clk_scaling.resume_work, in ufshcd_init_clk_scaling()
1633 hba->host->host_no); in ufshcd_init_clk_scaling()
1634 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); in ufshcd_init_clk_scaling()
1636 hba->clk_scaling.is_initialized = true; in ufshcd_init_clk_scaling()
1639 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) in ufshcd_exit_clk_scaling() argument
1641 if (!hba->clk_scaling.is_initialized) in ufshcd_exit_clk_scaling()
1644 ufshcd_remove_clk_scaling_sysfs(hba); in ufshcd_exit_clk_scaling()
1645 destroy_workqueue(hba->clk_scaling.workq); in ufshcd_exit_clk_scaling()
1646 ufshcd_devfreq_remove(hba); in ufshcd_exit_clk_scaling()
1647 hba->clk_scaling.is_initialized = false; in ufshcd_exit_clk_scaling()
1654 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_ungate_work() local
1657 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_ungate_work()
1659 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_ungate_work()
1660 if (hba->clk_gating.state == CLKS_ON) { in ufshcd_ungate_work()
1661 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1665 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1666 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_ungate_work()
1667 ufshcd_setup_clocks(hba, true); in ufshcd_ungate_work()
1669 ufshcd_enable_irq(hba); in ufshcd_ungate_work()
1672 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_ungate_work()
1674 hba->clk_gating.is_suspended = true; in ufshcd_ungate_work()
1675 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_ungate_work()
1676 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_ungate_work()
1678 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_ungate_work()
1681 ufshcd_set_link_active(hba); in ufshcd_ungate_work()
1683 hba->clk_gating.is_suspended = false; in ufshcd_ungate_work()
1686 ufshcd_scsi_unblock_requests(hba); in ufshcd_ungate_work()
1692 * @hba: per adapter instance
1695 int ufshcd_hold(struct ufs_hba *hba, bool async) in ufshcd_hold() argument
1701 if (!ufshcd_is_clkgating_allowed(hba) || in ufshcd_hold()
1702 !hba->clk_gating.is_initialized) in ufshcd_hold()
1704 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1705 hba->clk_gating.active_reqs++; in ufshcd_hold()
1708 switch (hba->clk_gating.state) { in ufshcd_hold()
1718 if (ufshcd_can_hibern8_during_gating(hba) && in ufshcd_hold()
1719 ufshcd_is_link_hibern8(hba)) { in ufshcd_hold()
1722 hba->clk_gating.active_reqs--; in ufshcd_hold()
1725 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1726 flush_result = flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1727 if (hba->clk_gating.is_suspended && !flush_result) in ufshcd_hold()
1729 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1734 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { in ufshcd_hold()
1735 hba->clk_gating.state = CLKS_ON; in ufshcd_hold()
1736 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1737 hba->clk_gating.state); in ufshcd_hold()
1747 hba->clk_gating.state = REQ_CLKS_ON; in ufshcd_hold()
1748 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1749 hba->clk_gating.state); in ufshcd_hold()
1750 if (queue_work(hba->clk_gating.clk_gating_workq, in ufshcd_hold()
1751 &hba->clk_gating.ungate_work)) in ufshcd_hold()
1752 ufshcd_scsi_block_requests(hba); in ufshcd_hold()
1761 hba->clk_gating.active_reqs--; in ufshcd_hold()
1765 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1766 flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1768 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1771 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", in ufshcd_hold()
1772 __func__, hba->clk_gating.state); in ufshcd_hold()
1775 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1783 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_gate_work() local
1788 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1795 if (hba->clk_gating.is_suspended || in ufshcd_gate_work()
1796 (hba->clk_gating.state != REQ_CLKS_OFF)) { in ufshcd_gate_work()
1797 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1798 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1799 hba->clk_gating.state); in ufshcd_gate_work()
1803 if (hba->clk_gating.active_reqs in ufshcd_gate_work()
1804 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in ufshcd_gate_work()
1805 || hba->outstanding_reqs || hba->outstanding_tasks in ufshcd_gate_work()
1806 || hba->active_uic_cmd || hba->uic_async_done) in ufshcd_gate_work()
1809 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1812 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_gate_work()
1813 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_gate_work()
1815 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1816 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_gate_work()
1818 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1819 hba->clk_gating.state); in ufshcd_gate_work()
1822 ufshcd_set_link_hibern8(hba); in ufshcd_gate_work()
1825 ufshcd_disable_irq(hba); in ufshcd_gate_work()
1827 ufshcd_setup_clocks(hba, false); in ufshcd_gate_work()
1830 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_gate_work()
1840 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1841 if (hba->clk_gating.state == REQ_CLKS_OFF) { in ufshcd_gate_work()
1842 hba->clk_gating.state = CLKS_OFF; in ufshcd_gate_work()
1843 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1844 hba->clk_gating.state); in ufshcd_gate_work()
1847 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1853 static void __ufshcd_release(struct ufs_hba *hba) in __ufshcd_release() argument
1855 if (!ufshcd_is_clkgating_allowed(hba)) in __ufshcd_release()
1858 hba->clk_gating.active_reqs--; in __ufshcd_release()
1860 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || in __ufshcd_release()
1861 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || in __ufshcd_release()
1862 hba->outstanding_tasks || !hba->clk_gating.is_initialized || in __ufshcd_release()
1863 hba->active_uic_cmd || hba->uic_async_done || in __ufshcd_release()
1864 hba->clk_gating.state == CLKS_OFF) in __ufshcd_release()
1867 hba->clk_gating.state = REQ_CLKS_OFF; in __ufshcd_release()
1868 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); in __ufshcd_release()
1869 queue_delayed_work(hba->clk_gating.clk_gating_workq, in __ufshcd_release()
1870 &hba->clk_gating.gate_work, in __ufshcd_release()
1871 msecs_to_jiffies(hba->clk_gating.delay_ms)); in __ufshcd_release()
1874 void ufshcd_release(struct ufs_hba *hba) in ufshcd_release() argument
1878 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_release()
1879 __ufshcd_release(hba); in ufshcd_release()
1880 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_release()
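ufshcd_hold() and ufshcd_release() form the reference count that keeps the controller clocks ungated; the UIC and query paths in this listing (ufshcd_send_uic_cmd(), ufshcd_query_flag()) bracket their register traffic with this pair. A hedged caller-side sketch follows; it is an editor's illustration with a hypothetical helper name, not code from ufshcd.c:

	/*
	 * Editor's sketch: keep the clocks on (and, where gating allows it, the
	 * link out of hibern8) for the duration of a register access, then
	 * re-arm the delayed gate_work via ufshcd_release().
	 */
	static int example_peek_controller_status(struct ufs_hba *hba, u32 *status)
	{
		int err;

		err = ufshcd_hold(hba, false);	/* async=false: may block until CLKS_ON */
		if (err)
			return err;

		*status = ufshcd_readl(hba, REG_CONTROLLER_STATUS);

		ufshcd_release(hba);	/* queues gate_work after clk_gating.delay_ms */
		return 0;
	}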
1887 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_show() local
1889 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms); in ufshcd_clkgate_delay_show()
1894 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_set() local
1897 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_delay_set()
1898 hba->clk_gating.delay_ms = value; in ufshcd_clkgate_delay_set()
1899 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_delay_set()
1918 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_show() local
1920 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); in ufshcd_clkgate_enable_show()
1926 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_store() local
1935 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1936 if (value == hba->clk_gating.is_enabled) in ufshcd_clkgate_enable_store()
1940 __ufshcd_release(hba); in ufshcd_clkgate_enable_store()
1942 hba->clk_gating.active_reqs++; in ufshcd_clkgate_enable_store()
1944 hba->clk_gating.is_enabled = value; in ufshcd_clkgate_enable_store()
1946 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1950 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_gating_sysfs() argument
1952 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; in ufshcd_init_clk_gating_sysfs()
1953 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; in ufshcd_init_clk_gating_sysfs()
1954 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); in ufshcd_init_clk_gating_sysfs()
1955 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; in ufshcd_init_clk_gating_sysfs()
1956 hba->clk_gating.delay_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1957 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) in ufshcd_init_clk_gating_sysfs()
1958 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); in ufshcd_init_clk_gating_sysfs()
1960 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; in ufshcd_init_clk_gating_sysfs()
1961 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; in ufshcd_init_clk_gating_sysfs()
1962 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); in ufshcd_init_clk_gating_sysfs()
1963 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; in ufshcd_init_clk_gating_sysfs()
1964 hba->clk_gating.enable_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1965 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) in ufshcd_init_clk_gating_sysfs()
1966 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); in ufshcd_init_clk_gating_sysfs()
1969 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_gating_sysfs() argument
1971 if (hba->clk_gating.delay_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1972 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); in ufshcd_remove_clk_gating_sysfs()
1973 if (hba->clk_gating.enable_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1974 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); in ufshcd_remove_clk_gating_sysfs()
1977 static void ufshcd_init_clk_gating(struct ufs_hba *hba) in ufshcd_init_clk_gating() argument
1981 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_init_clk_gating()
1984 hba->clk_gating.state = CLKS_ON; in ufshcd_init_clk_gating()
1986 hba->clk_gating.delay_ms = 150; in ufshcd_init_clk_gating()
1987 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); in ufshcd_init_clk_gating()
1988 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); in ufshcd_init_clk_gating()
1991 hba->host->host_no); in ufshcd_init_clk_gating()
1992 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, in ufshcd_init_clk_gating()
1995 ufshcd_init_clk_gating_sysfs(hba); in ufshcd_init_clk_gating()
1997 hba->clk_gating.is_enabled = true; in ufshcd_init_clk_gating()
1998 hba->clk_gating.is_initialized = true; in ufshcd_init_clk_gating()
2001 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) in ufshcd_exit_clk_gating() argument
2003 if (!hba->clk_gating.is_initialized) in ufshcd_exit_clk_gating()
2006 ufshcd_remove_clk_gating_sysfs(hba); in ufshcd_exit_clk_gating()
2009 ufshcd_hold(hba, false); in ufshcd_exit_clk_gating()
2010 hba->clk_gating.is_initialized = false; in ufshcd_exit_clk_gating()
2011 ufshcd_release(hba); in ufshcd_exit_clk_gating()
2013 destroy_workqueue(hba->clk_gating.clk_gating_workq); in ufshcd_exit_clk_gating()
2017 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_start_busy() argument
2023 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_start_busy()
2026 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2027 if (!hba->clk_scaling.active_reqs++) in ufshcd_clk_scaling_start_busy()
2030 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { in ufshcd_clk_scaling_start_busy()
2031 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2036 queue_work(hba->clk_scaling.workq, in ufshcd_clk_scaling_start_busy()
2037 &hba->clk_scaling.resume_work); in ufshcd_clk_scaling_start_busy()
2039 if (!hba->clk_scaling.window_start_t) { in ufshcd_clk_scaling_start_busy()
2040 hba->clk_scaling.window_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
2041 hba->clk_scaling.tot_busy_t = 0; in ufshcd_clk_scaling_start_busy()
2042 hba->clk_scaling.is_busy_started = false; in ufshcd_clk_scaling_start_busy()
2045 if (!hba->clk_scaling.is_busy_started) { in ufshcd_clk_scaling_start_busy()
2046 hba->clk_scaling.busy_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
2047 hba->clk_scaling.is_busy_started = true; in ufshcd_clk_scaling_start_busy()
2049 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2052 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_update_busy() argument
2054 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_clk_scaling_update_busy()
2057 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_update_busy()
2060 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2061 hba->clk_scaling.active_reqs--; in ufshcd_clk_scaling_update_busy()
2062 if (!hba->outstanding_reqs && scaling->is_busy_started) { in ufshcd_clk_scaling_update_busy()
2068 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2081 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, in ufshcd_should_inform_monitor() argument
2084 const struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_should_inform_monitor()
2088 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); in ufshcd_should_inform_monitor()
2091 static void ufshcd_start_monitor(struct ufs_hba *hba, in ufshcd_start_monitor() argument
2097 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_start_monitor()
2098 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) in ufshcd_start_monitor()
2099 hba->monitor.busy_start_ts[dir] = ktime_get(); in ufshcd_start_monitor()
2100 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_start_monitor()
2103 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp) in ufshcd_update_monitor() argument
2108 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_update_monitor()
2109 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { in ufshcd_update_monitor()
2111 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_update_monitor()
2132 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_update_monitor()
2137 * @hba: per adapter instance
2141 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) in ufshcd_send_command() argument
2143 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; in ufshcd_send_command()
2150 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); in ufshcd_send_command()
2151 ufshcd_clk_scaling_start_busy(hba); in ufshcd_send_command()
2152 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in ufshcd_send_command()
2153 ufshcd_start_monitor(hba, lrbp); in ufshcd_send_command()
2155 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_send_command()
2156 if (hba->vops && hba->vops->setup_xfer_req) in ufshcd_send_command()
2157 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); in ufshcd_send_command()
2158 __set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
2159 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_send_command()
2160 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_send_command()
2187 * @hba: per adapter instance
2191 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_copy_query_response() argument
2193 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_copy_query_response()
2198 if (hba->dev_cmd.query.descriptor && in ufshcd_copy_query_response()
2209 hba->dev_cmd.query.request.upiu_req.length); in ufshcd_copy_query_response()
2211 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); in ufshcd_copy_query_response()
2213 dev_warn(hba->dev, in ufshcd_copy_query_response()
2225 * @hba: per adapter instance
2229 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) in ufshcd_hba_capabilities() argument
2233 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); in ufshcd_hba_capabilities()
2234 if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) in ufshcd_hba_capabilities()
2235 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; in ufshcd_hba_capabilities()
2238 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; in ufshcd_hba_capabilities()
2239 hba->nutmrs = in ufshcd_hba_capabilities()
2240 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; in ufshcd_hba_capabilities()
2241 hba->reserved_slot = hba->nutrs - 1; in ufshcd_hba_capabilities()
2244 err = ufshcd_hba_init_crypto_capabilities(hba); in ufshcd_hba_capabilities()
2246 dev_err(hba->dev, "crypto setup failed\n"); in ufshcd_hba_capabilities()
2254 * @hba: per adapter instance
2257 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) in ufshcd_ready_for_uic_cmd() argument
2259 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY; in ufshcd_ready_for_uic_cmd()
2264 * @hba: Pointer to adapter instance
2269 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) in ufshcd_get_upmcrs() argument
2271 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; in ufshcd_get_upmcrs()
2276 * @hba: per adapter instance
2280 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_dispatch_uic_cmd() argument
2282 lockdep_assert_held(&hba->uic_cmd_mutex); in ufshcd_dispatch_uic_cmd()
2284 WARN_ON(hba->active_uic_cmd); in ufshcd_dispatch_uic_cmd()
2286 hba->active_uic_cmd = uic_cmd; in ufshcd_dispatch_uic_cmd()
2289 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); in ufshcd_dispatch_uic_cmd()
2290 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); in ufshcd_dispatch_uic_cmd()
2291 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); in ufshcd_dispatch_uic_cmd()
2293 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); in ufshcd_dispatch_uic_cmd()
2296 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, in ufshcd_dispatch_uic_cmd()
2302 * @hba: per adapter instance
2308 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_wait_for_uic_cmd() argument
2313 lockdep_assert_held(&hba->uic_cmd_mutex); in ufshcd_wait_for_uic_cmd()
2320 dev_err(hba->dev, in ufshcd_wait_for_uic_cmd()
2325 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", in ufshcd_wait_for_uic_cmd()
2331 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2332 hba->active_uic_cmd = NULL; in ufshcd_wait_for_uic_cmd()
2333 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2340 * @hba: per adapter instance
2347 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, in __ufshcd_send_uic_cmd() argument
2350 lockdep_assert_held(&hba->uic_cmd_mutex); in __ufshcd_send_uic_cmd()
2351 lockdep_assert_held(hba->host->host_lock); in __ufshcd_send_uic_cmd()
2353 if (!ufshcd_ready_for_uic_cmd(hba)) { in __ufshcd_send_uic_cmd()
2354 dev_err(hba->dev, in __ufshcd_send_uic_cmd()
2363 ufshcd_dispatch_uic_cmd(hba, uic_cmd); in __ufshcd_send_uic_cmd()
2370 * @hba: per adapter instance
2375 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_send_uic_cmd() argument
2380 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) in ufshcd_send_uic_cmd()
2383 ufshcd_hold(hba, false); in ufshcd_send_uic_cmd()
2384 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2385 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_send_uic_cmd()
2387 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
2388 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); in ufshcd_send_uic_cmd()
2389 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
2391 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
2393 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2395 ufshcd_release(hba); in ufshcd_send_uic_cmd()
2401 * @hba: per adapter instance
2406 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_map_sg() argument
2421 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_map_sg()
2456 * @hba: per adapter instance
2459 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_enable_intr() argument
2461 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2463 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_enable_intr()
2471 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2476 * @hba: per adapter instance
2479 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_disable_intr() argument
2481 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
2483 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_disable_intr()
2493 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
2581 * @hba: UFS hba
2585 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, in ufshcd_prepare_utp_query_req_upiu() argument
2589 struct ufs_query *query = &hba->dev_cmd.query; in ufshcd_prepare_utp_query_req_upiu()
2637 * @hba: per adapter instance
2640 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, in ufshcd_compose_devman_upiu() argument
2646 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_compose_devman_upiu()
2652 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) in ufshcd_compose_devman_upiu()
2653 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); in ufshcd_compose_devman_upiu()
2654 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) in ufshcd_compose_devman_upiu()
2665 * @hba: per adapter instance
2668 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_comp_scsi_upiu() argument
2673 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_comp_scsi_upiu()
2733 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) in ufshcd_init_lrb() argument
2735 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr; in ufshcd_init_lrb()
2736 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; in ufshcd_init_lrb()
2737 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + in ufshcd_init_lrb()
2744 lrb->utrd_dma_addr = hba->utrdl_dma_addr + in ufshcd_init_lrb()
2763 struct ufs_hba *hba = shost_priv(host); in ufshcd_queuecommand() local
2768 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); in ufshcd_queuecommand()
2776 switch (hba->ufshcd_state) { in ufshcd_queuecommand()
2787 if (ufshcd_eh_in_progress(hba)) { in ufshcd_queuecommand()
2795 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's in ufshcd_queuecommand()
2803 if (hba->pm_op_in_progress) { in ufshcd_queuecommand()
2804 hba->force_reset = true; in ufshcd_queuecommand()
2819 hba->req_abort_count = 0; in ufshcd_queuecommand()
2821 err = ufshcd_hold(hba, true); in ufshcd_queuecommand()
2826 WARN_ON(ufshcd_is_clkgating_allowed(hba) && in ufshcd_queuecommand()
2827 (hba->clk_gating.state != CLKS_ON)); in ufshcd_queuecommand()
2829 lrbp = &hba->lrb[tag]; in ufshcd_queuecommand()
2834 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); in ufshcd_queuecommand()
2840 ufshpb_prep(hba, lrbp); in ufshcd_queuecommand()
2842 ufshcd_comp_scsi_upiu(hba, lrbp); in ufshcd_queuecommand()
2844 err = ufshcd_map_sg(hba, lrbp); in ufshcd_queuecommand()
2847 ufshcd_release(hba); in ufshcd_queuecommand()
2851 ufshcd_send_command(hba, tag); in ufshcd_queuecommand()
2859 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_queuecommand()
2860 ufshcd_schedule_eh_work(hba); in ufshcd_queuecommand()
2861 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_queuecommand()
2867 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, in ufshcd_compose_dev_cmd() argument
2875 hba->dev_cmd.type = cmd_type; in ufshcd_compose_dev_cmd()
2877 return ufshcd_compose_devman_upiu(hba, lrbp); in ufshcd_compose_dev_cmd()
2885 static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask) in ufshcd_clear_cmds() argument
2890 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_cmds()
2891 ufshcd_utrl_clear(hba, mask); in ufshcd_clear_cmds()
2892 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_cmds()
2898 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, in ufshcd_clear_cmds()
2903 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_check_query_response() argument
2905 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_check_query_response()
2915 * @hba: per adapter instance
2919 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_dev_cmd_completion() argument
2924 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_dev_cmd_completion()
2929 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { in ufshcd_dev_cmd_completion()
2931 dev_err(hba->dev, "%s: unexpected response %x\n", in ufshcd_dev_cmd_completion()
2936 err = ufshcd_check_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
2938 err = ufshcd_copy_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
2943 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", in ufshcd_dev_cmd_completion()
2948 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", in ufshcd_dev_cmd_completion()
2956 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, in ufshcd_wait_for_dev_cmd() argument
2965 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, in ufshcd_wait_for_dev_cmd()
2974 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
2977 err = ufshcd_dev_cmd_completion(hba, lrbp); in ufshcd_wait_for_dev_cmd()
2980 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n", in ufshcd_wait_for_dev_cmd()
2982 if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) { in ufshcd_wait_for_dev_cmd()
2990 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
2992 &hba->outstanding_reqs); in ufshcd_wait_for_dev_cmd()
2994 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
2996 &hba->outstanding_reqs); in ufshcd_wait_for_dev_cmd()
2998 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
3009 dev_err(hba->dev, "%s: failed to clear tag %d\n", in ufshcd_wait_for_dev_cmd()
3019 * @hba: UFS hba
3024 * it is expected you hold the hba->dev_cmd.lock mutex.
3026 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, in ufshcd_exec_dev_cmd() argument
3030 const u32 tag = hba->reserved_slot; in ufshcd_exec_dev_cmd()
3034 /* Protects use of hba->reserved_slot. */ in ufshcd_exec_dev_cmd()
3035 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_exec_dev_cmd()
3037 down_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
3039 lrbp = &hba->lrb[tag]; in ufshcd_exec_dev_cmd()
3041 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); in ufshcd_exec_dev_cmd()
3045 hba->dev_cmd.complete = &wait; in ufshcd_exec_dev_cmd()
3047 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); in ufshcd_exec_dev_cmd()
3049 ufshcd_send_command(hba, tag); in ufshcd_exec_dev_cmd()
3050 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); in ufshcd_exec_dev_cmd()
3051 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, in ufshcd_exec_dev_cmd()
3055 up_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
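The ufshcd_exec_dev_cmd() lines above show the reserved device-management slot being claimed under the caller-held hba->dev_cmd.lock (note the lockdep assertion at source line 3035). As a minimal caller-side sketch of that contract, modeled on ufshcd_verify_dev_init() further down in this listing, the expected pattern looks roughly like the following; send_nop_example() is a hypothetical helper name, not a function in the driver:

        /*
         * Illustrative sketch only: issue a NOP OUT device-management command
         * while holding hba->dev_cmd.lock and keeping the host active via
         * ufshcd_hold(), as the functions above expect. Assumed to live inside
         * ufshcd.c next to the helpers it calls.
         */
        static int send_nop_example(struct ufs_hba *hba)
        {
                int err;

                ufshcd_hold(hba, false);                /* keep clocks/link active */
                mutex_lock(&hba->dev_cmd.lock);         /* protects hba->reserved_slot */
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
                                          hba->nop_out_timeout);
                mutex_unlock(&hba->dev_cmd.lock);
                ufshcd_release(hba);

                return err;
        }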
3061 * @hba: per-adapter instance
3069 static inline void ufshcd_init_query(struct ufs_hba *hba, in ufshcd_init_query() argument
3073 *request = &hba->dev_cmd.query.request; in ufshcd_init_query()
3074 *response = &hba->dev_cmd.query.response; in ufshcd_init_query()
3083 static int ufshcd_query_flag_retry(struct ufs_hba *hba, in ufshcd_query_flag_retry() argument
3090 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); in ufshcd_query_flag_retry()
3092 dev_dbg(hba->dev, in ufshcd_query_flag_retry()
3100 dev_err(hba->dev, in ufshcd_query_flag_retry()
3108 * @hba: per-adapter instance
3116 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_flag() argument
3124 BUG_ON(!hba); in ufshcd_query_flag()
3126 ufshcd_hold(hba, false); in ufshcd_query_flag()
3127 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3128 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_flag()
3141 dev_err(hba->dev, "%s: Invalid argument for read request\n", in ufshcd_query_flag()
3148 dev_err(hba->dev, in ufshcd_query_flag()
3155 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); in ufshcd_query_flag()
3158 dev_err(hba->dev, in ufshcd_query_flag()
3169 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3170 ufshcd_release(hba); in ufshcd_query_flag()
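ufshcd_query_flag() and ufshcd_query_flag_retry() above implement the UFS flag query opcodes on top of ufshcd_exec_dev_cmd(). A hedged usage sketch, mirroring the fDeviceInit handling in ufshcd_complete_dev_init() later in this listing; QUERY_FLAG_IDN_FDEVICEINIT comes from ufs.h, and check_fdeviceinit_example() is a hypothetical name:

        /*
         * Illustrative sketch only: set fDeviceInit and read it back once with
         * the flag query helpers above (the real driver polls until the device
         * clears the flag).
         */
        static int check_fdeviceinit_example(struct ufs_hba *hba)
        {
                bool flag_res = true;
                int err;

                err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
                                              QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
                if (err)
                        return err;

                err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
                                        QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
                if (!err && flag_res)
                        err = -EBUSY;           /* device still initialising */

                return err;
        }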
3176 * @hba: per-adapter instance
3185 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_attr() argument
3192 BUG_ON(!hba); in ufshcd_query_attr()
3195 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", in ufshcd_query_attr()
3200 ufshcd_hold(hba, false); in ufshcd_query_attr()
3202 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_attr()
3203 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_attr()
3215 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", in ufshcd_query_attr()
3221 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_attr()
3224 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in ufshcd_query_attr()
3232 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_attr()
3233 ufshcd_release(hba); in ufshcd_query_attr()
3240 * @hba: per-adapter instance
3250 int ufshcd_query_attr_retry(struct ufs_hba *hba, in ufshcd_query_attr_retry() argument
3258 ret = ufshcd_query_attr(hba, opcode, idn, index, in ufshcd_query_attr_retry()
3261 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", in ufshcd_query_attr_retry()
3268 dev_err(hba->dev, in ufshcd_query_attr_retry()
3274 static int __ufshcd_query_descriptor(struct ufs_hba *hba, in __ufshcd_query_descriptor() argument
3282 BUG_ON(!hba); in __ufshcd_query_descriptor()
3285 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", in __ufshcd_query_descriptor()
3291 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", in __ufshcd_query_descriptor()
3296 ufshcd_hold(hba, false); in __ufshcd_query_descriptor()
3298 mutex_lock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3299 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in __ufshcd_query_descriptor()
3301 hba->dev_cmd.query.descriptor = desc_buf; in __ufshcd_query_descriptor()
3312 dev_err(hba->dev, in __ufshcd_query_descriptor()
3319 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in __ufshcd_query_descriptor()
3322 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in __ufshcd_query_descriptor()
3330 hba->dev_cmd.query.descriptor = NULL; in __ufshcd_query_descriptor()
3331 mutex_unlock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3332 ufshcd_release(hba); in __ufshcd_query_descriptor()
3338 * @hba: per-adapter instance
3350 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, in ufshcd_query_descriptor_retry() argument
3360 err = __ufshcd_query_descriptor(hba, opcode, idn, index, in ufshcd_query_descriptor_retry()
3371 * @hba: Pointer to adapter instance
3375 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, in ufshcd_map_desc_id_to_length() argument
3382 *desc_len = hba->desc_size[desc_id]; in ufshcd_map_desc_id_to_length()
3386 static void ufshcd_update_desc_length(struct ufs_hba *hba, in ufshcd_update_desc_length() argument
3390 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && in ufshcd_update_desc_length()
3397 hba->desc_size[desc_id] = desc_len; in ufshcd_update_desc_length()
3402 * @hba: Pointer to adapter instance
3411 int ufshcd_read_desc_param(struct ufs_hba *hba, in ufshcd_read_desc_param() argument
3428 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); in ufshcd_read_desc_param()
3430 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__); in ufshcd_read_desc_param()
3435 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", in ufshcd_read_desc_param()
3451 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, in ufshcd_read_desc_param()
3456 …dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret … in ufshcd_read_desc_param()
3463 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", in ufshcd_read_desc_param()
3471 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len); in ufshcd_read_desc_param()
3508 * @hba: pointer to adapter instance
3520 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, in ufshcd_read_string_desc() argument
3534 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, in ufshcd_read_string_desc()
3537 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", in ufshcd_read_string_desc()
3544 dev_dbg(hba->dev, "String Desc is of zero length\n"); in ufshcd_read_string_desc()
3591 * @hba: Pointer to adapter instance
3599 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, in ufshcd_read_unit_desc_param() argument
3609 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset)) in ufshcd_read_unit_desc_param()
3612 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, in ufshcd_read_unit_desc_param()
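ufshcd_read_unit_desc_param() above is a thin per-LUN wrapper around ufshcd_read_desc_param() for the unit descriptor. A hedged sketch of reading the LU queue depth the same way ufshcd_set_queue_depth() does further down; UNIT_DESC_PARAM_LU_Q_DEPTH is the bLUQueueDepth offset from ufs.h, and read_lu_qdepth_example() is a hypothetical name:

        /*
         * Illustrative sketch only: fetch bLUQueueDepth from one LUN's unit
         * descriptor using the wrapper above.
         */
        static int read_lu_qdepth_example(struct ufs_hba *hba, int lun, u8 *qdepth)
        {
                return ufshcd_read_unit_desc_param(hba, lun,
                                                   UNIT_DESC_PARAM_LU_Q_DEPTH,
                                                   qdepth, sizeof(*qdepth));
        }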
3616 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) in ufshcd_get_ref_clk_gating_wait() argument
3621 if (hba->dev_info.wspecversion >= 0x300) { in ufshcd_get_ref_clk_gating_wait()
3622 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ref_clk_gating_wait()
3626 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3631 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3635 hba->dev_info.clk_gating_wait_us = gating_wait; in ufshcd_get_ref_clk_gating_wait()
3643 * @hba: per adapter instance
3654 static int ufshcd_memory_alloc(struct ufs_hba *hba) in ufshcd_memory_alloc() argument
3659 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); in ufshcd_memory_alloc()
3660 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3662 &hba->ucdl_dma_addr, in ufshcd_memory_alloc()
3667 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE in ufshcd_memory_alloc()
3668 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will in ufshcd_memory_alloc()
3671 if (!hba->ucdl_base_addr || in ufshcd_memory_alloc()
3672 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3673 dev_err(hba->dev, in ufshcd_memory_alloc()
3682 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); in ufshcd_memory_alloc()
3683 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3685 &hba->utrdl_dma_addr, in ufshcd_memory_alloc()
3687 if (!hba->utrdl_base_addr || in ufshcd_memory_alloc()
3688 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3689 dev_err(hba->dev, in ufshcd_memory_alloc()
3698 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; in ufshcd_memory_alloc()
3699 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3701 &hba->utmrdl_dma_addr, in ufshcd_memory_alloc()
3703 if (!hba->utmrdl_base_addr || in ufshcd_memory_alloc()
3704 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3705 dev_err(hba->dev, in ufshcd_memory_alloc()
3711 hba->lrb = devm_kcalloc(hba->dev, in ufshcd_memory_alloc()
3712 hba->nutrs, sizeof(struct ufshcd_lrb), in ufshcd_memory_alloc()
3714 if (!hba->lrb) { in ufshcd_memory_alloc()
3715 dev_err(hba->dev, "LRB Memory allocation failed\n"); in ufshcd_memory_alloc()
3726 * @hba: per adapter instance
3736 static void ufshcd_host_memory_configure(struct ufs_hba *hba) in ufshcd_host_memory_configure() argument
3746 utrdlp = hba->utrdl_base_addr; in ufshcd_host_memory_configure()
3754 cmd_desc_dma_addr = hba->ucdl_dma_addr; in ufshcd_host_memory_configure()
3756 for (i = 0; i < hba->nutrs; i++) { in ufshcd_host_memory_configure()
3766 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { in ufshcd_host_memory_configure()
3782 ufshcd_init_lrb(hba, &hba->lrb[i], i); in ufshcd_host_memory_configure()
3788 * @hba: per adapter instance
3797 static int ufshcd_dme_link_startup(struct ufs_hba *hba) in ufshcd_dme_link_startup() argument
3804 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_link_startup()
3806 dev_dbg(hba->dev, in ufshcd_dme_link_startup()
3812 * @hba: per adapter instance
3819 static int ufshcd_dme_reset(struct ufs_hba *hba) in ufshcd_dme_reset() argument
3826 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_reset()
3828 dev_err(hba->dev, in ufshcd_dme_reset()
3834 int ufshcd_dme_configure_adapt(struct ufs_hba *hba, in ufshcd_dme_configure_adapt() argument
3843 ret = ufshcd_dme_set(hba, in ufshcd_dme_configure_adapt()
3852 * @hba: per adapter instance
3858 static int ufshcd_dme_enable(struct ufs_hba *hba) in ufshcd_dme_enable() argument
3865 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_enable()
3867 dev_err(hba->dev, in ufshcd_dme_enable()
3873 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) in ufshcd_add_delay_before_dme_cmd() argument
3878 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) in ufshcd_add_delay_before_dme_cmd()
3885 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { in ufshcd_add_delay_before_dme_cmd()
3891 hba->last_dme_cmd_tstamp)); in ufshcd_add_delay_before_dme_cmd()
3906 * @hba: per adapter instance
3914 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_set_attr() argument
3934 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_set_attr()
3936 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", in ufshcd_dme_set_attr()
3941 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", in ufshcd_dme_set_attr()
3951 * @hba: per adapter instance
3958 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_get_attr() argument
3973 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { in ufshcd_dme_get_attr()
3974 orig_pwr_info = hba->pwr_info; in ufshcd_dme_get_attr()
3989 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); in ufshcd_dme_get_attr()
4001 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_get_attr()
4003 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", in ufshcd_dme_get_attr()
4008 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", in ufshcd_dme_get_attr()
4015 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) in ufshcd_dme_get_attr()
4017 ufshcd_change_power_mode(hba, &orig_pwr_info); in ufshcd_dme_get_attr()
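The ufshcd_dme_set_attr()/ufshcd_dme_get_attr() pair above wraps the DME_SET/DME_GET UIC commands (with the peer-access quirk handling shown). A hedged one-call sketch using the ufshcd_dme_get() convenience wrapper for a local attribute, matching the call pattern in ufshcd_get_max_pwr_mode() below; get_rx_lanes_example() is a hypothetical name:

        /*
         * Illustrative sketch only: query how many RX data lanes are connected
         * via a local DME_GET of PA_CONNECTEDRXDATALANES.
         */
        static int get_rx_lanes_example(struct ufs_hba *hba, u32 *rx_lanes)
        {
                return ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
                                      rx_lanes);
        }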
4027 * @hba: per adapter instance
4039 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) in ufshcd_uic_pwr_ctrl() argument
4047 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4048 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_uic_pwr_ctrl()
4050 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4051 if (ufshcd_is_link_broken(hba)) { in ufshcd_uic_pwr_ctrl()
4055 hba->uic_async_done = &uic_async_done; in ufshcd_uic_pwr_ctrl()
4056 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { in ufshcd_uic_pwr_ctrl()
4057 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
4065 ret = __ufshcd_send_uic_cmd(hba, cmd, false); in ufshcd_uic_pwr_ctrl()
4066 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4068 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4074 if (!wait_for_completion_timeout(hba->uic_async_done, in ufshcd_uic_pwr_ctrl()
4076 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4081 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", in ufshcd_uic_pwr_ctrl()
4091 status = ufshcd_get_upmcrs(hba); in ufshcd_uic_pwr_ctrl()
4093 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4100 ufshcd_print_host_state(hba); in ufshcd_uic_pwr_ctrl()
4101 ufshcd_print_pwr_info(hba); in ufshcd_uic_pwr_ctrl()
4102 ufshcd_print_evt_hist(hba); in ufshcd_uic_pwr_ctrl()
4105 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4106 hba->active_uic_cmd = NULL; in ufshcd_uic_pwr_ctrl()
4107 hba->uic_async_done = NULL; in ufshcd_uic_pwr_ctrl()
4109 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
4111 ufshcd_set_link_broken(hba); in ufshcd_uic_pwr_ctrl()
4112 ufshcd_schedule_eh_work(hba); in ufshcd_uic_pwr_ctrl()
4115 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4116 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4124 * @hba: per adapter instance
4129 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) in ufshcd_uic_change_pwr_mode() argument
4134 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { in ufshcd_uic_change_pwr_mode()
4135 ret = ufshcd_dme_set(hba, in ufshcd_uic_change_pwr_mode()
4138 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", in ufshcd_uic_change_pwr_mode()
4147 ufshcd_hold(hba, false); in ufshcd_uic_change_pwr_mode()
4148 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_change_pwr_mode()
4149 ufshcd_release(hba); in ufshcd_uic_change_pwr_mode()
4156 int ufshcd_link_recovery(struct ufs_hba *hba) in ufshcd_link_recovery() argument
4161 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4162 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_link_recovery()
4163 ufshcd_set_eh_in_progress(hba); in ufshcd_link_recovery()
4164 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4167 ufshcd_device_reset(hba); in ufshcd_link_recovery()
4169 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_link_recovery()
4171 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4173 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_link_recovery()
4174 ufshcd_clear_eh_in_progress(hba); in ufshcd_link_recovery()
4175 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4178 dev_err(hba->dev, "%s: link recovery failed, err %d", in ufshcd_link_recovery()
4185 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) in ufshcd_uic_hibern8_enter() argument
4191 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); in ufshcd_uic_hibern8_enter()
4194 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_enter()
4195 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", in ufshcd_uic_hibern8_enter()
4199 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", in ufshcd_uic_hibern8_enter()
4202 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, in ufshcd_uic_hibern8_enter()
4209 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) in ufshcd_uic_hibern8_exit() argument
4215 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); in ufshcd_uic_hibern8_exit()
4218 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_exit()
4219 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", in ufshcd_uic_hibern8_exit()
4223 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", in ufshcd_uic_hibern8_exit()
4226 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, in ufshcd_uic_hibern8_exit()
4228 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock(); in ufshcd_uic_hibern8_exit()
4229 hba->ufs_stats.hibern8_exit_cnt++; in ufshcd_uic_hibern8_exit()
4236 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) in ufshcd_auto_hibern8_update() argument
4241 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufshcd_auto_hibern8_update()
4244 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4245 if (hba->ahit != ahit) { in ufshcd_auto_hibern8_update()
4246 hba->ahit = ahit; in ufshcd_auto_hibern8_update()
4249 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4252 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { in ufshcd_auto_hibern8_update()
4253 ufshcd_rpm_get_sync(hba); in ufshcd_auto_hibern8_update()
4254 ufshcd_hold(hba, false); in ufshcd_auto_hibern8_update()
4255 ufshcd_auto_hibern8_enable(hba); in ufshcd_auto_hibern8_update()
4256 ufshcd_release(hba); in ufshcd_auto_hibern8_update()
4257 ufshcd_rpm_put_sync(hba); in ufshcd_auto_hibern8_update()
4262 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) in ufshcd_auto_hibern8_enable() argument
4264 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufshcd_auto_hibern8_enable()
4267 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufshcd_auto_hibern8_enable()
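ufshcd_auto_hibern8_update() and ufshcd_auto_hibern8_enable() above program REG_AUTO_HIBERNATE_IDLE_TIMER from hba->ahit. A hedged sketch of composing an AHIT value, assuming the UFSHCI_AHIBERN8_TIMER_MASK / UFSHCI_AHIBERN8_SCALE_MASK field definitions from ufshci.h and FIELD_PREP() from <linux/bitfield.h>; the 150/3 encoding (150 timer units at the 1 ms scale) and set_ahit_example() are illustrative only:

        #include <linux/bitfield.h>

        /*
         * Illustrative sketch only: request roughly a 150 ms auto-hibernate
         * idle timer, assuming the AHIT field macros from ufshci.h (scale
         * code 3 selecting 1 ms units per the UFSHCI register layout).
         */
        static void set_ahit_example(struct ufs_hba *hba)
        {
                u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
                           FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

                ufshcd_auto_hibern8_update(hba, ahit);
        }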
4272 * values in hba power info
4273 * @hba: per-adapter instance
4275 static void ufshcd_init_pwr_info(struct ufs_hba *hba) in ufshcd_init_pwr_info() argument
4277 hba->pwr_info.gear_rx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4278 hba->pwr_info.gear_tx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4279 hba->pwr_info.lane_rx = 1; in ufshcd_init_pwr_info()
4280 hba->pwr_info.lane_tx = 1; in ufshcd_init_pwr_info()
4281 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4282 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4283 hba->pwr_info.hs_rate = 0; in ufshcd_init_pwr_info()
4288 * @hba: per-adapter instance
4290 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) in ufshcd_get_max_pwr_mode() argument
4292 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; in ufshcd_get_max_pwr_mode()
4294 if (hba->max_pwr_info.is_valid) in ufshcd_get_max_pwr_mode()
4297 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { in ufshcd_get_max_pwr_mode()
4307 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), in ufshcd_get_max_pwr_mode()
4309 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_get_max_pwr_mode()
4313 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
4325 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
4327 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4330 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4337 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), in ufshcd_get_max_pwr_mode()
4340 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4343 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4350 hba->max_pwr_info.is_valid = true; in ufshcd_get_max_pwr_mode()
4354 static int ufshcd_change_power_mode(struct ufs_hba *hba, in ufshcd_change_power_mode() argument
4360 if (!hba->force_pmc && in ufshcd_change_power_mode()
4361 pwr_mode->gear_rx == hba->pwr_info.gear_rx && in ufshcd_change_power_mode()
4362 pwr_mode->gear_tx == hba->pwr_info.gear_tx && in ufshcd_change_power_mode()
4363 pwr_mode->lane_rx == hba->pwr_info.lane_rx && in ufshcd_change_power_mode()
4364 pwr_mode->lane_tx == hba->pwr_info.lane_tx && in ufshcd_change_power_mode()
4365 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && in ufshcd_change_power_mode()
4366 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && in ufshcd_change_power_mode()
4367 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { in ufshcd_change_power_mode()
4368 dev_dbg(hba->dev, "%s: power already configured\n", __func__); in ufshcd_change_power_mode()
4378 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); in ufshcd_change_power_mode()
4379 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufshcd_change_power_mode()
4383 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); in ufshcd_change_power_mode()
4385 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); in ufshcd_change_power_mode()
4387 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); in ufshcd_change_power_mode()
4388 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufshcd_change_power_mode()
4392 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); in ufshcd_change_power_mode()
4394 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); in ufshcd_change_power_mode()
4400 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufshcd_change_power_mode()
4403 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { in ufshcd_change_power_mode()
4404 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), in ufshcd_change_power_mode()
4406 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), in ufshcd_change_power_mode()
4408 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), in ufshcd_change_power_mode()
4410 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), in ufshcd_change_power_mode()
4412 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), in ufshcd_change_power_mode()
4414 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), in ufshcd_change_power_mode()
4417 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), in ufshcd_change_power_mode()
4419 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), in ufshcd_change_power_mode()
4421 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), in ufshcd_change_power_mode()
4425 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 in ufshcd_change_power_mode()
4429 dev_err(hba->dev, in ufshcd_change_power_mode()
4432 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, in ufshcd_change_power_mode()
4435 memcpy(&hba->pwr_info, pwr_mode, in ufshcd_change_power_mode()
4444 * @hba: per-adapter instance
4447 int ufshcd_config_pwr_mode(struct ufs_hba *hba, in ufshcd_config_pwr_mode() argument
4453 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, in ufshcd_config_pwr_mode()
4459 ret = ufshcd_change_power_mode(hba, &final_params); in ufshcd_config_pwr_mode()
4467 * @hba: per-adapter instance
4471 static int ufshcd_complete_dev_init(struct ufs_hba *hba) in ufshcd_complete_dev_init() argument
4477 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_complete_dev_init()
4480 dev_err(hba->dev, in ufshcd_complete_dev_init()
4489 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_complete_dev_init()
4497 dev_err(hba->dev, in ufshcd_complete_dev_init()
4501 dev_err(hba->dev, in ufshcd_complete_dev_init()
4512 * @hba: per adapter instance
4522 int ufshcd_make_hba_operational(struct ufs_hba *hba) in ufshcd_make_hba_operational() argument
4528 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); in ufshcd_make_hba_operational()
4531 if (ufshcd_is_intr_aggr_allowed(hba)) in ufshcd_make_hba_operational()
4532 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); in ufshcd_make_hba_operational()
4534 ufshcd_disable_intr_aggr(hba); in ufshcd_make_hba_operational()
4537 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4539 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4541 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4543 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4555 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); in ufshcd_make_hba_operational()
4557 ufshcd_enable_run_stop_reg(hba); in ufshcd_make_hba_operational()
4559 dev_err(hba->dev, in ufshcd_make_hba_operational()
4570 * @hba: per adapter instance
4572 void ufshcd_hba_stop(struct ufs_hba *hba) in ufshcd_hba_stop() argument
4581 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hba_stop()
4582 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); in ufshcd_hba_stop()
4583 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hba_stop()
4585 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, in ufshcd_hba_stop()
4589 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); in ufshcd_hba_stop()
4595 * @hba: per adapter instance
4603 static int ufshcd_hba_execute_hce(struct ufs_hba *hba) in ufshcd_hba_execute_hce() argument
4609 if (ufshcd_is_hba_active(hba)) in ufshcd_hba_execute_hce()
4611 ufshcd_hba_stop(hba); in ufshcd_hba_execute_hce()
4614 ufshcd_set_link_off(hba); in ufshcd_hba_execute_hce()
4616 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_execute_hce()
4619 ufshcd_hba_start(hba); in ufshcd_hba_execute_hce()
4631 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); in ufshcd_hba_execute_hce()
4635 while (!ufshcd_is_hba_active(hba)) { in ufshcd_hba_execute_hce()
4639 dev_err(hba->dev, in ufshcd_hba_execute_hce()
4651 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_execute_hce()
4653 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_execute_hce()
4658 int ufshcd_hba_enable(struct ufs_hba *hba) in ufshcd_hba_enable() argument
4662 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { in ufshcd_hba_enable()
4663 ufshcd_set_link_off(hba); in ufshcd_hba_enable()
4664 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_enable()
4667 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_enable()
4668 ret = ufshcd_dme_reset(hba); in ufshcd_hba_enable()
4670 ret = ufshcd_dme_enable(hba); in ufshcd_hba_enable()
4672 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_enable()
4674 dev_err(hba->dev, in ufshcd_hba_enable()
4678 ret = ufshcd_hba_execute_hce(hba); in ufshcd_hba_enable()
4685 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) in ufshcd_disable_tx_lcc() argument
4690 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4693 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4697 err = ufshcd_dme_set(hba, in ufshcd_disable_tx_lcc()
4702 err = ufshcd_dme_peer_set(hba, in ufshcd_disable_tx_lcc()
4707 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", in ufshcd_disable_tx_lcc()
4716 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) in ufshcd_disable_device_tx_lcc() argument
4718 return ufshcd_disable_tx_lcc(hba, true); in ufshcd_disable_device_tx_lcc()
4721 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) in ufshcd_update_evt_hist() argument
4728 e = &hba->ufs_stats.event[id]; in ufshcd_update_evt_hist()
4734 ufshcd_vops_event_notify(hba, id, &val); in ufshcd_update_evt_hist()
4740 * @hba: per adapter instance
4744 static int ufshcd_link_startup(struct ufs_hba *hba) in ufshcd_link_startup() argument
4754 if (!ufshcd_is_ufs_dev_active(hba)) in ufshcd_link_startup()
4759 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); in ufshcd_link_startup()
4761 ret = ufshcd_dme_link_startup(hba); in ufshcd_link_startup()
4764 if (!ret && !ufshcd_is_device_present(hba)) { in ufshcd_link_startup()
4765 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4768 dev_err(hba->dev, "%s: Device not present\n", __func__); in ufshcd_link_startup()
4778 if (ret && retries && ufshcd_hba_enable(hba)) { in ufshcd_link_startup()
4779 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4788 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4801 ufshcd_init_pwr_info(hba); in ufshcd_link_startup()
4802 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
4804 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { in ufshcd_link_startup()
4805 ret = ufshcd_disable_device_tx_lcc(hba); in ufshcd_link_startup()
4811 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); in ufshcd_link_startup()
4816 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_link_startup()
4817 ret = ufshcd_make_hba_operational(hba); in ufshcd_link_startup()
4820 dev_err(hba->dev, "link startup failed %d\n", ret); in ufshcd_link_startup()
4821 ufshcd_print_host_state(hba); in ufshcd_link_startup()
4822 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
4823 ufshcd_print_evt_hist(hba); in ufshcd_link_startup()
4830 * @hba: per-adapter instance
4838 static int ufshcd_verify_dev_init(struct ufs_hba *hba) in ufshcd_verify_dev_init() argument
4843 ufshcd_hold(hba, false); in ufshcd_verify_dev_init()
4844 mutex_lock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4846 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, in ufshcd_verify_dev_init()
4847 hba->nop_out_timeout); in ufshcd_verify_dev_init()
4852 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_verify_dev_init()
4854 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4855 ufshcd_release(hba); in ufshcd_verify_dev_init()
4858 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); in ufshcd_verify_dev_init()
4875 struct ufs_hba *hba; in ufshcd_set_queue_depth() local
4877 hba = shost_priv(sdev->host); in ufshcd_set_queue_depth()
4879 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4880 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_set_queue_depth()
4891 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4893 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); in ufshcd_set_queue_depth()
4895 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", in ufshcd_set_queue_depth()
4902 * @hba: per-adapter instance
4911 static int ufshcd_get_lu_wp(struct ufs_hba *hba, in ufshcd_get_lu_wp() argument
4924 else if (lun >= hba->dev_info.max_lu_supported) in ufshcd_get_lu_wp()
4927 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_get_lu_wp()
4938 * @hba: per-adapter instance
4942 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, in ufshcd_get_lu_power_on_wp_status() argument
4945 if (hba->dev_info.f_power_on_wp_en && in ufshcd_get_lu_power_on_wp_status()
4946 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_get_lu_power_on_wp_status()
4949 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), in ufshcd_get_lu_power_on_wp_status()
4952 hba->dev_info.is_lu_power_on_wp = true; in ufshcd_get_lu_power_on_wp_status()
4959 * @hba: pointer to ufs hba
4961 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_setup_links() argument
4969 if (hba->ufs_device_wlun) { in ufshcd_setup_links()
4971 &hba->ufs_device_wlun->sdev_gendev, in ufshcd_setup_links()
4975 dev_name(&hba->ufs_device_wlun->sdev_gendev)); in ufshcd_setup_links()
4978 hba->luns_avail--; in ufshcd_setup_links()
4980 if (hba->luns_avail == 1) { in ufshcd_setup_links()
4981 ufshcd_rpm_put(hba); in ufshcd_setup_links()
4989 hba->luns_avail--; in ufshcd_setup_links()
5001 struct ufs_hba *hba; in ufshcd_slave_alloc() local
5003 hba = shost_priv(sdev->host); in ufshcd_slave_alloc()
5022 ufshcd_get_lu_power_on_wp_status(hba, sdev); in ufshcd_slave_alloc()
5024 ufshcd_setup_links(hba, sdev); in ufshcd_slave_alloc()
5041 static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_hpb_destroy() argument
5045 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_destroy()
5048 ufshpb_destroy_lu(hba, sdev); in ufshcd_hpb_destroy()
5051 static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_hpb_configure() argument
5055 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_configure()
5058 ufshpb_init_hpb_lu(hba, sdev); in ufshcd_hpb_configure()
5067 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_slave_configure() local
5070 ufshcd_hpb_configure(hba, sdev); in ufshcd_slave_configure()
5073 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) in ufshcd_slave_configure()
5081 else if (ufshcd_is_rpm_autosuspend_allowed(hba)) in ufshcd_slave_configure()
5090 ufshcd_crypto_register(hba, q); in ufshcd_slave_configure()
5101 struct ufs_hba *hba; in ufshcd_slave_destroy() local
5104 hba = shost_priv(sdev->host); in ufshcd_slave_destroy()
5106 ufshcd_hpb_destroy(hba, sdev); in ufshcd_slave_destroy()
5110 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5111 hba->ufs_device_wlun = NULL; in ufshcd_slave_destroy()
5112 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5113 } else if (hba->ufs_device_wlun) { in ufshcd_slave_destroy()
5117 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5118 if (hba->ufs_device_wlun) { in ufshcd_slave_destroy()
5119 supplier = &hba->ufs_device_wlun->sdev_gendev; in ufshcd_slave_destroy()
5122 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5171 * @hba: per adapter instance
5177 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_transfer_rsp_status() argument
5186 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { in ufshcd_transfer_rsp_status()
5195 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_transfer_rsp_status()
5223 if (!hba->pm_op_in_progress && in ufshcd_transfer_rsp_status()
5224 !ufshcd_eh_in_progress(hba) && in ufshcd_transfer_rsp_status()
5227 schedule_work(&hba->eeh_work); in ufshcd_transfer_rsp_status()
5230 ufshpb_rsp_upiu(hba, lrbp); in ufshcd_transfer_rsp_status()
5235 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5239 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5263 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5266 ufshcd_print_evt_hist(hba); in ufshcd_transfer_rsp_status()
5267 ufshcd_print_host_state(hba); in ufshcd_transfer_rsp_status()
5272 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) in ufshcd_transfer_rsp_status()
5273 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); in ufshcd_transfer_rsp_status()
5277 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, in ufshcd_is_auto_hibern8_error() argument
5280 if (!ufshcd_is_auto_hibern8_supported(hba) || in ufshcd_is_auto_hibern8_error()
5281 !ufshcd_is_auto_hibern8_enabled(hba)) in ufshcd_is_auto_hibern8_error()
5287 if (hba->active_uic_cmd && in ufshcd_is_auto_hibern8_error()
5288 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || in ufshcd_is_auto_hibern8_error()
5289 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) in ufshcd_is_auto_hibern8_error()
5297 * @hba: per adapter instance
5304 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) in ufshcd_uic_cmd_compl() argument
5308 spin_lock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5309 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) in ufshcd_uic_cmd_compl()
5310 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); in ufshcd_uic_cmd_compl()
5312 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { in ufshcd_uic_cmd_compl()
5313 hba->active_uic_cmd->argument2 |= in ufshcd_uic_cmd_compl()
5314 ufshcd_get_uic_cmd_result(hba); in ufshcd_uic_cmd_compl()
5315 hba->active_uic_cmd->argument3 = in ufshcd_uic_cmd_compl()
5316 ufshcd_get_dme_attr_val(hba); in ufshcd_uic_cmd_compl()
5317 if (!hba->uic_async_done) in ufshcd_uic_cmd_compl()
5318 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5319 complete(&hba->active_uic_cmd->done); in ufshcd_uic_cmd_compl()
5323 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { in ufshcd_uic_cmd_compl()
5324 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5325 complete(hba->uic_async_done); in ufshcd_uic_cmd_compl()
5330 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, in ufshcd_uic_cmd_compl()
5332 spin_unlock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5337 static void ufshcd_release_scsi_cmd(struct ufs_hba *hba, in ufshcd_release_scsi_cmd() argument
5344 ufshcd_release(hba); in ufshcd_release_scsi_cmd()
5345 ufshcd_clk_scaling_update_busy(hba); in ufshcd_release_scsi_cmd()
5350 * @hba: per adapter instance
5353 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, in __ufshcd_transfer_req_compl() argument
5360 for_each_set_bit(index, &completed_reqs, hba->nutrs) { in __ufshcd_transfer_req_compl()
5361 lrbp = &hba->lrb[index]; in __ufshcd_transfer_req_compl()
5366 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in __ufshcd_transfer_req_compl()
5367 ufshcd_update_monitor(hba, lrbp); in __ufshcd_transfer_req_compl()
5368 ufshcd_add_command_trace(hba, index, UFS_CMD_COMP); in __ufshcd_transfer_req_compl()
5369 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp); in __ufshcd_transfer_req_compl()
5370 ufshcd_release_scsi_cmd(hba, lrbp); in __ufshcd_transfer_req_compl()
5375 if (hba->dev_cmd.complete) { in __ufshcd_transfer_req_compl()
5376 ufshcd_add_command_trace(hba, index, in __ufshcd_transfer_req_compl()
5378 complete(hba->dev_cmd.complete); in __ufshcd_transfer_req_compl()
5379 ufshcd_clk_scaling_update_busy(hba); in __ufshcd_transfer_req_compl()
5391 struct ufs_hba *hba = shost_priv(shost); in ufshcd_poll() local
5395 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_poll()
5396 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_poll()
5397 completed_reqs = ~tr_doorbell & hba->outstanding_reqs; in ufshcd_poll()
5398 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, in ufshcd_poll()
5400 hba->outstanding_reqs); in ufshcd_poll()
5401 hba->outstanding_reqs &= ~completed_reqs; in ufshcd_poll()
5402 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_poll()
5405 __ufshcd_transfer_req_compl(hba, completed_reqs); in ufshcd_poll()
5412 * @hba: per adapter instance
5418 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) in ufshcd_transfer_req_compl() argument
5427 if (ufshcd_is_intr_aggr_allowed(hba) && in ufshcd_transfer_req_compl()
5428 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) in ufshcd_transfer_req_compl()
5429 ufshcd_reset_intr_aggr(hba); in ufshcd_transfer_req_compl()
5438 ufshcd_poll(hba->host, 0); in ufshcd_transfer_req_compl()
5443 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) in __ufshcd_write_ee_control() argument
5445 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in __ufshcd_write_ee_control()
5450 int ufshcd_write_ee_control(struct ufs_hba *hba) in ufshcd_write_ee_control() argument
5454 mutex_lock(&hba->ee_ctrl_mutex); in ufshcd_write_ee_control()
5455 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask); in ufshcd_write_ee_control()
5456 mutex_unlock(&hba->ee_ctrl_mutex); in ufshcd_write_ee_control()
5458 dev_err(hba->dev, "%s: failed to write ee control %d\n", in ufshcd_write_ee_control()
5463 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, in ufshcd_update_ee_control() argument
5469 mutex_lock(&hba->ee_ctrl_mutex); in ufshcd_update_ee_control()
5472 if (ee_ctrl_mask != hba->ee_ctrl_mask) in ufshcd_update_ee_control()
5473 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); in ufshcd_update_ee_control()
5476 hba->ee_ctrl_mask = ee_ctrl_mask; in ufshcd_update_ee_control()
5479 mutex_unlock(&hba->ee_ctrl_mutex); in ufshcd_update_ee_control()
5485 * @hba: per-adapter instance
5493 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_disable_ee() argument
5495 return ufshcd_update_ee_drv_mask(hba, 0, mask); in ufshcd_disable_ee()
5500 * @hba: per-adapter instance
5508 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_enable_ee() argument
5510 return ufshcd_update_ee_drv_mask(hba, mask, 0); in ufshcd_enable_ee()
5515 * @hba: per-adapter instance
5524 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) in ufshcd_enable_auto_bkops() argument
5528 if (hba->auto_bkops_enabled) in ufshcd_enable_auto_bkops()
5531 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_enable_auto_bkops()
5534 dev_err(hba->dev, "%s: failed to enable bkops %d\n", in ufshcd_enable_auto_bkops()
5539 hba->auto_bkops_enabled = true; in ufshcd_enable_auto_bkops()
5540 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); in ufshcd_enable_auto_bkops()
5543 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_enable_auto_bkops()
5545 dev_err(hba->dev, "%s: failed to disable exception event %d\n", in ufshcd_enable_auto_bkops()
5553 * @hba: per-adapter instance
5563 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) in ufshcd_disable_auto_bkops() argument
5567 if (!hba->auto_bkops_enabled) in ufshcd_disable_auto_bkops()
5574 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5576 dev_err(hba->dev, "%s: failed to enable exception event %d\n", in ufshcd_disable_auto_bkops()
5581 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, in ufshcd_disable_auto_bkops()
5584 dev_err(hba->dev, "%s: failed to disable bkops %d\n", in ufshcd_disable_auto_bkops()
5586 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5590 hba->auto_bkops_enabled = false; in ufshcd_disable_auto_bkops()
5591 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); in ufshcd_disable_auto_bkops()
5592 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_disable_auto_bkops()
5599 * @hba: per adapter instance
5606 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) in ufshcd_force_reset_auto_bkops() argument
5608 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { in ufshcd_force_reset_auto_bkops()
5609 hba->auto_bkops_enabled = false; in ufshcd_force_reset_auto_bkops()
5610 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5611 ufshcd_enable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5613 hba->auto_bkops_enabled = true; in ufshcd_force_reset_auto_bkops()
5614 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5615 ufshcd_disable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5617 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; in ufshcd_force_reset_auto_bkops()
5618 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_force_reset_auto_bkops()
5621 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_bkops_status() argument
5623 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_bkops_status()
5629 * @hba: per-adapter instance
5639 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5643 static int ufshcd_bkops_ctrl(struct ufs_hba *hba, in ufshcd_bkops_ctrl() argument
5649 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_ctrl()
5651 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_ctrl()
5655 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", in ufshcd_bkops_ctrl()
5662 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5664 err = ufshcd_disable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5671 * @hba: per-adapter instance
5679 static int ufshcd_urgent_bkops(struct ufs_hba *hba) in ufshcd_urgent_bkops() argument
5681 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); in ufshcd_urgent_bkops()
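ufshcd_bkops_ctrl() above enables or disables auto-BKOPS by comparing the device-reported bBackgroundOpStatus against a threshold, and ufshcd_urgent_bkops() simply passes hba->urgent_bkops_lvl as that threshold. A hedged sketch of the same call with an explicit threshold, using the BKOPS_STATUS_PERF_IMPACT level seen in ufshcd_force_reset_auto_bkops() above; ensure_bkops_example() is a hypothetical name:

        /*
         * Illustrative sketch only: enable auto-BKOPS whenever the device
         * reports at least performance-impact urgency, disable it otherwise.
         */
        static int ensure_bkops_example(struct ufs_hba *hba)
        {
                return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
        }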
5684 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_ee_status() argument
5686 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ee_status()
5690 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) in ufshcd_bkops_exception_event_handler() argument
5695 if (hba->is_urgent_bkops_lvl_checked) in ufshcd_bkops_exception_event_handler()
5698 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_exception_event_handler()
5700 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_exception_event_handler()
5712 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", in ufshcd_bkops_exception_event_handler()
5715 hba->urgent_bkops_lvl = curr_status; in ufshcd_bkops_exception_event_handler()
5716 hba->is_urgent_bkops_lvl_checked = true; in ufshcd_bkops_exception_event_handler()
5720 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_exception_event_handler()
5723 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", in ufshcd_bkops_exception_event_handler()
5727 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status) in ufshcd_temp_exception_event_handler() argument
5731 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_temp_exception_event_handler()
5735 dev_info(hba->dev, "exception Tcase %d\n", value - 80); in ufshcd_temp_exception_event_handler()
5737 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); in ufshcd_temp_exception_event_handler()
5745 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) in __ufshcd_wb_toggle() argument
5751 index = ufshcd_wb_get_query_index(hba); in __ufshcd_wb_toggle()
5752 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL); in __ufshcd_wb_toggle()
5755 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) in ufshcd_wb_toggle() argument
5759 if (!ufshcd_is_wb_allowed(hba) || in ufshcd_wb_toggle()
5760 hba->dev_info.wb_enabled == enable) in ufshcd_wb_toggle()
5763 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN); in ufshcd_wb_toggle()
5765 dev_err(hba->dev, "%s: Write Booster %s failed %d\n", in ufshcd_wb_toggle()
5770 hba->dev_info.wb_enabled = enable; in ufshcd_wb_toggle()
5771 dev_dbg(hba->dev, "%s: Write Booster %s\n", in ufshcd_wb_toggle()
5777 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, in ufshcd_wb_toggle_buf_flush_during_h8() argument
5782 ret = __ufshcd_wb_toggle(hba, enable, in ufshcd_wb_toggle_buf_flush_during_h8()
5785 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n", in ufshcd_wb_toggle_buf_flush_during_h8()
5789 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n", in ufshcd_wb_toggle_buf_flush_during_h8()
5793 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) in ufshcd_wb_toggle_buf_flush() argument
5797 if (!ufshcd_is_wb_allowed(hba) || in ufshcd_wb_toggle_buf_flush()
5798 hba->dev_info.wb_buf_flush_enabled == enable) in ufshcd_wb_toggle_buf_flush()
5801 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN); in ufshcd_wb_toggle_buf_flush()
5803 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n", in ufshcd_wb_toggle_buf_flush()
5808 hba->dev_info.wb_buf_flush_enabled = enable; in ufshcd_wb_toggle_buf_flush()
5809 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n", in ufshcd_wb_toggle_buf_flush()
5815 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, in ufshcd_wb_presrv_usrspc_keep_vcc_on() argument
5822 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5823 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5827 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5833 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5838 return avail_buf < hba->vps->wb_flush_threshold; in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5841 static void ufshcd_wb_force_disable(struct ufs_hba *hba) in ufshcd_wb_force_disable() argument
5843 if (ufshcd_is_wb_buf_flush_allowed(hba)) in ufshcd_wb_force_disable()
5844 ufshcd_wb_toggle_buf_flush(hba, false); in ufshcd_wb_force_disable()
5846 ufshcd_wb_toggle_buf_flush_during_h8(hba, false); in ufshcd_wb_force_disable()
5847 ufshcd_wb_toggle(hba, false); in ufshcd_wb_force_disable()
5848 hba->caps &= ~UFSHCD_CAP_WB_EN; in ufshcd_wb_force_disable()
5850 dev_info(hba->dev, "%s: WB force disabled\n", __func__); in ufshcd_wb_force_disable()
5853 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba) in ufshcd_is_wb_buf_lifetime_available() argument
5859 index = ufshcd_wb_get_query_index(hba); in ufshcd_is_wb_buf_lifetime_available()
5860 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_is_wb_buf_lifetime_available()
5864 dev_err(hba->dev, in ufshcd_is_wb_buf_lifetime_available()
5871 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n", in ufshcd_is_wb_buf_lifetime_available()
5876 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n", in ufshcd_is_wb_buf_lifetime_available()
5882 static bool ufshcd_wb_need_flush(struct ufs_hba *hba) in ufshcd_wb_need_flush() argument
5888 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_need_flush()
5891 if (!ufshcd_is_wb_buf_lifetime_available(hba)) { in ufshcd_wb_need_flush()
5892 ufshcd_wb_force_disable(hba); in ufshcd_wb_need_flush()
5907 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_need_flush()
5908 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_need_flush()
5912 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_need_flush()
5917 if (!hba->dev_info.b_presrv_uspc_en) in ufshcd_wb_need_flush()
5920 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); in ufshcd_wb_need_flush()
5925 struct ufs_hba *hba = container_of(to_delayed_work(work), in ufshcd_rpm_dev_flush_recheck_work() local
5934 ufshcd_rpm_get_sync(hba); in ufshcd_rpm_dev_flush_recheck_work()
5935 ufshcd_rpm_put_sync(hba); in ufshcd_rpm_dev_flush_recheck_work()
5947 struct ufs_hba *hba; in ufshcd_exception_event_handler() local
5950 hba = container_of(work, struct ufs_hba, eeh_work); in ufshcd_exception_event_handler()
5952 ufshcd_scsi_block_requests(hba); in ufshcd_exception_event_handler()
5953 err = ufshcd_get_ee_status(hba, &status); in ufshcd_exception_event_handler()
5955 dev_err(hba->dev, "%s: failed to get exception status %d\n", in ufshcd_exception_event_handler()
5960 trace_ufshcd_exception_event(dev_name(hba->dev), status); in ufshcd_exception_event_handler()
5962 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) in ufshcd_exception_event_handler()
5963 ufshcd_bkops_exception_event_handler(hba); in ufshcd_exception_event_handler()
5965 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) in ufshcd_exception_event_handler()
5966 ufshcd_temp_exception_event_handler(hba, status); in ufshcd_exception_event_handler()
5968 ufs_debugfs_exception_event(hba, status); in ufshcd_exception_event_handler()
5970 ufshcd_scsi_unblock_requests(hba); in ufshcd_exception_event_handler()
5974 static void ufshcd_complete_requests(struct ufs_hba *hba) in ufshcd_complete_requests() argument
5976 ufshcd_transfer_req_compl(hba); in ufshcd_complete_requests()
5977 ufshcd_tmc_handler(hba); in ufshcd_complete_requests()
5983 * @hba: per-adapter instance
5987 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) in ufshcd_quirk_dl_nac_errors() argument
5992 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5997 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) in ufshcd_quirk_dl_nac_errors()
6000 if ((hba->saved_err & DEVICE_FATAL_ERROR) || in ufshcd_quirk_dl_nac_errors()
6001 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
6002 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) in ufshcd_quirk_dl_nac_errors()
6005 if ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
6006 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { in ufshcd_quirk_dl_nac_errors()
6011 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6013 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6019 if ((hba->saved_err & INT_FATAL_ERRORS) || in ufshcd_quirk_dl_nac_errors()
6020 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
6021 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) in ufshcd_quirk_dl_nac_errors()
6031 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6032 err = ufshcd_verify_dev_init(hba); in ufshcd_quirk_dl_nac_errors()
6033 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6039 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) in ufshcd_quirk_dl_nac_errors()
6040 hba->saved_err &= ~UIC_ERROR; in ufshcd_quirk_dl_nac_errors()
6042 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; in ufshcd_quirk_dl_nac_errors()
6043 if (!hba->saved_uic_err) in ufshcd_quirk_dl_nac_errors()
6047 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6052 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) in ufshcd_is_saved_err_fatal() argument
6054 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || in ufshcd_is_saved_err_fatal()
6055 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); in ufshcd_is_saved_err_fatal()
6058 void ufshcd_schedule_eh_work(struct ufs_hba *hba) in ufshcd_schedule_eh_work() argument
6060 lockdep_assert_held(hba->host->host_lock); in ufshcd_schedule_eh_work()
6063 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { in ufshcd_schedule_eh_work()
6064 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_schedule_eh_work()
6065 ufshcd_is_saved_err_fatal(hba)) in ufshcd_schedule_eh_work()
6066 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; in ufshcd_schedule_eh_work()
6068 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; in ufshcd_schedule_eh_work()
6069 queue_work(hba->eh_wq, &hba->eh_work); in ufshcd_schedule_eh_work()
6073 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) in ufshcd_clk_scaling_allow() argument
6075 down_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
6076 hba->clk_scaling.is_allowed = allow; in ufshcd_clk_scaling_allow()
6077 up_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
6080 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) in ufshcd_clk_scaling_suspend() argument
6083 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
6084 ufshcd_suspend_clkscaling(hba); in ufshcd_clk_scaling_suspend()
6085 ufshcd_clk_scaling_allow(hba, false); in ufshcd_clk_scaling_suspend()
6087 ufshcd_clk_scaling_allow(hba, true); in ufshcd_clk_scaling_suspend()
6088 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
6089 ufshcd_resume_clkscaling(hba); in ufshcd_clk_scaling_suspend()
6093 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) in ufshcd_err_handling_prepare() argument
6095 ufshcd_rpm_get_sync(hba); in ufshcd_err_handling_prepare()
6096 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || in ufshcd_err_handling_prepare()
6097 hba->is_sys_suspended) { in ufshcd_err_handling_prepare()
6105 ufshcd_setup_hba_vreg(hba, true); in ufshcd_err_handling_prepare()
6106 ufshcd_enable_irq(hba); in ufshcd_err_handling_prepare()
6107 ufshcd_setup_vreg(hba, true); in ufshcd_err_handling_prepare()
6108 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_err_handling_prepare()
6109 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_err_handling_prepare()
6110 ufshcd_hold(hba, false); in ufshcd_err_handling_prepare()
6111 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_err_handling_prepare()
6112 ufshcd_setup_clocks(hba, true); in ufshcd_err_handling_prepare()
6113 ufshcd_release(hba); in ufshcd_err_handling_prepare()
6114 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; in ufshcd_err_handling_prepare()
6115 ufshcd_vops_resume(hba, pm_op); in ufshcd_err_handling_prepare()
6117 ufshcd_hold(hba, false); in ufshcd_err_handling_prepare()
6118 if (ufshcd_is_clkscaling_supported(hba) && in ufshcd_err_handling_prepare()
6119 hba->clk_scaling.is_enabled) in ufshcd_err_handling_prepare()
6120 ufshcd_suspend_clkscaling(hba); in ufshcd_err_handling_prepare()
6121 ufshcd_clk_scaling_allow(hba, false); in ufshcd_err_handling_prepare()
6123 ufshcd_scsi_block_requests(hba); in ufshcd_err_handling_prepare()
6126 cancel_work_sync(&hba->eeh_work); in ufshcd_err_handling_prepare()
6129 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) in ufshcd_err_handling_unprepare() argument
6131 ufshcd_scsi_unblock_requests(hba); in ufshcd_err_handling_unprepare()
6132 ufshcd_release(hba); in ufshcd_err_handling_unprepare()
6133 if (ufshcd_is_clkscaling_supported(hba)) in ufshcd_err_handling_unprepare()
6134 ufshcd_clk_scaling_suspend(hba, false); in ufshcd_err_handling_unprepare()
6135 ufshcd_rpm_put(hba); in ufshcd_err_handling_unprepare()
6138 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) in ufshcd_err_handling_should_stop() argument
6140 return (!hba->is_powered || hba->shutting_down || in ufshcd_err_handling_should_stop()
6141 !hba->ufs_device_wlun || in ufshcd_err_handling_should_stop()
6142 hba->ufshcd_state == UFSHCD_STATE_ERROR || in ufshcd_err_handling_should_stop()
6143 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || in ufshcd_err_handling_should_stop()
6144 ufshcd_is_link_broken(hba)))); in ufshcd_err_handling_should_stop()
6148 static void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
6150 struct Scsi_Host *shost = hba->host; in ufshcd_recover_pm_error()
6155 hba->is_sys_suspended = false; in ufshcd_recover_pm_error()
6160 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); in ufshcd_recover_pm_error()
6162 /* hba device might have a runtime error otherwise */ in ufshcd_recover_pm_error()
6164 ret = pm_runtime_set_active(hba->dev); in ufshcd_recover_pm_error()
6181 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
6186 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) in ufshcd_is_pwr_mode_restore_needed() argument
6188 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; in ufshcd_is_pwr_mode_restore_needed()
6191 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); in ufshcd_is_pwr_mode_restore_needed()
6209 struct ufs_hba *hba; in ufshcd_err_handler() local
6218 hba = container_of(work, struct ufs_hba, eh_work); in ufshcd_err_handler()
6220 dev_info(hba->dev, in ufshcd_err_handler()
6221 …"%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force… in ufshcd_err_handler()
6222 __func__, ufshcd_state_name[hba->ufshcd_state], in ufshcd_err_handler()
6223 hba->is_powered, hba->shutting_down, hba->saved_err, in ufshcd_err_handler()
6224 hba->saved_uic_err, hba->force_reset, in ufshcd_err_handler()
6225 ufshcd_is_link_broken(hba) ? "; link is broken" : ""); in ufshcd_err_handler()
6227 down(&hba->host_sem); in ufshcd_err_handler()
6228 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6229 if (ufshcd_err_handling_should_stop(hba)) { in ufshcd_err_handler()
6230 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6231 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6232 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6233 up(&hba->host_sem); in ufshcd_err_handler()
6236 ufshcd_set_eh_in_progress(hba); in ufshcd_err_handler()
6237 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6238 ufshcd_err_handling_prepare(hba); in ufshcd_err_handler()
6240 ufshcd_complete_requests(hba); in ufshcd_err_handler()
6241 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6248 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6249 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_err_handler()
6254 if (ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6257 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { in ufshcd_err_handler()
6260 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6262 ret = ufshcd_quirk_dl_nac_errors(hba); in ufshcd_err_handler()
6263 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6264 if (!ret && ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6268 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || in ufshcd_err_handler()
6269 (hba->saved_uic_err && in ufshcd_err_handler()
6270 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_err_handler()
6271 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); in ufshcd_err_handler()
6273 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6274 ufshcd_print_host_state(hba); in ufshcd_err_handler()
6275 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6276 ufshcd_print_evt_hist(hba); in ufshcd_err_handler()
6277 ufshcd_print_tmrs(hba, hba->outstanding_tasks); in ufshcd_err_handler()
6278 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt); in ufshcd_err_handler()
6279 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6287 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_err_handler()
6288 ufshcd_is_saved_err_fatal(hba) || in ufshcd_err_handler()
6289 ((hba->saved_err & UIC_ERROR) && in ufshcd_err_handler()
6290 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | in ufshcd_err_handler()
6300 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { in ufshcd_err_handler()
6301 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_err_handler()
6302 if (!hba->saved_uic_err) in ufshcd_err_handler()
6303 hba->saved_err &= ~UIC_ERROR; in ufshcd_err_handler()
6304 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6305 if (ufshcd_is_pwr_mode_restore_needed(hba)) in ufshcd_err_handler()
6307 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6308 if (!hba->saved_err && !needs_restore) in ufshcd_err_handler()
6312 hba->silence_err_logs = true; in ufshcd_err_handler()
6314 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6316 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_err_handler()
6317 if (ufshcd_try_to_abort_task(hba, tag)) { in ufshcd_err_handler()
6321 dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag, in ufshcd_err_handler()
6322 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1); in ufshcd_err_handler()
6326 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { in ufshcd_err_handler()
6327 if (ufshcd_clear_tm_cmd(hba, tag)) { in ufshcd_err_handler()
6335 ufshcd_complete_requests(hba); in ufshcd_err_handler()
6337 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6338 hba->silence_err_logs = false; in ufshcd_err_handler()
6349 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6354 down_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6355 hba->force_pmc = true; in ufshcd_err_handler()
6356 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); in ufshcd_err_handler()
6359 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", in ufshcd_err_handler()
6362 hba->force_pmc = false; in ufshcd_err_handler()
6363 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6364 up_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6365 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6373 hba->force_reset = false; in ufshcd_err_handler()
6374 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6375 err = ufshcd_reset_and_restore(hba); in ufshcd_err_handler()
6377 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", in ufshcd_err_handler()
6380 ufshcd_recover_pm_error(hba); in ufshcd_err_handler()
6381 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6386 if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_err_handler()
6387 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6388 if (hba->saved_err || hba->saved_uic_err) in ufshcd_err_handler()
6389 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", in ufshcd_err_handler()
6390 __func__, hba->saved_err, hba->saved_uic_err); in ufshcd_err_handler()
6393 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && in ufshcd_err_handler()
6394 hba->ufshcd_state != UFSHCD_STATE_ERROR) { in ufshcd_err_handler()
6397 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_err_handler()
6399 ufshcd_clear_eh_in_progress(hba); in ufshcd_err_handler()
6400 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6401 ufshcd_err_handling_unprepare(hba); in ufshcd_err_handler()
6402 up(&hba->host_sem); in ufshcd_err_handler()
6404 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__, in ufshcd_err_handler()
6405 ufshcd_state_name[hba->ufshcd_state]); in ufshcd_err_handler()
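Taken together, the ufshcd_err_handler() fragments follow one shape: take host_sem, bail out early if there is nothing to handle, mark error handling in progress under the host lock, prepare (resume and block I/O), recover, then unprepare and release. A compressed, self-contained outline of that control flow; the names and the pthread locking are illustrative stand-ins, not the driver's primitives:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ex_host {
	pthread_mutex_t lock;   /* assumed already initialised by the caller */
	bool saved_err;
	bool force_reset;
};

static bool ex_should_stop(struct ex_host *h)
{
	return !h->saved_err && !h->force_reset;
}

static void ex_err_handler(struct ex_host *h)
{
	pthread_mutex_lock(&h->lock);
	if (ex_should_stop(h)) {
		pthread_mutex_unlock(&h->lock);
		return;                             /* nothing to recover */
	}
	pthread_mutex_unlock(&h->lock);

	printf("prepare: block I/O, resume the device\n");
	printf("recover: abort/clear requests, reset and restore\n");
	printf("unprepare: unblock I/O, drop PM reference\n");

	pthread_mutex_lock(&h->lock);
	h->saved_err = false;                       /* latched state consumed */
	h->force_reset = false;
	pthread_mutex_unlock(&h->lock);
}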
6410 * @hba: per-adapter instance
6416 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) in ufshcd_update_uic_error() argument
6422 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_update_uic_error()
6425 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); in ufshcd_update_uic_error()
6431 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", in ufshcd_update_uic_error()
6438 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6439 if (hba->uic_async_done && hba->active_uic_cmd) in ufshcd_update_uic_error()
6440 cmd = hba->active_uic_cmd; in ufshcd_update_uic_error()
6446 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6452 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); in ufshcd_update_uic_error()
6455 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); in ufshcd_update_uic_error()
6458 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; in ufshcd_update_uic_error()
6459 else if (hba->dev_quirks & in ufshcd_update_uic_error()
6462 hba->uic_error |= in ufshcd_update_uic_error()
6465 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; in ufshcd_update_uic_error()
6471 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); in ufshcd_update_uic_error()
6474 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); in ufshcd_update_uic_error()
6475 hba->uic_error |= UFSHCD_UIC_NL_ERROR; in ufshcd_update_uic_error()
6479 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); in ufshcd_update_uic_error()
6482 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); in ufshcd_update_uic_error()
6483 hba->uic_error |= UFSHCD_UIC_TL_ERROR; in ufshcd_update_uic_error()
6487 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); in ufshcd_update_uic_error()
6490 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); in ufshcd_update_uic_error()
6491 hba->uic_error |= UFSHCD_UIC_DME_ERROR; in ufshcd_update_uic_error()
6495 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", in ufshcd_update_uic_error()
6496 __func__, hba->uic_error); in ufshcd_update_uic_error()
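ufshcd_update_uic_error() reads one error register per UniPro layer (PHY adapter, data link, network, transport, DME), records every reported value in the event history, and folds each layer into a single uic_error word. A standalone sketch of that accumulate-per-layer pattern; the flag bits, valid bit and register stub are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-layer flag bits, for illustration only. */
enum { EX_PA_ERR = 1 << 0, EX_DL_ERR = 1 << 1, EX_NL_ERR = 1 << 2,
       EX_TL_ERR = 1 << 3, EX_DME_ERR = 1 << 4 };

/* Stand-in for a register read; a real driver would use readl(). */
static uint32_t ex_read_err_reg(int layer)
{
	return layer == 1 ? 0x80000004u : 0;
}

static uint32_t ex_update_uic_error(void)
{
	static const uint32_t layer_flag[] = { EX_PA_ERR, EX_DL_ERR, EX_NL_ERR,
					       EX_TL_ERR, EX_DME_ERR };
	uint32_t uic_error = 0;

	for (int layer = 0; layer < 5; layer++) {
		uint32_t reg = ex_read_err_reg(layer);

		if (!(reg & 0x80000000u))        /* "error reported" valid bit */
			continue;
		printf("layer %d error code 0x%08x\n", layer, (unsigned int)reg);
		uic_error |= layer_flag[layer];  /* accumulate for the handler */
	}
	return uic_error;
}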
6502 * @hba: per-adapter instance
6509 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) in ufshcd_check_errors() argument
6514 spin_lock(hba->host->host_lock); in ufshcd_check_errors()
6515 hba->errors |= UFSHCD_ERROR_MASK & intr_status; in ufshcd_check_errors()
6517 if (hba->errors & INT_FATAL_ERRORS) { in ufshcd_check_errors()
6518 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, in ufshcd_check_errors()
6519 hba->errors); in ufshcd_check_errors()
6523 if (hba->errors & UIC_ERROR) { in ufshcd_check_errors()
6524 hba->uic_error = 0; in ufshcd_check_errors()
6525 retval = ufshcd_update_uic_error(hba); in ufshcd_check_errors()
6526 if (hba->uic_error) in ufshcd_check_errors()
6530 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { in ufshcd_check_errors()
6531 dev_err(hba->dev, in ufshcd_check_errors()
6533 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? in ufshcd_check_errors()
6535 hba->errors, ufshcd_get_upmcrs(hba)); in ufshcd_check_errors()
6536 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_check_errors()
6537 hba->errors); in ufshcd_check_errors()
6538 ufshcd_set_link_broken(hba); in ufshcd_check_errors()
6547 hba->saved_err |= hba->errors; in ufshcd_check_errors()
6548 hba->saved_uic_err |= hba->uic_error; in ufshcd_check_errors()
6551 if ((hba->saved_err & in ufshcd_check_errors()
6553 (hba->saved_uic_err && in ufshcd_check_errors()
6554 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_check_errors()
6555 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", in ufshcd_check_errors()
6556 __func__, hba->saved_err, in ufshcd_check_errors()
6557 hba->saved_uic_err); in ufshcd_check_errors()
6558 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, in ufshcd_check_errors()
6560 ufshcd_print_pwr_info(hba); in ufshcd_check_errors()
6562 ufshcd_schedule_eh_work(hba); in ufshcd_check_errors()
6571 hba->errors = 0; in ufshcd_check_errors()
6572 hba->uic_error = 0; in ufshcd_check_errors()
6573 spin_unlock(hba->host->host_lock); in ufshcd_check_errors()
6579 * @hba: per adapter instance
6585 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) in ufshcd_tmc_handler() argument
6591 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_tmc_handler()
6592 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_tmc_handler()
6593 issued = hba->outstanding_tasks & ~pending; in ufshcd_tmc_handler()
6594 for_each_set_bit(tag, &issued, hba->nutmrs) { in ufshcd_tmc_handler()
6595 struct request *req = hba->tmf_rqs[tag]; in ufshcd_tmc_handler()
6601 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_tmc_handler()
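ufshcd_tmc_handler() computes the completed task-management slots by diffing the doorbell register against the outstanding_tasks bitmap: bits still tracked by software but no longer set in hardware are done. A plain-C sketch of that diff-and-scan (bitmap width and names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Completed = issued tags whose doorbell bit has dropped. */
static void ex_complete_tm_slots(uint64_t outstanding, uint64_t doorbell,
				 unsigned int nutmrs)
{
	uint64_t completed = outstanding & ~doorbell;

	for (unsigned int tag = 0; tag < nutmrs; tag++) {
		if (completed & (1ULL << tag))
			printf("TM slot %u completed\n", tag);
	}
}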
6608 * @hba: per adapter instance
6615 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) in ufshcd_sl_intr() argument
6620 retval |= ufshcd_uic_cmd_compl(hba, intr_status); in ufshcd_sl_intr()
6622 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) in ufshcd_sl_intr()
6623 retval |= ufshcd_check_errors(hba, intr_status); in ufshcd_sl_intr()
6626 retval |= ufshcd_tmc_handler(hba); in ufshcd_sl_intr()
6629 retval |= ufshcd_transfer_req_compl(hba); in ufshcd_sl_intr()
6647 struct ufs_hba *hba = __hba; in ufshcd_intr() local
6648 int retries = hba->nutrs; in ufshcd_intr()
6650 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6651 hba->ufs_stats.last_intr_status = intr_status; in ufshcd_intr()
6652 hba->ufs_stats.last_intr_ts = local_clock(); in ufshcd_intr()
6655 * There could be max of hba->nutrs reqs in flight and in worst case in ufshcd_intr()
6662 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_intr()
6663 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); in ufshcd_intr()
6665 retval |= ufshcd_sl_intr(hba, enabled_intr_status); in ufshcd_intr()
6667 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6672 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) { in ufshcd_intr()
6673 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", in ufshcd_intr()
6676 hba->ufs_stats.last_intr_status, in ufshcd_intr()
6678 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_intr()
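The ufshcd_intr() fragments re-read the interrupt status in a bounded loop (at most nutrs iterations), mask it with the enable register, and acknowledge before servicing, so events raised mid-service are picked up on the next pass instead of being lost. A self-contained outline with a simulated register file in place of MMIO:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated registers; a real ISR would use readl()/writel() on MMIO. */
struct ex_regs { uint32_t status, enable; };

static bool ex_isr(struct ex_regs *r, unsigned int max_retries)
{
	bool handled = false;
	uint32_t status = r->status;

	while (status && max_retries--) {
		uint32_t enabled = status & r->enable;

		r->status &= ~enabled;               /* ack before servicing  */
		if (enabled) {
			printf("servicing 0x%08x\n", (unsigned int)enabled);
			handled = true;
		}
		status = r->status;                  /* pick up late arrivals */
	}
	return handled;
}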
6684 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_tm_cmd() argument
6690 if (!test_bit(tag, &hba->outstanding_tasks)) in ufshcd_clear_tm_cmd()
6693 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6694 ufshcd_utmrl_clear(hba, tag); in ufshcd_clear_tm_cmd()
6695 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6698 err = ufshcd_wait_for_register(hba, in ufshcd_clear_tm_cmd()
6702 dev_err(hba->dev, "Clearing task management function with tag %d %s\n", in ufshcd_clear_tm_cmd()
6709 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, in __ufshcd_issue_tm_cmd() argument
6712 struct request_queue *q = hba->tmf_queue; in __ufshcd_issue_tm_cmd()
6713 struct Scsi_Host *host = hba->host; in __ufshcd_issue_tm_cmd()
6727 ufshcd_hold(hba, false); in __ufshcd_issue_tm_cmd()
6732 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n", in __ufshcd_issue_tm_cmd()
6734 hba->tmf_rqs[req->tag] = req; in __ufshcd_issue_tm_cmd()
6737 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6738 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); in __ufshcd_issue_tm_cmd()
6741 __set_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6743 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); in __ufshcd_issue_tm_cmd()
6749 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); in __ufshcd_issue_tm_cmd()
6755 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); in __ufshcd_issue_tm_cmd()
6756 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", in __ufshcd_issue_tm_cmd()
6758 if (ufshcd_clear_tm_cmd(hba, task_tag)) in __ufshcd_issue_tm_cmd()
6759 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", in __ufshcd_issue_tm_cmd()
6764 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6766 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); in __ufshcd_issue_tm_cmd()
6769 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6770 hba->tmf_rqs[req->tag] = NULL; in __ufshcd_issue_tm_cmd()
6771 __clear_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6772 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6774 ufshcd_release(hba); in __ufshcd_issue_tm_cmd()
6782 * @hba: per adapter instance
6790 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, in ufshcd_issue_tm_cmd() argument
6813 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); in ufshcd_issue_tm_cmd()
6819 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", in ufshcd_issue_tm_cmd()
6829 * @hba: per-adapter instance
6842 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6844 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, in ufshcd_issue_devman_upiu_cmd() argument
6852 const u32 tag = hba->reserved_slot; in ufshcd_issue_devman_upiu_cmd()
6857 /* Protects use of hba->reserved_slot. */ in ufshcd_issue_devman_upiu_cmd()
6858 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_issue_devman_upiu_cmd()
6860 down_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6862 lrbp = &hba->lrb[tag]; in ufshcd_issue_devman_upiu_cmd()
6869 hba->dev_cmd.type = cmd_type; in ufshcd_issue_devman_upiu_cmd()
6871 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_issue_devman_upiu_cmd()
6894 hba->dev_cmd.complete = &wait; in ufshcd_issue_devman_upiu_cmd()
6896 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); in ufshcd_issue_devman_upiu_cmd()
6898 ufshcd_send_command(hba, tag); in ufshcd_issue_devman_upiu_cmd()
6904 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); in ufshcd_issue_devman_upiu_cmd()
6917 dev_warn(hba->dev, in ufshcd_issue_devman_upiu_cmd()
6924 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, in ufshcd_issue_devman_upiu_cmd()
6927 up_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6933 * @hba: per-adapter instance
6946 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, in ufshcd_exec_raw_upiu_cmd() argument
6964 ufshcd_hold(hba, false); in ufshcd_exec_raw_upiu_cmd()
6965 mutex_lock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6966 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, in ufshcd_exec_raw_upiu_cmd()
6969 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6970 ufshcd_release(hba); in ufshcd_exec_raw_upiu_cmd()
6979 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); in ufshcd_exec_raw_upiu_cmd()
6985 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, in ufshcd_exec_raw_upiu_cmd()
7012 struct ufs_hba *hba; in ufshcd_eh_device_reset_handler() local
7018 hba = shost_priv(host); in ufshcd_eh_device_reset_handler()
7021 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); in ufshcd_eh_device_reset_handler()
7029 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7030 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) in ufshcd_eh_device_reset_handler()
7031 if (hba->lrb[pos].lun == lun) in ufshcd_eh_device_reset_handler()
7033 hba->outstanding_reqs &= ~pending_reqs; in ufshcd_eh_device_reset_handler()
7034 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7036 if (ufshcd_clear_cmds(hba, pending_reqs) < 0) { in ufshcd_eh_device_reset_handler()
7037 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7039 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_eh_device_reset_handler()
7040 hba->outstanding_reqs |= not_cleared; in ufshcd_eh_device_reset_handler()
7041 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_eh_device_reset_handler()
7043 dev_err(hba->dev, "%s: failed to clear requests %#lx\n", in ufshcd_eh_device_reset_handler()
7046 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared); in ufshcd_eh_device_reset_handler()
7049 hba->req_abort_count = 0; in ufshcd_eh_device_reset_handler()
7050 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); in ufshcd_eh_device_reset_handler()
7054 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_eh_device_reset_handler()
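ufshcd_eh_device_reset_handler() issues a logical reset for the LUN and then sweeps the transfer-request bitmap: every outstanding tag whose lrb entry targets that LUN is collected, dropped from outstanding_reqs, cleared in hardware, and re-added only if the clear failed. A small sketch of the collection step (types are illustrative):

#include <stdint.h>

struct ex_req { int lun; };

/* Collect the outstanding tags that target @lun. */
static uint64_t ex_pending_for_lun(const struct ex_req *lrb, uint64_t outstanding,
				   unsigned int nutrs, int lun)
{
	uint64_t pending = 0;

	for (unsigned int tag = 0; tag < nutrs; tag++) {
		if ((outstanding & (1ULL << tag)) && lrb[tag].lun == lun)
			pending |= 1ULL << tag;
	}
	return pending;
}

The caller would then clear these tags in the controller and complete them, re-inserting into the outstanding bitmap only the bits the hardware refused to drop, as the fragments above suggest.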
7060 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_set_req_abort_skip() argument
7065 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_set_req_abort_skip()
7066 lrbp = &hba->lrb[tag]; in ufshcd_set_req_abort_skip()
7073 * @hba: Pointer to adapter instance
7084 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) in ufshcd_try_to_abort_task() argument
7086 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_try_to_abort_task()
7093 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
7097 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", in ufshcd_try_to_abort_task()
7105 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", in ufshcd_try_to_abort_task()
7107 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_try_to_abort_task()
7114 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", in ufshcd_try_to_abort_task()
7118 dev_err(hba->dev, in ufshcd_try_to_abort_task()
7132 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
7137 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", in ufshcd_try_to_abort_task()
7143 err = ufshcd_clear_cmds(hba, 1U << tag); in ufshcd_try_to_abort_task()
7145 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", in ufshcd_try_to_abort_task()
7161 struct ufs_hba *hba = shost_priv(host); in ufshcd_abort() local
7163 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_abort()
7171 ufshcd_hold(hba, false); in ufshcd_abort()
7172 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
7174 if (!(test_bit(tag, &hba->outstanding_reqs))) { in ufshcd_abort()
7175 dev_err(hba->dev, in ufshcd_abort()
7177 __func__, tag, hba->outstanding_reqs, reg); in ufshcd_abort()
7182 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); in ufshcd_abort()
7192 if (!hba->req_abort_count) { in ufshcd_abort()
7193 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); in ufshcd_abort()
7194 ufshcd_print_evt_hist(hba); in ufshcd_abort()
7195 ufshcd_print_host_state(hba); in ufshcd_abort()
7196 ufshcd_print_pwr_info(hba); in ufshcd_abort()
7197 ufshcd_print_trs(hba, 1 << tag, true); in ufshcd_abort()
7199 ufshcd_print_trs(hba, 1 << tag, false); in ufshcd_abort()
7201 hba->req_abort_count++; in ufshcd_abort()
7204 dev_err(hba->dev, in ufshcd_abort()
7207 __ufshcd_transfer_req_compl(hba, 1UL << tag); in ufshcd_abort()
7220 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); in ufshcd_abort()
7223 hba->force_reset = true; in ufshcd_abort()
7224 ufshcd_schedule_eh_work(hba); in ufshcd_abort()
7231 dev_err(hba->dev, "%s: skipping abort\n", __func__); in ufshcd_abort()
7232 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7236 err = ufshcd_try_to_abort_task(hba, tag); in ufshcd_abort()
7238 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_abort()
7239 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7248 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_abort()
7249 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); in ufshcd_abort()
7250 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_abort()
7253 ufshcd_release_scsi_cmd(hba, lrbp); in ufshcd_abort()
7259 ufshcd_release(hba); in ufshcd_abort()
7265 * @hba: per-adapter instance
7273 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) in ufshcd_host_reset_and_restore() argument
7281 ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET); in ufshcd_host_reset_and_restore()
7282 ufshcd_hba_stop(hba); in ufshcd_host_reset_and_restore()
7283 hba->silence_err_logs = true; in ufshcd_host_reset_and_restore()
7284 ufshcd_complete_requests(hba); in ufshcd_host_reset_and_restore()
7285 hba->silence_err_logs = false; in ufshcd_host_reset_and_restore()
7288 ufshcd_scale_clks(hba, true); in ufshcd_host_reset_and_restore()
7290 err = ufshcd_hba_enable(hba); in ufshcd_host_reset_and_restore()
7294 err = ufshcd_probe_hba(hba, false); in ufshcd_host_reset_and_restore()
7297 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); in ufshcd_host_reset_and_restore()
7298 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); in ufshcd_host_reset_and_restore()
7304 * @hba: per-adapter instance
7311 static int ufshcd_reset_and_restore(struct ufs_hba *hba) in ufshcd_reset_and_restore() argument
7319 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7325 saved_err |= hba->saved_err; in ufshcd_reset_and_restore()
7326 saved_uic_err |= hba->saved_uic_err; in ufshcd_reset_and_restore()
7327 hba->saved_err = 0; in ufshcd_reset_and_restore()
7328 hba->saved_uic_err = 0; in ufshcd_reset_and_restore()
7329 hba->force_reset = false; in ufshcd_reset_and_restore()
7330 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_reset_and_restore()
7331 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7334 ufshcd_device_reset(hba); in ufshcd_reset_and_restore()
7336 err = ufshcd_host_reset_and_restore(hba); in ufshcd_reset_and_restore()
7338 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7342 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && in ufshcd_reset_and_restore()
7343 hba->ufshcd_state != UFSHCD_STATE_ERROR && in ufshcd_reset_and_restore()
7344 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) in ufshcd_reset_and_restore()
7352 scsi_report_bus_reset(hba->host, 0); in ufshcd_reset_and_restore()
7354 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_reset_and_restore()
7355 hba->saved_err |= saved_err; in ufshcd_reset_and_restore()
7356 hba->saved_uic_err |= saved_uic_err; in ufshcd_reset_and_restore()
7358 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7373 struct ufs_hba *hba; in ufshcd_eh_host_reset_handler() local
7375 hba = shost_priv(cmd->device->host); in ufshcd_eh_host_reset_handler()
7377 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7378 hba->force_reset = true; in ufshcd_eh_host_reset_handler()
7379 ufshcd_schedule_eh_work(hba); in ufshcd_eh_host_reset_handler()
7380 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); in ufshcd_eh_host_reset_handler()
7381 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7383 flush_work(&hba->eh_work); in ufshcd_eh_host_reset_handler()
7385 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7386 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) in ufshcd_eh_host_reset_handler()
7388 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7442 * @hba: per-adapter instance
7448 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, in ufshcd_find_max_sup_active_icc_level() argument
7453 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || in ufshcd_find_max_sup_active_icc_level()
7454 !hba->vreg_info.vccq2) { in ufshcd_find_max_sup_active_icc_level()
7461 dev_dbg(hba->dev, in ufshcd_find_max_sup_active_icc_level()
7467 if (hba->vreg_info.vcc->max_uA) in ufshcd_find_max_sup_active_icc_level()
7469 hba->vreg_info.vcc->max_uA, in ufshcd_find_max_sup_active_icc_level()
7473 if (hba->vreg_info.vccq->max_uA) in ufshcd_find_max_sup_active_icc_level()
7475 hba->vreg_info.vccq->max_uA, in ufshcd_find_max_sup_active_icc_level()
7479 if (hba->vreg_info.vccq2->max_uA) in ufshcd_find_max_sup_active_icc_level()
7481 hba->vreg_info.vccq2->max_uA, in ufshcd_find_max_sup_active_icc_level()
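ufshcd_find_max_sup_active_icc_level() bounds the advertised ICC level by each rail's max_uA budget: for every powered regulator the descriptor's current table is consulted and only levels whose draw fits the limit remain eligible. A toy version of the per-rail selection (table, units and limits are made up for illustration):

#include <stdint.h>

/* Return the highest level whose listed draw (uA) fits within @max_ua. */
static unsigned int ex_max_icc_level(const uint32_t *draw_ua, unsigned int levels,
				     uint32_t max_ua)
{
	unsigned int best = 0;

	for (unsigned int lvl = 0; lvl < levels; lvl++) {
		if (draw_ua[lvl] <= max_ua)
			best = lvl;           /* keep the largest level that fits */
	}
	return best;
}

Running this once per rail (vcc, vccq, vccq2) and letting each pass only lower the previous result approximates the structure visible above, where each regulator's max_uA narrows the level in turn.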
7488 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) in ufshcd_set_active_icc_lvl() argument
7491 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; in ufshcd_set_active_icc_lvl()
7499 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, in ufshcd_set_active_icc_lvl()
7502 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7508 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf, in ufshcd_set_active_icc_lvl()
7510 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); in ufshcd_set_active_icc_lvl()
7512 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_active_icc_lvl()
7516 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7536 * @hba: per-adapter instance
7560 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) in ufshcd_scsi_add_wlus() argument
7565 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7567 if (IS_ERR(hba->ufs_device_wlun)) { in ufshcd_scsi_add_wlus()
7568 ret = PTR_ERR(hba->ufs_device_wlun); in ufshcd_scsi_add_wlus()
7569 hba->ufs_device_wlun = NULL; in ufshcd_scsi_add_wlus()
7572 scsi_device_put(hba->ufs_device_wlun); in ufshcd_scsi_add_wlus()
7574 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7583 sdev_boot = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7586 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); in ufshcd_scsi_add_wlus()
7594 scsi_remove_device(hba->ufs_device_wlun); in ufshcd_scsi_add_wlus()
7599 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) in ufshcd_wb_probe() argument
7601 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_wb_probe()
7606 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_probe()
7616 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) in ufshcd_wb_probe()
7619 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] < in ufshcd_wb_probe()
7646 ufshcd_read_unit_desc_param(hba, in ufshcd_wb_probe()
7661 if (!ufshcd_is_wb_buf_lifetime_available(hba)) in ufshcd_wb_probe()
7667 hba->caps &= ~UFSHCD_CAP_WB_EN; in ufshcd_wb_probe()
7670 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) in ufshcd_temp_notif_probe() argument
7672 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_temp_notif_probe()
7676 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) in ufshcd_temp_notif_probe()
7688 ufshcd_enable_ee(hba, mask); in ufshcd_temp_notif_probe()
7689 ufs_hwmon_probe(hba, mask); in ufshcd_temp_notif_probe()
7693 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, in ufshcd_fixup_dev_quirks() argument
7697 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_fixup_dev_quirks()
7708 hba->dev_quirks |= f->quirk; in ufshcd_fixup_dev_quirks()
7713 static void ufs_fixup_device_setup(struct ufs_hba *hba) in ufs_fixup_device_setup() argument
7716 ufshcd_fixup_dev_quirks(hba, ufs_fixups); in ufs_fixup_device_setup()
7719 ufshcd_vops_fixup_dev_quirks(hba); in ufs_fixup_device_setup()
7722 static int ufs_get_device_desc(struct ufs_hba *hba) in ufs_get_device_desc() argument
7728 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_get_device_desc()
7736 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, in ufs_get_device_desc()
7737 hba->desc_size[QUERY_DESC_IDN_DEVICE]); in ufs_get_device_desc()
7739 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", in ufs_get_device_desc()
7762 ufshpb_get_dev_info(hba, desc_buf); in ufs_get_device_desc()
7764 if (!ufshpb_is_legacy(hba)) in ufs_get_device_desc()
7765 err = ufshcd_query_flag_retry(hba, in ufs_get_device_desc()
7770 if (ufshpb_is_legacy(hba) || (!err && hpb_en)) in ufs_get_device_desc()
7774 err = ufshcd_read_string_desc(hba, model_index, in ufs_get_device_desc()
7777 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", in ufs_get_device_desc()
7782 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + in ufs_get_device_desc()
7785 ufs_fixup_device_setup(hba); in ufs_get_device_desc()
7787 ufshcd_wb_probe(hba, desc_buf); in ufs_get_device_desc()
7789 ufshcd_temp_notif_probe(hba, desc_buf); in ufs_get_device_desc()
7802 static void ufs_put_device_desc(struct ufs_hba *hba) in ufs_put_device_desc() argument
7804 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_put_device_desc()
7812 * @hba: per-adapter instance
7821 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) in ufshcd_tune_pa_tactivate() argument
7826 ret = ufshcd_dme_peer_get(hba, in ufshcd_tune_pa_tactivate()
7838 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_tune_pa_tactivate()
7847 * @hba: per-adapter instance
7856 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) in ufshcd_tune_pa_hibern8time() argument
7862 ret = ufshcd_dme_get(hba, in ufshcd_tune_pa_hibern8time()
7869 ret = ufshcd_dme_peer_get(hba, in ufshcd_tune_pa_hibern8time()
7881 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), in ufshcd_tune_pa_hibern8time()
7890 * @hba: per-adapter instance
7898 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) in ufshcd_quirk_tune_host_pa_tactivate() argument
7906 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
7911 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
7918 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7925 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7930 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); in ufshcd_quirk_tune_host_pa_tactivate()
7934 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
7949 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
7957 static void ufshcd_tune_unipro_params(struct ufs_hba *hba) in ufshcd_tune_unipro_params() argument
7959 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { in ufshcd_tune_unipro_params()
7960 ufshcd_tune_pa_tactivate(hba); in ufshcd_tune_unipro_params()
7961 ufshcd_tune_pa_hibern8time(hba); in ufshcd_tune_unipro_params()
7964 ufshcd_vops_apply_dev_quirks(hba); in ufshcd_tune_unipro_params()
7966 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7968 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); in ufshcd_tune_unipro_params()
7970 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7971 ufshcd_quirk_tune_host_pa_tactivate(hba); in ufshcd_tune_unipro_params()
7974 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) in ufshcd_clear_dbg_ufs_stats() argument
7976 hba->ufs_stats.hibern8_exit_cnt = 0; in ufshcd_clear_dbg_ufs_stats()
7977 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_clear_dbg_ufs_stats()
7978 hba->req_abort_count = 0; in ufshcd_clear_dbg_ufs_stats()
7981 static int ufshcd_device_geo_params_init(struct ufs_hba *hba) in ufshcd_device_geo_params_init() argument
7987 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY]; in ufshcd_device_geo_params_init()
7994 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, in ufshcd_device_geo_params_init()
7997 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", in ufshcd_device_geo_params_init()
8003 hba->dev_info.max_lu_supported = 32; in ufshcd_device_geo_params_init()
8005 hba->dev_info.max_lu_supported = 8; in ufshcd_device_geo_params_init()
8007 if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= in ufshcd_device_geo_params_init()
8009 ufshpb_get_geo_info(hba, desc_buf); in ufshcd_device_geo_params_init()
8041 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) in ufshcd_parse_dev_ref_clk_freq() argument
8047 hba->dev_ref_clk_freq = in ufshcd_parse_dev_ref_clk_freq()
8050 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) in ufshcd_parse_dev_ref_clk_freq()
8051 dev_err(hba->dev, in ufshcd_parse_dev_ref_clk_freq()
8055 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) in ufshcd_set_dev_ref_clk() argument
8059 u32 freq = hba->dev_ref_clk_freq; in ufshcd_set_dev_ref_clk()
8061 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_set_dev_ref_clk()
8065 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", in ufshcd_set_dev_ref_clk()
8073 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_dev_ref_clk()
8077 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", in ufshcd_set_dev_ref_clk()
8082 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", in ufshcd_set_dev_ref_clk()
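ufshcd_set_dev_ref_clk() is a read-compare-write on the bRefClkFreq attribute: the current value is queried first and the write is only issued when it differs from the host's expectation, with both outcomes logged. The same idempotent pattern in miniature; the attribute store here is a plain variable standing in for the query UPIUs:

#include <stdint.h>
#include <stdio.h>

static uint32_t ex_attr_ref_clk;        /* stand-in for the device attribute */

static int ex_sync_ref_clk(uint32_t wanted)
{
	uint32_t cur = ex_attr_ref_clk;  /* "read attribute" */

	if (cur == wanted)
		return 0;                /* already programmed, skip the write */

	ex_attr_ref_clk = wanted;        /* "write attribute" */
	printf("reference-clock attribute set to %u\n", (unsigned int)wanted);
	return 0;
}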
8089 static int ufshcd_device_params_init(struct ufs_hba *hba) in ufshcd_device_params_init() argument
8096 hba->desc_size[i] = QUERY_DESC_MAX_SIZE; in ufshcd_device_params_init()
8099 ret = ufshcd_device_geo_params_init(hba); in ufshcd_device_params_init()
8104 ret = ufs_get_device_desc(hba); in ufshcd_device_params_init()
8106 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", in ufshcd_device_params_init()
8111 ufshcd_get_ref_clk_gating_wait(hba); in ufshcd_device_params_init()
8113 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_device_params_init()
8115 hba->dev_info.f_power_on_wp_en = flag; in ufshcd_device_params_init()
8118 if (ufshcd_get_max_pwr_mode(hba)) in ufshcd_device_params_init()
8119 dev_err(hba->dev, in ufshcd_device_params_init()
8128 * @hba: per-adapter instance
8130 static int ufshcd_add_lus(struct ufs_hba *hba) in ufshcd_add_lus() argument
8135 ret = ufshcd_scsi_add_wlus(hba); in ufshcd_add_lus()
8140 if (ufshcd_is_clkscaling_supported(hba)) { in ufshcd_add_lus()
8141 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_add_lus()
8142 &hba->pwr_info, in ufshcd_add_lus()
8144 hba->clk_scaling.saved_pwr_info.is_valid = true; in ufshcd_add_lus()
8145 hba->clk_scaling.is_allowed = true; in ufshcd_add_lus()
8147 ret = ufshcd_devfreq_init(hba); in ufshcd_add_lus()
8151 hba->clk_scaling.is_enabled = true; in ufshcd_add_lus()
8152 ufshcd_init_clk_scaling_sysfs(hba); in ufshcd_add_lus()
8155 ufs_bsg_probe(hba); in ufshcd_add_lus()
8156 ufshpb_init(hba); in ufshcd_add_lus()
8157 scsi_scan_host(hba->host); in ufshcd_add_lus()
8158 pm_runtime_put_sync(hba->dev); in ufshcd_add_lus()
8165 * ufshcd_probe_hba - probe hba to detect device and initialize it
8166 * @hba: per-adapter instance
8171 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) in ufshcd_probe_hba() argument
8177 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_probe_hba()
8179 ret = ufshcd_link_startup(hba); in ufshcd_probe_hba()
8183 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) in ufshcd_probe_hba()
8187 ufshcd_clear_dbg_ufs_stats(hba); in ufshcd_probe_hba()
8190 ufshcd_set_link_active(hba); in ufshcd_probe_hba()
8193 ret = ufshcd_verify_dev_init(hba); in ufshcd_probe_hba()
8198 ret = ufshcd_complete_dev_init(hba); in ufshcd_probe_hba()
8207 ret = ufshcd_device_params_init(hba); in ufshcd_probe_hba()
8212 ufshcd_tune_unipro_params(hba); in ufshcd_probe_hba()
8215 ufshcd_set_ufs_dev_active(hba); in ufshcd_probe_hba()
8216 ufshcd_force_reset_auto_bkops(hba); in ufshcd_probe_hba()
8219 if (hba->max_pwr_info.is_valid) { in ufshcd_probe_hba()
8224 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) in ufshcd_probe_hba()
8225 ufshcd_set_dev_ref_clk(hba); in ufshcd_probe_hba()
8226 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); in ufshcd_probe_hba()
8228 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", in ufshcd_probe_hba()
8232 ufshcd_print_pwr_info(hba); in ufshcd_probe_hba()
8241 ufshcd_set_active_icc_lvl(hba); in ufshcd_probe_hba()
8244 ufshcd_configure_wb(hba); in ufshcd_probe_hba()
8246 if (hba->ee_usr_mask) in ufshcd_probe_hba()
8247 ufshcd_write_ee_control(hba); in ufshcd_probe_hba()
8249 ufshcd_auto_hibern8_enable(hba); in ufshcd_probe_hba()
8251 ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT); in ufshcd_probe_hba()
8253 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_probe_hba()
8255 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_probe_hba()
8256 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_probe_hba()
8257 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_probe_hba()
8258 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_probe_hba()
8260 trace_ufshcd_init(dev_name(hba->dev), ret, in ufshcd_probe_hba()
8262 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_probe_hba()
8267 * ufshcd_async_scan - asynchronous execution for probing hba
8273 struct ufs_hba *hba = (struct ufs_hba *)data; in ufshcd_async_scan() local
8276 down(&hba->host_sem); in ufshcd_async_scan()
8277 /* Initialize hba, detect and initialize UFS device */ in ufshcd_async_scan()
8278 ret = ufshcd_probe_hba(hba, true); in ufshcd_async_scan()
8279 up(&hba->host_sem); in ufshcd_async_scan()
8284 ret = ufshcd_add_lus(hba); in ufshcd_async_scan()
8291 pm_runtime_put_sync(hba->dev); in ufshcd_async_scan()
8292 ufshcd_hba_exit(hba); in ufshcd_async_scan()
8369 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, in ufshcd_config_vreg_lpm() argument
8372 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); in ufshcd_config_vreg_lpm()
8375 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, in ufshcd_config_vreg_hpm() argument
8381 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); in ufshcd_config_vreg_hpm()
8434 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_vreg() argument
8437 struct device *dev = hba->dev; in ufshcd_setup_vreg()
8438 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_vreg()
8459 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_hba_vreg() argument
8461 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_hba_vreg()
8463 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); in ufshcd_setup_hba_vreg()
8484 static int ufshcd_init_vreg(struct ufs_hba *hba) in ufshcd_init_vreg() argument
8487 struct device *dev = hba->dev; in ufshcd_init_vreg()
8488 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_vreg()
8501 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) in ufshcd_init_hba_vreg() argument
8503 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_hba_vreg()
8505 return ufshcd_get_vreg(hba->dev, info->vdd_hba); in ufshcd_init_hba_vreg()
8508 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) in ufshcd_setup_clocks() argument
8512 struct list_head *head = &hba->clk_list_head; in ufshcd_setup_clocks()
8520 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); in ufshcd_setup_clocks()
8530 if (ufshcd_is_link_active(hba) && in ufshcd_setup_clocks()
8538 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", in ufshcd_setup_clocks()
8546 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, in ufshcd_setup_clocks()
8551 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); in ufshcd_setup_clocks()
8562 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8563 hba->clk_gating.state = CLKS_ON; in ufshcd_setup_clocks()
8564 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
8565 hba->clk_gating.state); in ufshcd_setup_clocks()
8566 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8570 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
8576 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba) in ufshcd_parse_ref_clk_property() argument
8579 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq); in ufshcd_parse_ref_clk_property()
8582 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret); in ufshcd_parse_ref_clk_property()
8589 static int ufshcd_init_clocks(struct ufs_hba *hba) in ufshcd_init_clocks() argument
8593 struct device *dev = hba->dev; in ufshcd_init_clocks()
8594 struct list_head *head = &hba->clk_list_head; in ufshcd_init_clocks()
8617 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); in ufshcd_init_clocks()
8622 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_init_clocks()
8636 static int ufshcd_variant_hba_init(struct ufs_hba *hba) in ufshcd_variant_hba_init() argument
8640 if (!hba->vops) in ufshcd_variant_hba_init()
8643 err = ufshcd_vops_init(hba); in ufshcd_variant_hba_init()
8645 dev_err(hba->dev, "%s: variant %s init failed err %d\n", in ufshcd_variant_hba_init()
8646 __func__, ufshcd_get_var_name(hba), err); in ufshcd_variant_hba_init()
8651 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) in ufshcd_variant_hba_exit() argument
8653 if (!hba->vops) in ufshcd_variant_hba_exit()
8656 ufshcd_vops_exit(hba); in ufshcd_variant_hba_exit()
8659 static int ufshcd_hba_init(struct ufs_hba *hba) in ufshcd_hba_init() argument
8670 err = ufshcd_init_hba_vreg(hba); in ufshcd_hba_init()
8674 err = ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_init()
8678 err = ufshcd_init_clocks(hba); in ufshcd_hba_init()
8682 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) in ufshcd_hba_init()
8683 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba); in ufshcd_hba_init()
8685 err = ufshcd_setup_clocks(hba, true); in ufshcd_hba_init()
8689 err = ufshcd_init_vreg(hba); in ufshcd_hba_init()
8693 err = ufshcd_setup_vreg(hba, true); in ufshcd_hba_init()
8697 err = ufshcd_variant_hba_init(hba); in ufshcd_hba_init()
8701 ufs_debugfs_hba_init(hba); in ufshcd_hba_init()
8703 hba->is_powered = true; in ufshcd_hba_init()
8707 ufshcd_setup_vreg(hba, false); in ufshcd_hba_init()
8709 ufshcd_setup_clocks(hba, false); in ufshcd_hba_init()
8711 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_init()
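ufshcd_hba_init() brings the controller up in a strict order (hba regulator, clocks, rail regulators, variant init) and, on any failure, unwinds exactly the steps already completed via goto labels in reverse order. A compact sketch of staged init with unwind (steps and names are illustrative):

#include <stdio.h>

static int ex_step(const char *what, int fail)
{
	printf("%s %s\n", fail ? "FAILED:" : "did:", what);
	return fail ? -1 : 0;
}

static int ex_power_up(void)
{
	int err;

	err = ex_step("enable hba regulator", 0);
	if (err)
		goto out;
	err = ex_step("enable clocks", 0);
	if (err)
		goto out_hba_vreg;
	err = ex_step("enable rail regulators", 0);
	if (err)
		goto out_clks;
	return 0;

out_clks:
	ex_step("disable clocks", 0);
out_hba_vreg:
	ex_step("disable hba regulator", 0);
out:
	return err;
}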
8716 static void ufshcd_hba_exit(struct ufs_hba *hba) in ufshcd_hba_exit() argument
8718 if (hba->is_powered) { in ufshcd_hba_exit()
8719 ufshcd_exit_clk_scaling(hba); in ufshcd_hba_exit()
8720 ufshcd_exit_clk_gating(hba); in ufshcd_hba_exit()
8721 if (hba->eh_wq) in ufshcd_hba_exit()
8722 destroy_workqueue(hba->eh_wq); in ufshcd_hba_exit()
8723 ufs_debugfs_hba_exit(hba); in ufshcd_hba_exit()
8724 ufshcd_variant_hba_exit(hba); in ufshcd_hba_exit()
8725 ufshcd_setup_vreg(hba, false); in ufshcd_hba_exit()
8726 ufshcd_setup_clocks(hba, false); in ufshcd_hba_exit()
8727 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_exit()
8728 hba->is_powered = false; in ufshcd_hba_exit()
8729 ufs_put_device_desc(hba); in ufshcd_hba_exit()
8736 * @hba: per adapter instance
8742 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, in ufshcd_set_dev_pwr_mode() argument
8753 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8754 sdp = hba->ufs_device_wlun; in ufshcd_set_dev_pwr_mode()
8764 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8775 hba->host->eh_noresume = 1; in ufshcd_set_dev_pwr_mode()
8809 hba->curr_dev_pwr_mode = pwr_mode; in ufshcd_set_dev_pwr_mode()
8812 hba->host->eh_noresume = 0; in ufshcd_set_dev_pwr_mode()
8816 static int ufshcd_link_state_transition(struct ufs_hba *hba, in ufshcd_link_state_transition() argument
8822 if (req_link_state == hba->uic_link_state) in ufshcd_link_state_transition()
8826 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
8828 ufshcd_set_link_hibern8(hba); in ufshcd_link_state_transition()
8830 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8841 (!check_for_bkops || !hba->auto_bkops_enabled)) { in ufshcd_link_state_transition()
8852 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
8854 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8862 ufshcd_hba_stop(hba); in ufshcd_link_state_transition()
8867 ufshcd_set_link_off(hba); in ufshcd_link_state_transition()
8874 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_vreg_set_lpm() argument
8884 if (!ufshcd_is_link_active(hba) && in ufshcd_vreg_set_lpm()
8885 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) in ufshcd_vreg_set_lpm()
8903 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_lpm()
8904 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_lpm()
8905 ufshcd_setup_vreg(hba, false); in ufshcd_vreg_set_lpm()
8907 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_lpm()
8908 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_lpm()
8910 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { in ufshcd_vreg_set_lpm()
8911 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_lpm()
8912 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_lpm()
8919 if (vcc_off && hba->vreg_info.vcc && in ufshcd_vreg_set_lpm()
8920 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) in ufshcd_vreg_set_lpm()
8925 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_vreg_set_hpm() argument
8929 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_hpm()
8930 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_hpm()
8931 ret = ufshcd_setup_vreg(hba, true); in ufshcd_vreg_set_hpm()
8932 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_hpm()
8933 if (!ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_hpm()
8934 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8937 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_hpm()
8941 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); in ufshcd_vreg_set_hpm()
8946 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8948 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_hpm()
8954 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_lpm() argument
8956 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_lpm()
8957 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_vreg_set_lpm()
8960 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_hpm() argument
8962 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_hpm()
8963 ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_vreg_set_hpm()
8966 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) in __ufshcd_wl_suspend() argument
8974 hba->pm_op_in_progress = true; in __ufshcd_wl_suspend()
8977 hba->rpm_lvl : hba->spm_lvl; in __ufshcd_wl_suspend()
8985 ufshpb_suspend(hba); in __ufshcd_wl_suspend()
8991 ufshcd_hold(hba, false); in __ufshcd_wl_suspend()
8992 hba->clk_gating.is_suspended = true; in __ufshcd_wl_suspend()
8994 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_suspend()
8995 ufshcd_clk_scaling_suspend(hba, true); in __ufshcd_wl_suspend()
9002 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && in __ufshcd_wl_suspend()
9003 (req_link_state == hba->uic_link_state)) in __ufshcd_wl_suspend()
9007 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { in __ufshcd_wl_suspend()
9013 if (ufshcd_can_autobkops_during_suspend(hba)) { in __ufshcd_wl_suspend()
9019 ret = ufshcd_urgent_bkops(hba); in __ufshcd_wl_suspend()
9024 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
9031 hba->dev_info.b_rpm_dev_flush_capable = in __ufshcd_wl_suspend()
9032 hba->auto_bkops_enabled || in __ufshcd_wl_suspend()
9035 ufshcd_is_auto_hibern8_enabled(hba))) && in __ufshcd_wl_suspend()
9036 ufshcd_wb_need_flush(hba)); in __ufshcd_wl_suspend()
9039 flush_work(&hba->eeh_work); in __ufshcd_wl_suspend()
9041 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); in __ufshcd_wl_suspend()
9045 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { in __ufshcd_wl_suspend()
9048 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
9050 if (!hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_suspend()
9051 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); in __ufshcd_wl_suspend()
9061 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); in __ufshcd_wl_suspend()
9062 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); in __ufshcd_wl_suspend()
9072 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); in __ufshcd_wl_suspend()
9083 if (ufshcd_is_ufs_dev_deepsleep(hba)) { in __ufshcd_wl_suspend()
9084 ufshcd_device_reset(hba); in __ufshcd_wl_suspend()
9085 WARN_ON(!ufshcd_is_link_off(hba)); in __ufshcd_wl_suspend()
9087 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) in __ufshcd_wl_suspend()
9088 ufshcd_set_link_active(hba); in __ufshcd_wl_suspend()
9089 else if (ufshcd_is_link_off(hba)) in __ufshcd_wl_suspend()
9090 ufshcd_host_reset_and_restore(hba); in __ufshcd_wl_suspend()
9093 if (ufshcd_is_ufs_dev_deepsleep(hba)) { in __ufshcd_wl_suspend()
9094 ufshcd_device_reset(hba); in __ufshcd_wl_suspend()
9095 ufshcd_host_reset_and_restore(hba); in __ufshcd_wl_suspend()
9097 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) in __ufshcd_wl_suspend()
9098 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
9100 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_suspend()
9101 ufshcd_clk_scaling_suspend(hba, false); in __ufshcd_wl_suspend()
9103 hba->dev_info.b_rpm_dev_flush_capable = false; in __ufshcd_wl_suspend()
9105 if (hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_suspend()
9106 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, in __ufshcd_wl_suspend()
9111 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); in __ufshcd_wl_suspend()
9112 hba->clk_gating.is_suspended = false; in __ufshcd_wl_suspend()
9113 ufshcd_release(hba); in __ufshcd_wl_suspend()
9114 ufshpb_resume(hba); in __ufshcd_wl_suspend()
9116 hba->pm_op_in_progress = false; in __ufshcd_wl_suspend()
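__ufshcd_wl_suspend() first derives a target device power mode and link state from the chosen PM level and skips the whole transition when the device is already there; only the mismatching halves are changed, and failures roll the earlier steps back. A toy mapping of that level-to-target lookup (the table and enums are invented for illustration):

#include <stdio.h>

enum ex_dev_pwr { EX_DEV_ACTIVE, EX_DEV_SLEEP, EX_DEV_POWERDOWN };
enum ex_link    { EX_LINK_ACTIVE, EX_LINK_HIBERN8, EX_LINK_OFF };

struct ex_pm_lvl { enum ex_dev_pwr dev; enum ex_link link; };

static const struct ex_pm_lvl ex_lvls[] = {
	{ EX_DEV_ACTIVE,    EX_LINK_ACTIVE  },
	{ EX_DEV_SLEEP,     EX_LINK_HIBERN8 },
	{ EX_DEV_POWERDOWN, EX_LINK_OFF     },
};

static void ex_suspend(int lvl, enum ex_dev_pwr cur_dev, enum ex_link cur_link)
{
	const struct ex_pm_lvl *t = &ex_lvls[lvl];

	if (t->dev == cur_dev && t->link == cur_link) {
		printf("already in target state, nothing to do\n");
		return;
	}
	if (t->dev != cur_dev)
		printf("set device power mode %d\n", (int)t->dev);
	if (t->link != cur_link)
		printf("transition link to state %d\n", (int)t->link);
}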
9121 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in __ufshcd_wl_resume() argument
9124 enum uic_link_state old_link_state = hba->uic_link_state; in __ufshcd_wl_resume()
9126 hba->pm_op_in_progress = true; in __ufshcd_wl_resume()
9133 ret = ufshcd_vops_resume(hba, pm_op); in __ufshcd_wl_resume()
9138 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); in __ufshcd_wl_resume()
9140 if (ufshcd_is_link_hibern8(hba)) { in __ufshcd_wl_resume()
9141 ret = ufshcd_uic_hibern8_exit(hba); in __ufshcd_wl_resume()
9143 ufshcd_set_link_active(hba); in __ufshcd_wl_resume()
9145 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in __ufshcd_wl_resume()
9149 } else if (ufshcd_is_link_off(hba)) { in __ufshcd_wl_resume()
9156 ret = ufshcd_reset_and_restore(hba); in __ufshcd_wl_resume()
9161 if (ret || !ufshcd_is_link_active(hba)) in __ufshcd_wl_resume()
9165 if (!ufshcd_is_ufs_dev_active(hba)) { in __ufshcd_wl_resume()
9166 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); in __ufshcd_wl_resume()
9171 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) in __ufshcd_wl_resume()
9172 ufshcd_enable_auto_bkops(hba); in __ufshcd_wl_resume()
9178 ufshcd_urgent_bkops(hba); in __ufshcd_wl_resume()
9180 if (hba->ee_usr_mask) in __ufshcd_wl_resume()
9181 ufshcd_write_ee_control(hba); in __ufshcd_wl_resume()
9183 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_resume()
9184 ufshcd_clk_scaling_suspend(hba, false); in __ufshcd_wl_resume()
9186 if (hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_resume()
9187 hba->dev_info.b_rpm_dev_flush_capable = false; in __ufshcd_wl_resume()
9188 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); in __ufshcd_wl_resume()
9192 ufshcd_auto_hibern8_enable(hba); in __ufshcd_wl_resume()
9194 ufshpb_resume(hba); in __ufshcd_wl_resume()
9198 ufshcd_link_state_transition(hba, old_link_state, 0); in __ufshcd_wl_resume()
9200 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); in __ufshcd_wl_resume()
9201 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); in __ufshcd_wl_resume()
9204 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); in __ufshcd_wl_resume()
9205 hba->clk_gating.is_suspended = false; in __ufshcd_wl_resume()
9206 ufshcd_release(hba); in __ufshcd_wl_resume()
9207 hba->pm_op_in_progress = false; in __ufshcd_wl_resume()
9214 struct ufs_hba *hba; in ufshcd_wl_runtime_suspend() local
9218 hba = shost_priv(sdev->host); in ufshcd_wl_runtime_suspend()
9220 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); in ufshcd_wl_runtime_suspend()
9226 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_runtime_suspend()
9234 struct ufs_hba *hba; in ufshcd_wl_runtime_resume() local
9238 hba = shost_priv(sdev->host); in ufshcd_wl_runtime_resume()
9240 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); in ufshcd_wl_runtime_resume()
9246 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_runtime_resume()
9256 struct ufs_hba *hba; in ufshcd_wl_suspend() local
9260 hba = shost_priv(sdev->host); in ufshcd_wl_suspend()
9261 down(&hba->host_sem); in ufshcd_wl_suspend()
9266 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); in ufshcd_wl_suspend()
9269 up(&hba->host_sem); in ufshcd_wl_suspend()
9274 hba->is_sys_suspended = true; in ufshcd_wl_suspend()
9277 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_suspend()
9285 struct ufs_hba *hba; in ufshcd_wl_resume() local
9289 hba = shost_priv(sdev->host); in ufshcd_wl_resume()
9294 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); in ufshcd_wl_resume()
9300 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_resume()
9302 hba->is_sys_suspended = false; in ufshcd_wl_resume()
9303 up(&hba->host_sem); in ufshcd_wl_resume()
9311 struct ufs_hba *hba; in ufshcd_wl_shutdown() local
9313 hba = shost_priv(sdev->host); in ufshcd_wl_shutdown()
9315 down(&hba->host_sem); in ufshcd_wl_shutdown()
9316 hba->shutting_down = true; in ufshcd_wl_shutdown()
9317 up(&hba->host_sem); in ufshcd_wl_shutdown()
9320 ufshcd_rpm_get_sync(hba); in ufshcd_wl_shutdown()
9322 shost_for_each_device(sdev, hba->host) { in ufshcd_wl_shutdown()
9323 if (sdev == hba->ufs_device_wlun) in ufshcd_wl_shutdown()
9327 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_wl_shutdown()
9332 * @hba: per adapter instance
9335 * and set vreg and hba-vreg in lpm mode.
9337 static int ufshcd_suspend(struct ufs_hba *hba) in ufshcd_suspend() argument
9341 if (!hba->is_powered) in ufshcd_suspend()
9347 ufshcd_disable_irq(hba); in ufshcd_suspend()
9348 ret = ufshcd_setup_clocks(hba, false); in ufshcd_suspend()
9350 ufshcd_enable_irq(hba); in ufshcd_suspend()
9353 if (ufshcd_is_clkgating_allowed(hba)) { in ufshcd_suspend()
9354 hba->clk_gating.state = CLKS_OFF; in ufshcd_suspend()
9355 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_suspend()
9356 hba->clk_gating.state); in ufshcd_suspend()
9359 ufshcd_vreg_set_lpm(hba); in ufshcd_suspend()
9361 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_suspend()
9368 * @hba: per adapter instance
9371 * irqs of the hba.
9375 static int ufshcd_resume(struct ufs_hba *hba) in ufshcd_resume() argument
9379 if (!hba->is_powered) in ufshcd_resume()
9382 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_resume()
9383 ret = ufshcd_vreg_set_hpm(hba); in ufshcd_resume()
9388 ret = ufshcd_setup_clocks(hba, true); in ufshcd_resume()
9393 ufshcd_enable_irq(hba); in ufshcd_resume()
9397 ufshcd_vreg_set_lpm(hba); in ufshcd_resume()
9400 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret); in ufshcd_resume()
9417 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_suspend() local
9421 if (pm_runtime_suspended(hba->dev)) in ufshcd_system_suspend()
9424 ret = ufshcd_suspend(hba); in ufshcd_system_suspend()
9426 trace_ufshcd_system_suspend(dev_name(hba->dev), ret, in ufshcd_system_suspend()
9428 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_suspend()
9444 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_resume() local
9448 if (pm_runtime_suspended(hba->dev)) in ufshcd_system_resume()
9451 ret = ufshcd_resume(hba); in ufshcd_system_resume()
9454 trace_ufshcd_system_resume(dev_name(hba->dev), ret, in ufshcd_system_resume()
9456 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_resume()
9474 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_runtime_suspend() local
9478 ret = ufshcd_suspend(hba); in ufshcd_runtime_suspend()
9480 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, in ufshcd_runtime_suspend()
9482 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_suspend()
9499 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_runtime_resume() local
9503 ret = ufshcd_resume(hba); in ufshcd_runtime_resume()
9505 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, in ufshcd_runtime_resume()
9507 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_resume()
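
The four exported wrappers above (ufshcd_system_suspend/ufshcd_system_resume and ufshcd_runtime_suspend/ufshcd_runtime_resume) exist so that bus glue drivers can point their dev_pm_ops at the core driver. A minimal sketch of that wiring follows, assuming a hypothetical glue driver named "my_ufs"; the prepare/complete hooks are the exported counterparts of the __ufshcd_suspend_prepare()/ufshcd_resume_complete() pair listed further below.

#include <linux/pm.h>
#include <ufs/ufshcd.h>

/* Sketch only: "my_ufs" stands in for a real bus glue driver. */
static const struct dev_pm_ops my_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
};
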
9515 * @hba: per adapter instance
9517 * This function turns off both the UFS device and UFS HBA
9522 int ufshcd_shutdown(struct ufs_hba *hba) in ufshcd_shutdown() argument
9524 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) in ufshcd_shutdown()
9525 ufshcd_suspend(hba); in ufshcd_shutdown()
9527 hba->is_powered = false; in ufshcd_shutdown()
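
ufshcd_shutdown() is meant to be called from the glue driver's shutdown hook, after the wlun shutdown path has already put the device and link into their off states. A hedged sketch of such a hook (driver and function names hypothetical):

#include <linux/platform_device.h>
#include <ufs/ufshcd.h>

/* Hypothetical platform glue: forward .shutdown to the core helper. */
static void my_ufs_shutdown(struct platform_device *pdev)
{
	ufshcd_shutdown(platform_get_drvdata(pdev));
}
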
9536 * @hba: per adapter instance
9538 void ufshcd_remove(struct ufs_hba *hba) in ufshcd_remove() argument
9540 if (hba->ufs_device_wlun) in ufshcd_remove()
9541 ufshcd_rpm_get_sync(hba); in ufshcd_remove()
9542 ufs_hwmon_remove(hba); in ufshcd_remove()
9543 ufs_bsg_remove(hba); in ufshcd_remove()
9544 ufshpb_remove(hba); in ufshcd_remove()
9545 ufs_sysfs_remove_nodes(hba->dev); in ufshcd_remove()
9546 blk_mq_destroy_queue(hba->tmf_queue); in ufshcd_remove()
9547 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_remove()
9548 scsi_remove_host(hba->host); in ufshcd_remove()
9550 ufshcd_disable_intr(hba, hba->intr_mask); in ufshcd_remove()
9551 ufshcd_hba_stop(hba); in ufshcd_remove()
9552 ufshcd_hba_exit(hba); in ufshcd_remove()
9557 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9558 * @hba: pointer to Host Bus Adapter (HBA)
9560 void ufshcd_dealloc_host(struct ufs_hba *hba) in ufshcd_dealloc_host() argument
9562 scsi_host_put(hba->host); in ufshcd_dealloc_host()
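
ufshcd_remove() and ufshcd_dealloc_host() are the teardown counterparts of ufshcd_init() and ufshcd_alloc_host(). A sketch of how a glue driver's remove path might pair them (names hypothetical; a real driver may add vendor-specific teardown before or after these calls):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <ufs/ufshcd.h>

/* Hypothetical glue-driver remove path: keep the controller powered,
 * then tear down in roughly the reverse order of probe. */
static int my_ufs_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);		/* removes the SCSI host and TMF queue, stops the HBA */
	ufshcd_dealloc_host(hba);	/* drops the Scsi_Host reference taken at alloc time */
	return 0;
}
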
9569 * @hba: per adapter instance
9573 static int ufshcd_set_dma_mask(struct ufs_hba *hba) in ufshcd_set_dma_mask() argument
9575 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { in ufshcd_set_dma_mask()
9576 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) in ufshcd_set_dma_mask()
9579 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); in ufshcd_set_dma_mask()
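
ufshcd_set_dma_mask() gates the 64-bit DMA mask on the controller's MASK_64_ADDRESSING_SUPPORT capability bit and falls back to 32-bit otherwise. The same pattern in isolation, as a generic sketch not tied to UFS:

#include <linux/dma-mapping.h>

/* Prefer 64-bit streaming + coherent DMA when the hardware can address it,
 * otherwise fall back to a 32-bit mask. */
static int my_set_dma_mask(struct device *dev, bool addr_64bit)
{
	if (addr_64bit && !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
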
9583 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9591 struct ufs_hba *hba; in ufshcd_alloc_host() local
9609 hba = shost_priv(host); in ufshcd_alloc_host()
9610 hba->host = host; in ufshcd_alloc_host()
9611 hba->dev = dev; in ufshcd_alloc_host()
9612 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; in ufshcd_alloc_host()
9613 hba->nop_out_timeout = NOP_OUT_TIMEOUT; in ufshcd_alloc_host()
9614 INIT_LIST_HEAD(&hba->clk_list_head); in ufshcd_alloc_host()
9615 spin_lock_init(&hba->outstanding_lock); in ufshcd_alloc_host()
9617 *hba_handle = hba; in ufshcd_alloc_host()
9638 * @hba: per-adapter instance
9643 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) in ufshcd_init() argument
9646 struct Scsi_Host *host = hba->host; in ufshcd_init()
9647 struct device *dev = hba->dev; in ufshcd_init()
9655 dev_set_drvdata(dev, hba); in ufshcd_init()
9658 dev_err(hba->dev, in ufshcd_init()
9664 hba->mmio_base = mmio_base; in ufshcd_init()
9665 hba->irq = irq; in ufshcd_init()
9666 hba->vps = &ufs_hba_vps; in ufshcd_init()
9668 err = ufshcd_hba_init(hba); in ufshcd_init()
9673 err = ufshcd_hba_capabilities(hba); in ufshcd_init()
9678 hba->ufs_version = ufshcd_get_ufs_version(hba); in ufshcd_init()
9681 hba->intr_mask = ufshcd_get_intr_mask(hba); in ufshcd_init()
9683 err = ufshcd_set_dma_mask(hba); in ufshcd_init()
9685 dev_err(hba->dev, "set dma mask failed\n"); in ufshcd_init()
9690 err = ufshcd_memory_alloc(hba); in ufshcd_init()
9692 dev_err(hba->dev, "Memory allocation failed\n"); in ufshcd_init()
9697 ufshcd_host_memory_configure(hba); in ufshcd_init()
9699 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9700 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9707 hba->max_pwr_info.is_valid = false; in ufshcd_init()
9711 hba->host->host_no); in ufshcd_init()
9712 hba->eh_wq = create_singlethread_workqueue(eh_wq_name); in ufshcd_init()
9713 if (!hba->eh_wq) { in ufshcd_init()
9714 dev_err(hba->dev, "%s: failed to create eh workqueue\n", in ufshcd_init()
9719 INIT_WORK(&hba->eh_work, ufshcd_err_handler); in ufshcd_init()
9720 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); in ufshcd_init()
9722 sema_init(&hba->host_sem, 1); in ufshcd_init()
9725 mutex_init(&hba->uic_cmd_mutex); in ufshcd_init()
9728 mutex_init(&hba->dev_cmd.lock); in ufshcd_init()
9731 mutex_init(&hba->ee_ctrl_mutex); in ufshcd_init()
9733 init_rwsem(&hba->clk_scaling_lock); in ufshcd_init()
9735 ufshcd_init_clk_gating(hba); in ufshcd_init()
9737 ufshcd_init_clk_scaling(hba); in ufshcd_init()
9744 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), in ufshcd_init()
9746 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); in ufshcd_init()
9754 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); in ufshcd_init()
9756 dev_err(hba->dev, "request irq failed\n"); in ufshcd_init()
9759 hba->is_irq_enabled = true; in ufshcd_init()
9762 err = scsi_add_host(host, hba->dev); in ufshcd_init()
9764 dev_err(hba->dev, "scsi_add_host failed\n"); in ufshcd_init()
9768 hba->tmf_tag_set = (struct blk_mq_tag_set) { in ufshcd_init()
9770 .queue_depth = hba->nutmrs, in ufshcd_init()
9774 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9777 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); in ufshcd_init()
9778 if (IS_ERR(hba->tmf_queue)) { in ufshcd_init()
9779 err = PTR_ERR(hba->tmf_queue); in ufshcd_init()
9782 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, in ufshcd_init()
9783 sizeof(*hba->tmf_rqs), GFP_KERNEL); in ufshcd_init()
9784 if (!hba->tmf_rqs) { in ufshcd_init()
9790 ufshcd_device_reset(hba); in ufshcd_init()
9792 ufshcd_init_crypto(hba); in ufshcd_init()
9795 err = ufshcd_hba_enable(hba); in ufshcd_init()
9797 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_init()
9798 ufshcd_print_evt_hist(hba); in ufshcd_init()
9799 ufshcd_print_host_state(hba); in ufshcd_init()
9808 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9811 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9815 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, in ufshcd_init()
9819 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) { in ufshcd_init()
9820 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | in ufshcd_init()
9826 atomic_set(&hba->scsi_block_reqs_cnt, 0); in ufshcd_init()
9833 ufshcd_set_ufs_dev_active(hba); in ufshcd_init()
9835 async_schedule(ufshcd_async_scan, hba); in ufshcd_init()
9836 ufs_sysfs_add_nodes(hba->dev); in ufshcd_init()
9842 blk_mq_destroy_queue(hba->tmf_queue); in ufshcd_init()
9844 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9846 scsi_remove_host(hba->host); in ufshcd_init()
9848 hba->is_irq_enabled = false; in ufshcd_init()
9849 ufshcd_hba_exit(hba); in ufshcd_init()
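
ufshcd_alloc_host() and ufshcd_init() are the two entry points a bus glue driver calls from probe: allocate the Scsi_Host plus struct ufs_hba, map the HCI register space, obtain the IRQ, then hand everything to ufshcd_init(), which enables the controller and schedules the async LUN scan. A minimal probe sketch under those assumptions (hypothetical platform driver, error handling trimmed to the essentials):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <ufs/ufshcd.h>

static int my_ufs_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ufs_hba *hba;
	void __iomem *mmio;
	int irq, err;

	mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio))
		return PTR_ERR(mmio);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_alloc_host(dev, &hba);	/* allocates Scsi_Host + struct ufs_hba */
	if (err)
		return err;

	/* hba->vops, quirks, etc. would normally be set here, before ufshcd_init(). */

	err = ufshcd_init(hba, mmio, irq);	/* enables the HCI, registers the SCSI host */
	if (err)
		ufshcd_dealloc_host(hba);

	return err;
}
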
9857 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_resume_complete() local
9859 if (hba->complete_put) { in ufshcd_resume_complete()
9860 ufshcd_rpm_put(hba); in ufshcd_resume_complete()
9861 hba->complete_put = false; in ufshcd_resume_complete()
9866 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba) in ufshcd_rpm_ok_for_spm() argument
9868 struct device *dev = &hba->ufs_device_wlun->sdev_gendev; in ufshcd_rpm_ok_for_spm()
9875 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl); in ufshcd_rpm_ok_for_spm()
9876 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl); in ufshcd_rpm_ok_for_spm()
9878 hba->curr_dev_pwr_mode == dev_pwr_mode && in ufshcd_rpm_ok_for_spm()
9879 hba->uic_link_state == link_state && in ufshcd_rpm_ok_for_spm()
9880 !hba->dev_info.b_rpm_dev_flush_capable; in ufshcd_rpm_ok_for_spm()
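
ufshcd_rpm_ok_for_spm() checks whether the wlun's current runtime-PM state already matches what hba->spm_lvl asks for, so system suspend can skip a needless resume/suspend cycle. spm_lvl (like rpm_lvl) is an index into a (device power mode, link state) table. The mapping below is recalled from the driver's ufs_pm_lvl_states[] table rather than taken from this listing, so treat it as an assumption to verify against the tree being read:

/*
 * Assumed UFS PM level -> target state mapping (verify against
 * ufs_pm_lvl_states[] in ufshcd.c):
 *
 *   lvl 0: UFS_ACTIVE_PWR_MODE    + UIC_LINK_ACTIVE_STATE
 *   lvl 1: UFS_ACTIVE_PWR_MODE    + UIC_LINK_HIBERN8_STATE
 *   lvl 2: UFS_SLEEP_PWR_MODE     + UIC_LINK_ACTIVE_STATE
 *   lvl 3: UFS_SLEEP_PWR_MODE     + UIC_LINK_HIBERN8_STATE
 *   lvl 4: UFS_POWERDOWN_PWR_MODE + UIC_LINK_HIBERN8_STATE
 *   lvl 5: UFS_POWERDOWN_PWR_MODE + UIC_LINK_OFF_STATE
 *   lvl 6: UFS_DEEPSLEEP_PWR_MODE + UIC_LINK_OFF_STATE
 */
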
9888 struct ufs_hba *hba = dev_get_drvdata(dev); in __ufshcd_suspend_prepare() local
9897 if (hba->ufs_device_wlun) { in __ufshcd_suspend_prepare()
9899 ufshcd_rpm_get_noresume(hba); in __ufshcd_suspend_prepare()
9904 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) { in __ufshcd_suspend_prepare()
9906 ret = ufshcd_rpm_resume(hba); in __ufshcd_suspend_prepare()
9908 ufshcd_rpm_put(hba); in __ufshcd_suspend_prepare()
9912 hba->complete_put = true; in __ufshcd_suspend_prepare()
9928 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_wl_poweroff() local
9930 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_wl_poweroff()