Lines Matching full:hba
87 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba) in ufs_mtk_is_boost_crypt_enabled() argument
89 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_boost_crypt_enabled()
94 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba) in ufs_mtk_is_va09_supported() argument
96 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_va09_supported()
101 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) in ufs_mtk_is_broken_vcc() argument
103 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_broken_vcc()
108 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) in ufs_mtk_is_pmc_via_fastauto() argument
110 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_pmc_via_fastauto()
115 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) in ufs_mtk_cfg_unipro_cg() argument
120 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
126 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
129 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
132 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
135 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
140 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
143 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
146 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
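Each ufshcd_dme_get()/ufshcd_dme_set() pair in ufs_mtk_cfg_unipro_cg() is a read-modify-write of a UniPro attribute: read the current value, flip the clock-gating bits, write it back. A sketch of one such pair; the helper name and the gate bit are illustrative, while VS_SAVEPOWERCONTROL is one of the vendor attributes this driver defines:

/* Illustrative read-modify-write of a UniPro vendor attribute. */
static void ufs_mtk_unipro_cg_rmw(struct ufs_hba *hba, bool enable)
{
        u32 tmp;

        if (ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp))
                return;

        if (enable)
                tmp |= BIT(0);          /* assumed clock-gate enable bit */
        else
                tmp &= ~BIT(0);

        ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
}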
151 static void ufs_mtk_crypto_enable(struct ufs_hba *hba) in ufs_mtk_crypto_enable() argument
157 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n", in ufs_mtk_crypto_enable()
159 hba->caps &= ~UFSHCD_CAP_CRYPTO; in ufs_mtk_crypto_enable()
163 static void ufs_mtk_host_reset(struct ufs_hba *hba) in ufs_mtk_host_reset() argument
165 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_host_reset()
178 static void ufs_mtk_init_reset_control(struct ufs_hba *hba, in ufs_mtk_init_reset_control() argument
182 *rc = devm_reset_control_get(hba->dev, str); in ufs_mtk_init_reset_control()
184 dev_info(hba->dev, "Failed to get reset control %s: %ld\n", in ufs_mtk_init_reset_control()
190 static void ufs_mtk_init_reset(struct ufs_hba *hba) in ufs_mtk_init_reset() argument
192 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_reset()
194 ufs_mtk_init_reset_control(hba, &host->hci_reset, in ufs_mtk_init_reset()
196 ufs_mtk_init_reset_control(hba, &host->unipro_reset, in ufs_mtk_init_reset()
198 ufs_mtk_init_reset_control(hba, &host->crypto_reset, in ufs_mtk_init_reset()
202 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, in ufs_mtk_hce_enable_notify() argument
205 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_hce_enable_notify()
209 hba->vps->hba_enable_delay_us = 0; in ufs_mtk_hce_enable_notify()
211 hba->vps->hba_enable_delay_us = 600; in ufs_mtk_hce_enable_notify()
212 ufs_mtk_host_reset(hba); in ufs_mtk_hce_enable_notify()
215 if (hba->caps & UFSHCD_CAP_CRYPTO) in ufs_mtk_hce_enable_notify()
216 ufs_mtk_crypto_enable(hba); in ufs_mtk_hce_enable_notify()
219 ufshcd_writel(hba, 0, in ufs_mtk_hce_enable_notify()
221 hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT; in ufs_mtk_hce_enable_notify()
222 hba->ahit = 0; in ufs_mtk_hce_enable_notify()
229 ufshcd_writel(hba, in ufs_mtk_hce_enable_notify()
230 ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, in ufs_mtk_hce_enable_notify()
237 static int ufs_mtk_bind_mphy(struct ufs_hba *hba) in ufs_mtk_bind_mphy() argument
239 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_bind_mphy()
240 struct device *dev = hba->dev; in ufs_mtk_bind_mphy()
275 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) in ufs_mtk_setup_ref_clk() argument
277 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_ref_clk()
288 ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
291 ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
298 value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
307 dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); in ufs_mtk_setup_ref_clk()
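ufs_mtk_setup_ref_clk() is a request/ack handshake: write REFCLK_REQUEST or REFCLK_RELEASE into REG_UFS_REFCLK_CTRL, then poll the same register until the controller acknowledges, emitting the dev_err above on timeout. A sketch of that loop, assuming REFCLK_ACK as the ack bit and REFCLK_REQ_TIMEOUT_US as the deadline (both names are assumptions here):

#include <linux/delay.h>
#include <linux/ktime.h>

static int ufs_mtk_refclk_handshake(struct ufs_hba *hba, bool on)
{
        ktime_t timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
        u32 value;

        ufshcd_writel(hba, on ? REFCLK_REQUEST : REFCLK_RELEASE,
                      REG_UFS_REFCLK_CTRL);

        do {
                udelay(10);
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
                /* the ack level must match the requested level */
                if (!!(value & REFCLK_ACK) == on)
                        return 0;
        } while (ktime_before(ktime_get(), timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
        return -ETIMEDOUT;
}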
323 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba, in ufs_mtk_setup_ref_clk_wait_us() argument
326 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_ref_clk_wait_us()
328 if (hba->dev_info.clk_gating_wait_us) { in ufs_mtk_setup_ref_clk_wait_us()
330 hba->dev_info.clk_gating_wait_us; in ufs_mtk_setup_ref_clk_wait_us()
338 static void ufs_mtk_dbg_sel(struct ufs_hba *hba) in ufs_mtk_dbg_sel() argument
340 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_dbg_sel()
343 ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); in ufs_mtk_dbg_sel()
344 ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); in ufs_mtk_dbg_sel()
345 ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); in ufs_mtk_dbg_sel()
346 ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2); in ufs_mtk_dbg_sel()
347 ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3); in ufs_mtk_dbg_sel()
349 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); in ufs_mtk_dbg_sel()
353 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, in ufs_mtk_wait_idle_state() argument
369 ufs_mtk_dbg_sel(hba); in ufs_mtk_wait_idle_state()
370 val = ufshcd_readl(hba, REG_UFS_PROBE); in ufs_mtk_wait_idle_state()
390 dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); in ufs_mtk_wait_idle_state()
393 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, in ufs_mtk_wait_link_state() argument
402 ufs_mtk_dbg_sel(hba); in ufs_mtk_wait_link_state()
403 val = ufshcd_readl(hba, REG_UFS_PROBE); in ufs_mtk_wait_link_state()
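Both wait helpers lean on the same probe: ufs_mtk_dbg_sel() routes the internal state signals onto REG_UFS_PROBE, and the caller samples that register until the wanted state shows up or a deadline passes. A sketch of the polling loop; the shift by 28 is an illustrative guess at where the state field sits:

static int ufs_mtk_wait_state_sketch(struct ufs_hba *hba, u32 state,
                                     unsigned long max_wait_ms)
{
        ktime_t timeout = ktime_add_ms(ktime_get(), max_wait_ms);
        u32 val;

        do {
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);
                if ((val >> 28) == state)       /* assumed field position */
                        return 0;
                usleep_range(100, 200);
        } while (ktime_before(ktime_get(), timeout));

        return -ETIMEDOUT;
}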
419 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on) in ufs_mtk_mphy_power_on() argument
421 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_mphy_power_on()
430 if (ufs_mtk_is_va09_supported(hba)) { in ufs_mtk_mphy_power_on()
441 if (ufs_mtk_is_va09_supported(hba)) { in ufs_mtk_mphy_power_on()
450 dev_info(hba->dev, in ufs_mtk_mphy_power_on()
476 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost) in ufs_mtk_boost_crypt() argument
478 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_boost_crypt()
483 if (!ufs_mtk_is_boost_crypt_enabled(hba)) in ufs_mtk_boost_crypt()
492 dev_info(hba->dev, "clk_prepare_enable(): %d\n", in ufs_mtk_boost_crypt()
500 dev_info(hba->dev, in ufs_mtk_boost_crypt()
508 dev_info(hba->dev, in ufs_mtk_boost_crypt()
517 dev_info(hba->dev, in ufs_mtk_boost_crypt()
524 dev_info(hba->dev, in ufs_mtk_boost_crypt()
532 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name, in ufs_mtk_init_host_clk() argument
537 ret = ufs_mtk_get_host_clk(hba->dev, name, clk); in ufs_mtk_init_host_clk()
539 dev_info(hba->dev, "%s: failed to get %s: %d", __func__, in ufs_mtk_init_host_clk()
546 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba) in ufs_mtk_init_boost_crypt() argument
548 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_boost_crypt()
550 struct device *dev = hba->dev; in ufs_mtk_init_boost_crypt()
573 if (ufs_mtk_init_host_clk(hba, "crypt_mux", in ufs_mtk_init_boost_crypt()
577 if (ufs_mtk_init_host_clk(hba, "crypt_lp", in ufs_mtk_init_boost_crypt()
581 if (ufs_mtk_init_host_clk(hba, "crypt_perf", in ufs_mtk_init_boost_crypt()
593 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba) in ufs_mtk_init_va09_pwr_ctrl() argument
595 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_va09_pwr_ctrl()
597 host->reg_va09 = regulator_get(hba->dev, "va09"); in ufs_mtk_init_va09_pwr_ctrl()
599 dev_info(hba->dev, "failed to get va09"); in ufs_mtk_init_va09_pwr_ctrl()
604 static void ufs_mtk_init_host_caps(struct ufs_hba *hba) in ufs_mtk_init_host_caps() argument
606 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_host_caps()
607 struct device_node *np = hba->dev->of_node; in ufs_mtk_init_host_caps()
610 ufs_mtk_init_boost_crypt(hba); in ufs_mtk_init_host_caps()
613 ufs_mtk_init_va09_pwr_ctrl(hba); in ufs_mtk_init_host_caps()
624 dev_info(hba->dev, "caps: 0x%x", host->caps); in ufs_mtk_init_host_caps()
627 static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost) in ufs_mtk_boost_pm_qos() argument
629 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_boost_pm_qos()
638 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up) in ufs_mtk_scale_perf() argument
640 ufs_mtk_boost_crypt(hba, scale_up); in ufs_mtk_scale_perf()
641 ufs_mtk_boost_pm_qos(hba, scale_up); in ufs_mtk_scale_perf()
644 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) in ufs_mtk_pwr_ctrl() argument
646 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_pwr_ctrl()
650 ufs_mtk_setup_ref_clk(hba, on); in ufs_mtk_pwr_ctrl()
651 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_pwr_ctrl()
652 ufs_mtk_scale_perf(hba, on); in ufs_mtk_pwr_ctrl()
654 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_pwr_ctrl()
655 ufs_mtk_scale_perf(hba, on); in ufs_mtk_pwr_ctrl()
656 ufs_mtk_setup_ref_clk(hba, on); in ufs_mtk_pwr_ctrl()
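The two branches above are deliberate mirrors: powering up supplies the reference clock before raising performance, powering down drops performance first so the reference clock is the last thing removed. Spelled out as a sketch (error handling elided, as in the matched lines):

static void ufs_mtk_pwr_ctrl_sketch(struct ufs_hba *hba, bool on)
{
        if (on) {
                ufs_mtk_setup_ref_clk(hba, true);
                if (!ufshcd_is_clkscaling_supported(hba))
                        ufs_mtk_scale_perf(hba, true);
        } else {
                if (!ufshcd_is_clkscaling_supported(hba))
                        ufs_mtk_scale_perf(hba, false);
                ufs_mtk_setup_ref_clk(hba, false);
        }
}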
663 * @hba: host controller instance
669 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, in ufs_mtk_setup_clocks() argument
672 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_clocks()
685 if (ufshcd_is_link_off(hba)) { in ufs_mtk_setup_clocks()
687 } else if (ufshcd_is_link_hibern8(hba) || in ufs_mtk_setup_clocks()
688 (!ufshcd_can_hibern8_during_gating(hba) && in ufs_mtk_setup_clocks()
689 ufshcd_is_auto_hibern8_enabled(hba))) { in ufs_mtk_setup_clocks()
695 ret = ufs_mtk_wait_link_state(hba, in ufs_mtk_setup_clocks()
703 ufs_mtk_pwr_ctrl(hba, false); in ufs_mtk_setup_clocks()
705 ufs_mtk_pwr_ctrl(hba, true); in ufs_mtk_setup_clocks()
711 static void ufs_mtk_get_controller_version(struct ufs_hba *hba) in ufs_mtk_get_controller_version() argument
713 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_get_controller_version()
722 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver); in ufs_mtk_get_controller_version()
730 if (hba->ufs_version < ufshci_version(3, 0)) in ufs_mtk_get_controller_version()
731 hba->ufs_version = ufshci_version(3, 0); in ufs_mtk_get_controller_version()
736 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba) in ufs_mtk_get_ufs_hci_version() argument
738 return hba->ufs_version; in ufs_mtk_get_ufs_hci_version()
744 * @hba: per adapter instance
746 static void ufs_mtk_init_clocks(struct ufs_hba *hba) in ufs_mtk_init_clocks() argument
748 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_clocks()
749 struct list_head *head = &hba->clk_list_head; in ufs_mtk_init_clocks()
774 hba->caps &= ~UFSHCD_CAP_CLK_SCALING; in ufs_mtk_init_clocks()
775 dev_info(hba->dev, in ufs_mtk_init_clocks()
782 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) in ufs_mtk_vreg_fix_vcc() argument
784 struct ufs_vreg_info *info = &hba->vreg_info; in ufs_mtk_vreg_fix_vcc()
785 struct device_node *np = hba->dev->of_node; in ufs_mtk_vreg_fix_vcc()
786 struct device *dev = hba->dev; in ufs_mtk_vreg_fix_vcc()
791 if (hba->vreg_info.vcc) in ufs_mtk_vreg_fix_vcc()
801 ver = (hba->dev_info.wspecversion & 0xF00) >> 8; in ufs_mtk_vreg_fix_vcc()
824 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) in ufs_mtk_vreg_fix_vccqx() argument
826 struct ufs_vreg_info *info = &hba->vreg_info; in ufs_mtk_vreg_fix_vccqx()
829 if (hba->dev_info.wspecversion >= 0x0300) { in ufs_mtk_vreg_fix_vccqx()
842 devm_kfree(hba->dev, (*vreg_off)->name); in ufs_mtk_vreg_fix_vccqx()
843 devm_kfree(hba->dev, *vreg_off); in ufs_mtk_vreg_fix_vccqx()
850 * @hba: host controller instance
858 static int ufs_mtk_init(struct ufs_hba *hba) in ufs_mtk_init() argument
861 struct device *dev = hba->dev; in ufs_mtk_init()
872 host->hba = hba; in ufs_mtk_init()
873 ufshcd_set_variant(hba, host); in ufs_mtk_init()
882 ufs_mtk_init_host_caps(hba); in ufs_mtk_init()
884 err = ufs_mtk_bind_mphy(hba); in ufs_mtk_init()
888 ufs_mtk_init_reset(hba); in ufs_mtk_init()
891 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; in ufs_mtk_init()
894 hba->caps |= UFSHCD_CAP_CLK_GATING; in ufs_mtk_init()
897 hba->caps |= UFSHCD_CAP_CRYPTO; in ufs_mtk_init()
900 hba->caps |= UFSHCD_CAP_WB_EN; in ufs_mtk_init()
903 hba->caps |= UFSHCD_CAP_CLK_SCALING; in ufs_mtk_init()
905 hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; in ufs_mtk_init()
906 hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); in ufs_mtk_init()
909 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; in ufs_mtk_init()
911 ufs_mtk_init_clocks(hba); in ufs_mtk_init()
920 ufs_mtk_mphy_power_on(hba, true); in ufs_mtk_init()
921 ufs_mtk_setup_clocks(hba, true, POST_CHANGE); in ufs_mtk_init()
923 host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); in ufs_mtk_init()
932 ufshcd_set_variant(hba, NULL); in ufs_mtk_init()
937 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, in ufs_mtk_pmc_via_fastauto() argument
940 if (!ufs_mtk_is_pmc_via_fastauto(hba)) in ufs_mtk_pmc_via_fastauto()
943 if (dev_req_params->hs_rate == hba->pwr_info.hs_rate) in ufs_mtk_pmc_via_fastauto()
957 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, in ufs_mtk_pre_pwr_change() argument
961 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_pre_pwr_change()
977 if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { in ufs_mtk_pre_pwr_change()
978 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); in ufs_mtk_pre_pwr_change()
979 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1); in ufs_mtk_pre_pwr_change()
981 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); in ufs_mtk_pre_pwr_change()
982 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1); in ufs_mtk_pre_pwr_change()
984 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufs_mtk_pre_pwr_change()
986 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufs_mtk_pre_pwr_change()
988 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufs_mtk_pre_pwr_change()
991 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), in ufs_mtk_pre_pwr_change()
994 ret = ufshcd_uic_change_pwr_mode(hba, in ufs_mtk_pre_pwr_change()
998 dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n", in ufs_mtk_pre_pwr_change()
1004 ret = ufshcd_dme_configure_adapt(hba, in ufs_mtk_pre_pwr_change()
1012 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, in ufs_mtk_pwr_change_notify() argument
1021 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, in ufs_mtk_pwr_change_notify()
1034 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_unipro_set_lpm() argument
1037 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_unipro_set_lpm()
1039 ret = ufshcd_dme_set(hba, in ufs_mtk_unipro_set_lpm()
1054 static int ufs_mtk_pre_link(struct ufs_hba *hba) in ufs_mtk_pre_link() argument
1059 ufs_mtk_get_controller_version(hba); in ufs_mtk_pre_link()
1061 ret = ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_pre_link()
1070 ret = ufshcd_disable_host_tx_lcc(hba); in ufs_mtk_pre_link()
1075 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); in ufs_mtk_pre_link()
1081 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); in ufs_mtk_pre_link()
1086 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) in ufs_mtk_setup_clk_gating() argument
1090 if (ufshcd_is_clkgating_allowed(hba)) { in ufs_mtk_setup_clk_gating()
1091 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) in ufs_mtk_setup_clk_gating()
1093 hba->ahit); in ufs_mtk_setup_clk_gating()
1096 ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5); in ufs_mtk_setup_clk_gating()
1100 static int ufs_mtk_post_link(struct ufs_hba *hba) in ufs_mtk_post_link() argument
1103 ufs_mtk_cfg_unipro_cg(hba, true); in ufs_mtk_post_link()
1105 /* will be configured during hba probe */ in ufs_mtk_post_link()
1106 if (ufshcd_is_auto_hibern8_supported(hba)) in ufs_mtk_post_link()
1107 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | in ufs_mtk_post_link()
1110 ufs_mtk_setup_clk_gating(hba); in ufs_mtk_post_link()
1115 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, in ufs_mtk_link_startup_notify() argument
1122 ret = ufs_mtk_pre_link(hba); in ufs_mtk_link_startup_notify()
1125 ret = ufs_mtk_post_link(hba); in ufs_mtk_link_startup_notify()
1135 static int ufs_mtk_device_reset(struct ufs_hba *hba) in ufs_mtk_device_reset() argument
1139 /* disable hba before device reset */ in ufs_mtk_device_reset()
1140 ufshcd_hba_stop(hba); in ufs_mtk_device_reset()
1158 dev_info(hba->dev, "device reset done\n"); in ufs_mtk_device_reset()
1163 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) in ufs_mtk_link_set_hpm() argument
1167 err = ufshcd_hba_enable(hba); in ufs_mtk_link_set_hpm()
1171 err = ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_link_set_hpm()
1175 err = ufshcd_uic_hibern8_exit(hba); in ufs_mtk_link_set_hpm()
1177 ufshcd_set_link_active(hba); in ufs_mtk_link_set_hpm()
1181 err = ufshcd_make_hba_operational(hba); in ufs_mtk_link_set_hpm()
1188 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) in ufs_mtk_link_set_lpm() argument
1193 ufshcd_writel(hba, in ufs_mtk_link_set_lpm()
1194 (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100), in ufs_mtk_link_set_lpm()
1197 err = ufs_mtk_unipro_set_lpm(hba, true); in ufs_mtk_link_set_lpm()
1200 ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_link_set_lpm()
1207 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_vccqx_set_lpm() argument
1211 if (hba->vreg_info.vccq) in ufs_mtk_vccqx_set_lpm()
1212 vccqx = hba->vreg_info.vccq; in ufs_mtk_vccqx_set_lpm()
1214 vccqx = hba->vreg_info.vccq2; in ufs_mtk_vccqx_set_lpm()
1220 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_vsx_set_lpm() argument
1225 (unsigned long)hba->dev_info.wspecversion, in ufs_mtk_vsx_set_lpm()
1229 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_dev_vreg_set_lpm() argument
1231 if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) in ufs_mtk_dev_vreg_set_lpm()
1235 if (!hba->vreg_info.vcc) in ufs_mtk_dev_vreg_set_lpm()
1239 if (lpm && ufshcd_is_ufs_dev_active(hba)) in ufs_mtk_dev_vreg_set_lpm()
1243 if (lpm && hba->vreg_info.vcc->enabled) in ufs_mtk_dev_vreg_set_lpm()
1247 ufs_mtk_vccqx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1248 ufs_mtk_vsx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1250 ufs_mtk_vsx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1251 ufs_mtk_vccqx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
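The same mirrored ordering shows up for the regulators: entering low-power mode drops vccqx before vsx, and leaving it brings vsx back before vccqx, so the upper-layer rail stays up whenever the device rail switches. As a sketch:

static void ufs_mtk_dev_vreg_order_sketch(struct ufs_hba *hba, bool lpm)
{
        if (lpm) {
                ufs_mtk_vccqx_set_lpm(hba, true);
                ufs_mtk_vsx_set_lpm(hba, true);
        } else {
                ufs_mtk_vsx_set_lpm(hba, false);
                ufs_mtk_vccqx_set_lpm(hba, false);
        }
}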
1255 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) in ufs_mtk_auto_hibern8_disable() argument
1260 ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufs_mtk_auto_hibern8_disable()
1263 ufs_mtk_wait_idle_state(hba, 5); in ufs_mtk_auto_hibern8_disable()
1265 ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); in ufs_mtk_auto_hibern8_disable()
1267 dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); in ufs_mtk_auto_hibern8_disable()
1270 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, in ufs_mtk_suspend() argument
1277 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufs_mtk_suspend()
1279 ufs_mtk_auto_hibern8_disable(hba); in ufs_mtk_suspend()
1283 if (ufshcd_is_link_hibern8(hba)) { in ufs_mtk_suspend()
1284 err = ufs_mtk_link_set_lpm(hba); in ufs_mtk_suspend()
1289 if (!ufshcd_is_link_active(hba)) { in ufs_mtk_suspend()
1295 err = ufs_mtk_mphy_power_on(hba, false); in ufs_mtk_suspend()
1300 if (ufshcd_is_link_off(hba)) in ufs_mtk_suspend()
1312 ufshcd_set_link_off(hba); in ufs_mtk_suspend()
1316 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufs_mtk_resume() argument
1321 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) in ufs_mtk_resume()
1322 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_resume()
1326 err = ufs_mtk_mphy_power_on(hba, true); in ufs_mtk_resume()
1330 if (ufshcd_is_link_hibern8(hba)) { in ufs_mtk_resume()
1331 err = ufs_mtk_link_set_hpm(hba); in ufs_mtk_resume()
1338 return ufshcd_link_recovery(hba); in ufs_mtk_resume()
1341 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) in ufs_mtk_dbg_register_dump() argument
1344 ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10, in ufs_mtk_dbg_register_dump()
1347 ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); in ufs_mtk_dbg_register_dump()
1350 ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, in ufs_mtk_dbg_register_dump()
1355 ufs_mtk_dbg_sel(hba); in ufs_mtk_dbg_register_dump()
1356 ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); in ufs_mtk_dbg_register_dump()
1359 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) in ufs_mtk_apply_dev_quirks() argument
1361 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_mtk_apply_dev_quirks()
1365 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); in ufs_mtk_apply_dev_quirks()
1366 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10); in ufs_mtk_apply_dev_quirks()
1375 ufs_mtk_setup_ref_clk_wait_us(hba, 1); in ufs_mtk_apply_dev_quirks()
1377 ufs_mtk_setup_ref_clk_wait_us(hba, 30); in ufs_mtk_apply_dev_quirks()
1379 ufs_mtk_setup_ref_clk_wait_us(hba, 100); in ufs_mtk_apply_dev_quirks()
1381 ufs_mtk_setup_ref_clk_wait_us(hba, in ufs_mtk_apply_dev_quirks()
1386 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) in ufs_mtk_fixup_dev_quirks() argument
1388 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); in ufs_mtk_fixup_dev_quirks()
1390 if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && in ufs_mtk_fixup_dev_quirks()
1391 (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { in ufs_mtk_fixup_dev_quirks()
1392 hba->vreg_info.vcc->always_on = true; in ufs_mtk_fixup_dev_quirks()
1397 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | in ufs_mtk_fixup_dev_quirks()
1401 ufs_mtk_vreg_fix_vcc(hba); in ufs_mtk_fixup_dev_quirks()
1402 ufs_mtk_vreg_fix_vccqx(hba); in ufs_mtk_fixup_dev_quirks()
1405 static void ufs_mtk_event_notify(struct ufs_hba *hba, in ufs_mtk_event_notify() argument
1416 dev_info(hba->dev, in ufs_mtk_event_notify()
1424 dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]); in ufs_mtk_event_notify()
1429 dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]); in ufs_mtk_event_notify()
1433 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba, in ufs_mtk_config_scaling_param() argument
1438 hba->clk_scaling.min_gear = UFS_HS_G4; in ufs_mtk_config_scaling_param()
1440 hba->vps->devfreq_profile.polling_ms = 200; in ufs_mtk_config_scaling_param()
1441 hba->vps->ondemand_data.upthreshold = 50; in ufs_mtk_config_scaling_param()
1442 hba->vps->ondemand_data.downdifferential = 20; in ufs_mtk_config_scaling_param()
1454 * @hba: per adapter instance
1457 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) in ufs_mtk_clk_scale() argument
1459 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_clk_scale()
1466 dev_info(hba->dev, in ufs_mtk_clk_scale()
1480 dev_info(hba->dev, in ufs_mtk_clk_scale()
1489 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up, in ufs_mtk_clk_scale_notify() argument
1492 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_clk_scale_notify()
1497 ufs_mtk_clk_scale(hba, scale_up); in ufs_mtk_clk_scale_notify()
1500 ufs_mtk_scale_perf(hba, scale_up); in ufs_mtk_clk_scale_notify()
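ufs_mtk_clk_scale_notify() splits the work across the two notify stages: the clock tree is switched via ufs_mtk_clk_scale(), and the boost/QoS side follows via ufs_mtk_scale_perf(). A sketch of that split, with the stage mapping inferred from the line order above:

static int ufs_mtk_clk_scale_notify_sketch(struct ufs_hba *hba, bool scale_up,
                                           enum ufs_notify_change_status status)
{
        if (!ufshcd_is_clkscaling_supported(hba))
                return 0;

        if (status == PRE_CHANGE)
                ufs_mtk_clk_scale(hba, scale_up);   /* switch clocks first */
        else
                ufs_mtk_scale_perf(hba, scale_up);  /* then QoS and boost */

        return 0;
}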
1589 struct ufs_hba *hba = platform_get_drvdata(pdev); in ufs_mtk_remove() local
1592 ufshcd_remove(hba); in ufs_mtk_remove()
1599 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_system_suspend() local
1606 ufs_mtk_dev_vreg_set_lpm(hba, true); in ufs_mtk_system_suspend()
1613 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_system_resume() local
1615 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_system_resume()
1623 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_runtime_suspend() local
1630 ufs_mtk_dev_vreg_set_lpm(hba, true); in ufs_mtk_runtime_suspend()
1637 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_runtime_resume() local
1639 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_runtime_resume()