Lines Matching +full:hs400 +full:- +full:cmd +full:- +full:int +full:- +full:delay

1 // SPDX-License-Identifier: GPL-2.0-only
3 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
5 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
10 #include <linux/delay.h>
22 #include "sdhci-pltfm.h"
121 #define INVALID_TUNING_PHASE -1
135 /* Max load for eMMC Vdd-io supply */
139 msm_host->var_ops->msm_readl_relaxed(host, offset)
142 msm_host->var_ops->msm_writel_relaxed(val, host, offset)
261 int pwr_irq; /* power irq */
296 return msm_host->offset; in sdhci_priv_msm_offset()
309 return readl_relaxed(msm_host->core_mem + offset); in sdhci_msm_mci_variant_readl_relaxed()
315 return readl_relaxed(host->ioaddr + offset); in sdhci_msm_v5_variant_readl_relaxed()
324 writel_relaxed(val, msm_host->core_mem + offset); in sdhci_msm_mci_variant_writel_relaxed()
330 writel_relaxed(val, host->ioaddr + offset); in sdhci_msm_v5_variant_writel_relaxed()
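The two variant accessor pairs above exist because pre-v5 controllers keep the MCI/power-control registers in a separate "core" memory region, while sdhci-msm-v5 folds them into the main SDHCI register space. A minimal sketch of how the msm_host_readl()/msm_host_writel() macros at lines 139/142 dispatch through a per-variant ops table; the field and function names follow the fragments above, but the table definitions themselves are reconstructed, not quoted:

struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host, u32 offset);
};

static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,	/* core_mem + offset */
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,	/* ioaddr + offset */
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

Probe later copies the matched variant's ops into msm_host->var_ops (line 2565), so every power-IRQ register access lands in the right region.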
333 static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host) in msm_get_clock_mult_for_bus_mode()
335 struct mmc_ios ios = host->mmc->ios; in msm_get_clock_mult_for_bus_mode()
345 host->flags & SDHCI_HS400_TUNING) in msm_get_clock_mult_for_bus_mode()
351 unsigned int clock) in msm_set_clock_rate_for_bus_mode()
355 struct mmc_ios curr_ios = host->mmc->ios; in msm_set_clock_rate_for_bus_mode()
356 struct clk *core_clk = msm_host->bulk_clks[0].clk; in msm_set_clock_rate_for_bus_mode()
358 unsigned int desired_rate; in msm_set_clock_rate_for_bus_mode()
359 unsigned int mult; in msm_set_clock_rate_for_bus_mode()
360 int rc; in msm_set_clock_rate_for_bus_mode()
364 rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate); in msm_set_clock_rate_for_bus_mode()
367 mmc_hostname(host->mmc), desired_rate, curr_ios.timing); in msm_set_clock_rate_for_bus_mode()
379 mmc_hostname(host->mmc), desired_rate, achieved_rate); in msm_set_clock_rate_for_bus_mode()
380 host->mmc->actual_clock = achieved_rate / mult; in msm_set_clock_rate_for_bus_mode()
383 msm_host->clk_rate = desired_rate; in msm_set_clock_rate_for_bus_mode()
386 mmc_hostname(host->mmc), achieved_rate, curr_ios.timing); in msm_set_clock_rate_for_bus_mode()
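Only fragments of msm_set_clock_rate_for_bus_mode() match the search, so here is a hedged reconstruction of the rate bookkeeping it performs. In the DDR modes (DDR50/DDR52/HS400) and during HS400 tuning the controller clock (MCLK) must run at twice the card clock, so msm_get_clock_mult_for_bus_mode() returns 2, otherwise 1:

	mult = msm_get_clock_mult_for_bus_mode(host);
	desired_rate = clock * mult;			/* e.g. a 200 MHz HS400 bus clock needs a 400 MHz MCLK */

	rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
	if (rc)
		pr_err("%s: setting clock at rate %u failed for timing %d\n",
		       mmc_hostname(host->mmc), desired_rate, curr_ios.timing);

	achieved_rate = clk_get_rate(core_clk);
	host->mmc->actual_clock = achieved_rate / mult;	/* report the card-facing clock, not MCLK */
	msm_host->clk_rate = desired_rate;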
390 static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll) in msm_dll_poll_ck_out_en()
394 struct mmc_host *mmc = host->mmc; in msm_dll_poll_ck_out_en()
399 ck_out_en = !!(readl_relaxed(host->ioaddr + in msm_dll_poll_ck_out_en()
400 msm_offset->core_dll_config) & CORE_CK_OUT_EN); in msm_dll_poll_ck_out_en()
403 if (--wait_cnt == 0) { in msm_dll_poll_ck_out_en()
406 return -ETIMEDOUT; in msm_dll_poll_ck_out_en()
410 ck_out_en = !!(readl_relaxed(host->ioaddr + in msm_dll_poll_ck_out_en()
411 msm_offset->core_dll_config) & CORE_CK_OUT_EN); in msm_dll_poll_ck_out_en()
417 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase) in msm_config_cm_dll_phase()
419 int rc; in msm_config_cm_dll_phase()
426 struct mmc_host *mmc = host->mmc; in msm_config_cm_dll_phase()
431 return -EINVAL; in msm_config_cm_dll_phase()
433 spin_lock_irqsave(&host->lock, flags); in msm_config_cm_dll_phase()
435 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
438 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
449 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
452 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
454 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
456 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
463 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
466 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); in msm_config_cm_dll_phase()
473 spin_unlock_irqrestore(&host->lock, flags); in msm_config_cm_dll_phase()
480 * setting for SD3.0 UHS-I card read operation (in SDR104
482 * HS400/HS200 timing mode).
487 static int msm_find_most_appropriate_phase(struct sdhci_host *host, in msm_find_most_appropriate_phase()
490 int ret; in msm_find_most_appropriate_phase()
493 int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0; in msm_find_most_appropriate_phase()
494 int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0; in msm_find_most_appropriate_phase()
496 struct mmc_host *mmc = host->mmc; in msm_find_most_appropriate_phase()
501 return -EINVAL; in msm_find_most_appropriate_phase()
519 return -EINVAL; in msm_find_most_appropriate_phase()
521 /* Check if phase-0 is present in first valid window? */ in msm_find_most_appropriate_phase()
552 return -EINVAL; in msm_find_most_appropriate_phase()
576 i--; in msm_find_most_appropriate_phase()
581 ret = -EINVAL; in msm_find_most_appropriate_phase()
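The fragments above hide most of msm_find_most_appropriate_phase(). The underlying idea is simple: the caller passes the DLL output phases (0..15) that survived tuning, the function groups them into runs of consecutive phases (also merging the run that wraps from 15 back to 0), and returns a phase from inside the longest run. A self-contained sketch of that idea, without the wrap-around merge and taking a plain middle-of-window pick, whereas the driver biases the choice within the window:

static int pick_phase(const u8 *good, int n)	/* good[] holds phases 0..15 that passed tuning */
{
	int i, start = 0, len = 1, best_start = 0, best_len = 0;

	for (i = 1; i <= n; i++) {
		if (i < n && good[i] == good[i - 1] + 1) {
			len++;
			continue;
		}
		if (len > best_len) {		/* longest run of consecutive phases so far */
			best_len = len;
			best_start = start;
		}
		start = i;
		len = 1;
	}
	return best_len ? good[best_start + best_len / 2] : -EINVAL;
}

For example, good phases {4, 5, 6, 7, 8, 11, 12} yield phase 6, the middle of the five-phase run.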
596 if (host->clock <= 112000000) in msm_cm_dll_set_freq()
598 else if (host->clock <= 125000000) in msm_cm_dll_set_freq()
600 else if (host->clock <= 137000000) in msm_cm_dll_set_freq()
602 else if (host->clock <= 150000000) in msm_cm_dll_set_freq()
604 else if (host->clock <= 162000000) in msm_cm_dll_set_freq()
606 else if (host->clock <= 175000000) in msm_cm_dll_set_freq()
608 else if (host->clock <= 187000000) in msm_cm_dll_set_freq()
610 else if (host->clock <= 200000000) in msm_cm_dll_set_freq()
613 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); in msm_cm_dll_set_freq()
616 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); in msm_cm_dll_set_freq()
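msm_cm_dll_set_freq() is just the ladder above: it buckets host->clock into one of eight bins and programs the resulting 3-bit code into the CDR_SELEXT field of CORE_DLL_CONFIG (the exact field macro name is not visible in these fragments and is assumed). A table-driven sketch of the same mapping, clamping anything above 200 MHz to the top bin:

static const unsigned int dll_freq_bins[] = {
	112000000, 125000000, 137000000, 150000000,
	162000000, 175000000, 187000000, 200000000,
};

static u32 msm_clock_to_mclk_freq(unsigned int clock)
{
	u32 sel;

	for (sel = 0; sel < ARRAY_SIZE(dll_freq_bins) - 1; sel++)
		if (clock <= dll_freq_bins[sel])
			break;
	return sel;	/* 0..7, written into the CDR_SELEXT field of DLL_CONFIG */
}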
619 /* Initialize the DLL (Programmable Delay Line) */
620 static int msm_init_cm_dll(struct sdhci_host *host) in msm_init_cm_dll()
622 struct mmc_host *mmc = host->mmc; in msm_init_cm_dll()
625 int wait_cnt = 50; in msm_init_cm_dll()
629 msm_host->offset; in msm_init_cm_dll()
631 if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk)) in msm_init_cm_dll()
632 xo_clk = clk_get_rate(msm_host->xo_clk); in msm_init_cm_dll()
634 spin_lock_irqsave(&host->lock, flags); in msm_init_cm_dll()
641 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); in msm_init_cm_dll()
643 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); in msm_init_cm_dll()
645 if (msm_host->dll_config) in msm_init_cm_dll()
646 writel_relaxed(msm_host->dll_config, in msm_init_cm_dll()
647 host->ioaddr + msm_offset->core_dll_config); in msm_init_cm_dll()
649 if (msm_host->use_14lpp_dll_reset) { in msm_init_cm_dll()
650 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
651 msm_offset->core_dll_config); in msm_init_cm_dll()
653 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
654 msm_offset->core_dll_config); in msm_init_cm_dll()
656 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
657 msm_offset->core_dll_config_2); in msm_init_cm_dll()
659 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
660 msm_offset->core_dll_config_2); in msm_init_cm_dll()
663 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
664 msm_offset->core_dll_config); in msm_init_cm_dll()
666 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
667 msm_offset->core_dll_config); in msm_init_cm_dll()
669 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
670 msm_offset->core_dll_config); in msm_init_cm_dll()
672 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
673 msm_offset->core_dll_config); in msm_init_cm_dll()
675 if (!msm_host->dll_config) in msm_init_cm_dll()
678 if (msm_host->use_14lpp_dll_reset && in msm_init_cm_dll()
679 !IS_ERR_OR_NULL(msm_host->xo_clk)) { in msm_init_cm_dll()
682 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
683 msm_offset->core_dll_config_2); in msm_init_cm_dll()
686 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8), in msm_init_cm_dll()
689 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4), in msm_init_cm_dll()
692 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
693 msm_offset->core_dll_config_2); in msm_init_cm_dll()
697 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
698 msm_offset->core_dll_config_2); in msm_init_cm_dll()
703 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
704 msm_offset->core_dll_config); in msm_init_cm_dll()
706 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
707 msm_offset->core_dll_config); in msm_init_cm_dll()
709 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
710 msm_offset->core_dll_config); in msm_init_cm_dll()
712 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
713 msm_offset->core_dll_config); in msm_init_cm_dll()
715 if (msm_host->use_14lpp_dll_reset) { in msm_init_cm_dll()
716 if (!msm_host->dll_config) in msm_init_cm_dll()
718 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
719 msm_offset->core_dll_config_2); in msm_init_cm_dll()
721 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
722 msm_offset->core_dll_config_2); in msm_init_cm_dll()
729 if (msm_host->uses_tassadar_dll) { in msm_init_cm_dll()
732 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
733 msm_offset->core_dll_usr_ctl); in msm_init_cm_dll()
735 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
736 msm_offset->core_dll_config_3); in msm_init_cm_dll()
738 if (msm_host->clk_rate < 150000000) in msm_init_cm_dll()
742 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
743 msm_offset->core_dll_config_3); in msm_init_cm_dll()
746 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
747 msm_offset->core_dll_config); in msm_init_cm_dll()
749 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
750 msm_offset->core_dll_config); in msm_init_cm_dll()
752 config = readl_relaxed(host->ioaddr + in msm_init_cm_dll()
753 msm_offset->core_dll_config); in msm_init_cm_dll()
755 writel_relaxed(config, host->ioaddr + in msm_init_cm_dll()
756 msm_offset->core_dll_config); in msm_init_cm_dll()
759 while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) & in msm_init_cm_dll()
762 if (--wait_cnt == 0) { in msm_init_cm_dll()
765 spin_unlock_irqrestore(&host->lock, flags); in msm_init_cm_dll()
766 return -ETIMEDOUT; in msm_init_cm_dll()
771 spin_unlock_irqrestore(&host->lock, flags); in msm_init_cm_dll()
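Lines 686/689 program a ratio of MCLK to the TCXO reference into DLL_CONFIG_2 as part of the 14lpp DLL reset sequence. A quick sanity check with illustrative numbers (the 19.2 MHz XO rate is an assumption, not taken from these fragments; the real value comes from clk_get_rate(msm_host->xo_clk)):

	u64 xo_clk = 19200000;						/* assumed TCXO rate */
	u32 mclk_freq;

	mclk_freq = DIV_ROUND_CLOSEST_ULL((u64)192000000 * 4, xo_clk);	/* = 40 for a 192 MHz MCLK */
	/* Tassadar-style DLLs (uses_tassadar_dll) scale by 8 instead, giving 80 here. */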
781 msm_host->offset; in msm_hc_select_default()
783 if (!msm_host->use_cdclp533) { in msm_hc_select_default()
784 config = readl_relaxed(host->ioaddr + in msm_hc_select_default()
785 msm_offset->core_vendor_spec3); in msm_hc_select_default()
787 writel_relaxed(config, host->ioaddr + in msm_hc_select_default()
788 msm_offset->core_vendor_spec3); in msm_hc_select_default()
791 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); in msm_hc_select_default()
794 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); in msm_hc_select_default()
803 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); in msm_hc_select_default()
806 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); in msm_hc_select_default()
819 struct mmc_ios ios = host->mmc->ios; in msm_hc_select_hs400()
821 int rc; in msm_hc_select_hs400()
823 msm_host->offset; in msm_hc_select_hs400()
826 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); in msm_hc_select_hs400()
830 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); in msm_hc_select_hs400()
832 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC in msm_hc_select_hs400()
835 if ((msm_host->tuning_done || ios.enhanced_strobe) && in msm_hc_select_hs400()
836 !msm_host->calibration_done) { in msm_hc_select_hs400()
837 config = readl_relaxed(host->ioaddr + in msm_hc_select_hs400()
838 msm_offset->core_vendor_spec); in msm_hc_select_hs400()
841 writel_relaxed(config, host->ioaddr + in msm_hc_select_hs400()
842 msm_offset->core_vendor_spec); in msm_hc_select_hs400()
844 if (!msm_host->clk_rate && !msm_host->use_cdclp533) { in msm_hc_select_hs400()
850 rc = readl_relaxed_poll_timeout(host->ioaddr + in msm_hc_select_hs400()
851 msm_offset->core_dll_status, in msm_hc_select_hs400()
857 if (rc == -ETIMEDOUT) in msm_hc_select_hs400()
859 mmc_hostname(host->mmc), dll_lock); in msm_hc_select_hs400()
869 * sdhci_msm_hc_select_mode :- In general all timing modes are
871 * eMMC specific HS200/HS400 doesn't have their respective modes
874 * HS200 - SDR104 (Since they both are equivalent in functionality)
875 * HS400 - This involves multiple configurations
876 * Initially SDR104 - when tuning is required as HS200
877 * Then when switching to DDR @ 400MHz (HS400) we use
883 * HS400 - divided clock (free running MCLK/2)
884 * All other modes - default (free running MCLK)
888 struct mmc_ios ios = host->mmc->ios; in sdhci_msm_hc_select_mode()
891 host->flags & SDHCI_HS400_TUNING) in sdhci_msm_hc_select_mode()
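Putting the comment above together with the two select helpers, the mode dispatch reduces to the sketch below (a reconstruction consistent with lines 888-891, not a verbatim quote):

static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);	/* HC_SELECT_IN override + divided MCLK/2 */
	else
		msm_hc_select_default(host);	/* plain UHS mode select, free-running MCLK */
}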
897 static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host) in sdhci_msm_cdclp533_calibration()
902 int ret; in sdhci_msm_cdclp533_calibration()
904 msm_host->offset; in sdhci_msm_cdclp533_calibration()
906 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); in sdhci_msm_cdclp533_calibration()
909 * Retuning in HS400 (DDR mode) will fail, just reset the in sdhci_msm_cdclp533_calibration()
916 /* Set the selected phase in delay line hw block */ in sdhci_msm_cdclp533_calibration()
917 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); in sdhci_msm_cdclp533_calibration()
921 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); in sdhci_msm_cdclp533_calibration()
923 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); in sdhci_msm_cdclp533_calibration()
925 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); in sdhci_msm_cdclp533_calibration()
927 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); in sdhci_msm_cdclp533_calibration()
929 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); in sdhci_msm_cdclp533_calibration()
931 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); in sdhci_msm_cdclp533_calibration()
933 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); in sdhci_msm_cdclp533_calibration()
935 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); in sdhci_msm_cdclp533_calibration()
937 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); in sdhci_msm_cdclp533_calibration()
939 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); in sdhci_msm_cdclp533_calibration()
943 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); in sdhci_msm_cdclp533_calibration()
944 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1); in sdhci_msm_cdclp533_calibration()
945 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); in sdhci_msm_cdclp533_calibration()
946 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1); in sdhci_msm_cdclp533_calibration()
947 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG); in sdhci_msm_cdclp533_calibration()
948 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG); in sdhci_msm_cdclp533_calibration()
949 writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG); in sdhci_msm_cdclp533_calibration()
950 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG); in sdhci_msm_cdclp533_calibration()
951 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG); in sdhci_msm_cdclp533_calibration()
955 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); in sdhci_msm_cdclp533_calibration()
957 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); in sdhci_msm_cdclp533_calibration()
959 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); in sdhci_msm_cdclp533_calibration()
961 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); in sdhci_msm_cdclp533_calibration()
963 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); in sdhci_msm_cdclp533_calibration()
965 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); in sdhci_msm_cdclp533_calibration()
967 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); in sdhci_msm_cdclp533_calibration()
969 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); in sdhci_msm_cdclp533_calibration()
971 ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0, in sdhci_msm_cdclp533_calibration()
976 if (ret == -ETIMEDOUT) { in sdhci_msm_cdclp533_calibration()
978 mmc_hostname(host->mmc), __func__); in sdhci_msm_cdclp533_calibration()
982 ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0) in sdhci_msm_cdclp533_calibration()
986 mmc_hostname(host->mmc), __func__, ret); in sdhci_msm_cdclp533_calibration()
987 ret = -EINVAL; in sdhci_msm_cdclp533_calibration()
991 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); in sdhci_msm_cdclp533_calibration()
993 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); in sdhci_msm_cdclp533_calibration()
995 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), in sdhci_msm_cdclp533_calibration()
1000 static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) in sdhci_msm_cm_dll_sdc4_calibration()
1002 struct mmc_host *mmc = host->mmc; in sdhci_msm_cm_dll_sdc4_calibration()
1004 int ret; in sdhci_msm_cm_dll_sdc4_calibration()
1010 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); in sdhci_msm_cm_dll_sdc4_calibration()
1019 if (msm_host->updated_ddr_cfg) in sdhci_msm_cm_dll_sdc4_calibration()
1020 ddr_cfg_offset = msm_offset->core_ddr_config; in sdhci_msm_cm_dll_sdc4_calibration()
1022 ddr_cfg_offset = msm_offset->core_ddr_config_old; in sdhci_msm_cm_dll_sdc4_calibration()
1023 writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset); in sdhci_msm_cm_dll_sdc4_calibration()
1025 if (mmc->ios.enhanced_strobe) { in sdhci_msm_cm_dll_sdc4_calibration()
1026 config = readl_relaxed(host->ioaddr + in sdhci_msm_cm_dll_sdc4_calibration()
1027 msm_offset->core_ddr_200_cfg); in sdhci_msm_cm_dll_sdc4_calibration()
1029 writel_relaxed(config, host->ioaddr + in sdhci_msm_cm_dll_sdc4_calibration()
1030 msm_offset->core_ddr_200_cfg); in sdhci_msm_cm_dll_sdc4_calibration()
1033 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2); in sdhci_msm_cm_dll_sdc4_calibration()
1035 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2); in sdhci_msm_cm_dll_sdc4_calibration()
1037 ret = readl_relaxed_poll_timeout(host->ioaddr + in sdhci_msm_cm_dll_sdc4_calibration()
1038 msm_offset->core_dll_status, in sdhci_msm_cm_dll_sdc4_calibration()
1043 if (ret == -ETIMEDOUT) { in sdhci_msm_cm_dll_sdc4_calibration()
1045 mmc_hostname(host->mmc), __func__); in sdhci_msm_cm_dll_sdc4_calibration()
1052 * and MCLK must be switched on for at least 1us before DATA in sdhci_msm_cm_dll_sdc4_calibration()
1057 if (!msm_host->use_14lpp_dll_reset) { in sdhci_msm_cm_dll_sdc4_calibration()
1058 config = readl_relaxed(host->ioaddr + in sdhci_msm_cm_dll_sdc4_calibration()
1059 msm_offset->core_vendor_spec3); in sdhci_msm_cm_dll_sdc4_calibration()
1061 writel_relaxed(config, host->ioaddr + in sdhci_msm_cm_dll_sdc4_calibration()
1062 msm_offset->core_vendor_spec3); in sdhci_msm_cm_dll_sdc4_calibration()
1071 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), in sdhci_msm_cm_dll_sdc4_calibration()
1076 static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host) in sdhci_msm_hs400_dll_calibration()
1080 struct mmc_host *mmc = host->mmc; in sdhci_msm_hs400_dll_calibration()
1081 int ret; in sdhci_msm_hs400_dll_calibration()
1084 msm_host->offset; in sdhci_msm_hs400_dll_calibration()
1086 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); in sdhci_msm_hs400_dll_calibration()
1089 * Retuning in HS400 (DDR mode) will fail, just reset the in sdhci_msm_hs400_dll_calibration()
1096 if (!mmc->ios.enhanced_strobe) { in sdhci_msm_hs400_dll_calibration()
1097 /* Set the selected phase in delay line hw block */ in sdhci_msm_hs400_dll_calibration()
1099 msm_host->saved_tuning_phase); in sdhci_msm_hs400_dll_calibration()
1102 config = readl_relaxed(host->ioaddr + in sdhci_msm_hs400_dll_calibration()
1103 msm_offset->core_dll_config); in sdhci_msm_hs400_dll_calibration()
1105 writel_relaxed(config, host->ioaddr + in sdhci_msm_hs400_dll_calibration()
1106 msm_offset->core_dll_config); in sdhci_msm_hs400_dll_calibration()
1109 if (msm_host->use_cdclp533) in sdhci_msm_hs400_dll_calibration()
1114 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), in sdhci_msm_hs400_dll_calibration()
1121 struct mmc_ios *ios = &host->mmc->ios; in sdhci_msm_is_tuning_needed()
1124 * Tuning is required for SDR104, HS200 and HS400 cards and in sdhci_msm_is_tuning_needed()
1127 if (host->clock <= CORE_FREQ_100MHZ || in sdhci_msm_is_tuning_needed()
1128 !(ios->timing == MMC_TIMING_MMC_HS400 || in sdhci_msm_is_tuning_needed()
1129 ios->timing == MMC_TIMING_MMC_HS200 || in sdhci_msm_is_tuning_needed()
1130 ios->timing == MMC_TIMING_UHS_SDR104) || in sdhci_msm_is_tuning_needed()
1131 ios->enhanced_strobe) in sdhci_msm_is_tuning_needed()
1137 static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host) in sdhci_msm_restore_sdr_dll_config()
1141 int ret; in sdhci_msm_restore_sdr_dll_config()
1156 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); in sdhci_msm_restore_sdr_dll_config()
1164 u32 config, oldconfig = readl_relaxed(host->ioaddr + in sdhci_msm_set_cdr()
1165 msm_offset->core_dll_config); in sdhci_msm_set_cdr()
1177 writel_relaxed(config, host->ioaddr + in sdhci_msm_set_cdr()
1178 msm_offset->core_dll_config); in sdhci_msm_set_cdr()
1182 static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) in sdhci_msm_execute_tuning()
1185 int tuning_seq_cnt = 10; in sdhci_msm_execute_tuning()
1187 int rc; in sdhci_msm_execute_tuning()
1188 struct mmc_ios ios = host->mmc->ios; in sdhci_msm_execute_tuning()
1193 msm_host->use_cdr = false; in sdhci_msm_execute_tuning()
1198 /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ in sdhci_msm_execute_tuning()
1199 msm_host->use_cdr = true; in sdhci_msm_execute_tuning()
1203 * HS400 settings. in sdhci_msm_execute_tuning()
1205 msm_host->tuning_done = 0; in sdhci_msm_execute_tuning()
1208 * For HS400, tuning in HS200 timing requires: in sdhci_msm_execute_tuning()
1209 * - select MCLK/2 in VENDOR_SPEC in sdhci_msm_execute_tuning()
1210 * - program MCLK to 400MHz (or nearest supported) in GCC in sdhci_msm_execute_tuning()
1212 if (host->flags & SDHCI_HS400_TUNING) { in sdhci_msm_execute_tuning()
1215 host->flags &= ~SDHCI_HS400_TUNING; in sdhci_msm_execute_tuning()
1226 /* Set the phase in delay line hw block */ in sdhci_msm_execute_tuning()
1251 if (--tuning_seq_cnt) { in sdhci_msm_execute_tuning()
1265 * Finally set the selected phase in delay in sdhci_msm_execute_tuning()
1271 msm_host->saved_tuning_phase = phase; in sdhci_msm_execute_tuning()
1275 if (--tuning_seq_cnt) in sdhci_msm_execute_tuning()
1280 rc = -EIO; in sdhci_msm_execute_tuning()
1284 msm_host->tuning_done = true; in sdhci_msm_execute_tuning()
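Most of the sweep inside sdhci_msm_execute_tuning() is elided by the search, so here is a hedged outline of the loop between the CDR setup and the saved_tuning_phase assignment at line 1271. mmc_send_tuning() and MAX_PHASES (16) are standard parts of the MMC core and this driver, but the exact control flow below is a simplified reconstruction:

	u8 tuned_phases[MAX_PHASES], tuned_phase_cnt = 0;
	u8 phase = 0;

	do {
		/* Set the phase in the delay line hw block (line 1226). */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		/* Issue CMD19/CMD21 and check the returned tuning block. */
		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc)
			tuned_phases[tuned_phase_cnt++] = phase;
	} while (++phase < MAX_PHASES);

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		phase = rc;

		/* Finally set the selected phase in the delay line (line 1265). */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
	}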
1289 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
1298 int ret; in sdhci_msm_hs400()
1300 if (host->clock > CORE_FREQ_100MHZ && in sdhci_msm_hs400()
1301 (msm_host->tuning_done || ios->enhanced_strobe) && in sdhci_msm_hs400()
1302 !msm_host->calibration_done) { in sdhci_msm_hs400()
1305 msm_host->calibration_done = true; in sdhci_msm_hs400()
1307 pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n", in sdhci_msm_hs400()
1308 mmc_hostname(host->mmc), ret); in sdhci_msm_hs400()
1313 unsigned int uhs) in sdhci_msm_set_uhs_signaling()
1315 struct mmc_host *mmc = host->mmc; in sdhci_msm_set_uhs_signaling()
1321 msm_host->offset; in sdhci_msm_set_uhs_signaling()
1353 if (host->clock <= CORE_FREQ_100MHZ) { in sdhci_msm_set_uhs_signaling()
1362 config = readl_relaxed(host->ioaddr + in sdhci_msm_set_uhs_signaling()
1363 msm_offset->core_dll_config); in sdhci_msm_set_uhs_signaling()
1365 writel_relaxed(config, host->ioaddr + in sdhci_msm_set_uhs_signaling()
1366 msm_offset->core_dll_config); in sdhci_msm_set_uhs_signaling()
1368 config = readl_relaxed(host->ioaddr + in sdhci_msm_set_uhs_signaling()
1369 msm_offset->core_dll_config); in sdhci_msm_set_uhs_signaling()
1371 writel_relaxed(config, host->ioaddr + in sdhci_msm_set_uhs_signaling()
1372 msm_offset->core_dll_config); in sdhci_msm_set_uhs_signaling()
1378 msm_host->calibration_done = false; in sdhci_msm_set_uhs_signaling()
1382 mmc_hostname(host->mmc), host->clock, uhs, ctrl_2); in sdhci_msm_set_uhs_signaling()
1385 if (mmc->ios.timing == MMC_TIMING_MMC_HS400) in sdhci_msm_set_uhs_signaling()
1386 sdhci_msm_hs400(host, &mmc->ios); in sdhci_msm_set_uhs_signaling()
1389 static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level) in sdhci_msm_set_pincfg()
1391 struct platform_device *pdev = msm_host->pdev; in sdhci_msm_set_pincfg()
1392 int ret; in sdhci_msm_set_pincfg()
1395 ret = pinctrl_pm_select_default_state(&pdev->dev); in sdhci_msm_set_pincfg()
1397 ret = pinctrl_pm_select_sleep_state(&pdev->dev); in sdhci_msm_set_pincfg()
1402 static int sdhci_msm_set_vmmc(struct mmc_host *mmc) in sdhci_msm_set_vmmc()
1404 if (IS_ERR(mmc->supply.vmmc)) in sdhci_msm_set_vmmc()
1407 return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd); in sdhci_msm_set_vmmc()
1410 static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host, in msm_toggle_vqmmc()
1413 int ret; in msm_toggle_vqmmc()
1416 if (msm_host->vqmmc_enabled == level) in msm_toggle_vqmmc()
1421 if (msm_host->caps_0 & CORE_3_0V_SUPPORT) in msm_toggle_vqmmc()
1423 else if (msm_host->caps_0 & CORE_1_8V_SUPPORT) in msm_toggle_vqmmc()
1426 if (msm_host->caps_0 & CORE_VOLT_SUPPORT) { in msm_toggle_vqmmc()
1434 ret = regulator_enable(mmc->supply.vqmmc); in msm_toggle_vqmmc()
1436 ret = regulator_disable(mmc->supply.vqmmc); in msm_toggle_vqmmc()
1443 msm_host->vqmmc_enabled = level; in msm_toggle_vqmmc()
1448 static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host, in msm_config_vqmmc_mode()
1451 int load, ret; in msm_config_vqmmc_mode()
1454 ret = regulator_set_load(mmc->supply.vqmmc, load); in msm_config_vqmmc_mode()
1461 static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host, in sdhci_msm_set_vqmmc()
1464 int ret; in sdhci_msm_set_vqmmc()
1467 if (IS_ERR(mmc->supply.vqmmc) || in sdhci_msm_set_vqmmc()
1468 (mmc->ios.power_mode == MMC_POWER_UNDEFINED)) in sdhci_msm_set_vqmmc()
1481 mmc->card && mmc_card_mmc(mmc->card); in sdhci_msm_set_vqmmc()
1493 init_waitqueue_head(&msm_host->pwr_irq_wait); in sdhci_msm_init_pwr_irq_wait()
1499 wake_up(&msm_host->pwr_irq_wait); in sdhci_msm_complete_pwr_irq_wait()
1518 msm_host->offset; in sdhci_msm_check_power_status()
1521 mmc_hostname(host->mmc), __func__, req_type, in sdhci_msm_check_power_status()
1522 msm_host->curr_pwr_state, msm_host->curr_io_level); in sdhci_msm_check_power_status()
1527 * Since sdhci-msm-v5, this bit has been removed and SW must consider in sdhci_msm_check_power_status()
1530 if (!msm_host->mci_removed) in sdhci_msm_check_power_status()
1532 msm_offset->core_generics); in sdhci_msm_check_power_status()
1539 * The IRQ for request type IO High/LOW will be generated when - in sdhci_msm_check_power_status()
1547 * for host->pwr to handle a case where IO voltage high request is in sdhci_msm_check_power_status()
1550 if ((req_type & REQ_IO_HIGH) && !host->pwr) { in sdhci_msm_check_power_status()
1552 mmc_hostname(host->mmc), req_type); in sdhci_msm_check_power_status()
1555 if ((req_type & msm_host->curr_pwr_state) || in sdhci_msm_check_power_status()
1556 (req_type & msm_host->curr_io_level)) in sdhci_msm_check_power_status()
1565 if (!wait_event_timeout(msm_host->pwr_irq_wait, in sdhci_msm_check_power_status()
1566 msm_host->pwr_irq_flag, in sdhci_msm_check_power_status()
1568 dev_warn(&msm_host->pdev->dev, in sdhci_msm_check_power_status()
1570 mmc_hostname(host->mmc), req_type); in sdhci_msm_check_power_status()
1572 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc), in sdhci_msm_check_power_status()
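sdhci_msm_check_power_status() is a sleep-until-acknowledged helper: the caller waits on pwr_irq_wait until the dedicated power IRQ reports that the requested bus-power or I/O-voltage change actually took effect. A minimal sketch of the IRQ/waiter pairing; MSM_PWR_IRQ_TIMEOUT_MS is assumed to be the driver's timeout constant and is not visible in these fragments:

	/* IRQ side (sdhci_msm_pwr_irq, lines 1726-1733): ack in hardware, then wake the waiter. */
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_handle_pwr_irq(host, irq);
	sdhci_msm_complete_pwr_irq_wait(msm_host);	/* wake_up(&msm_host->pwr_irq_wait), line 1499 */

	/* Waiter side (line 1565): sleep until the flag is set or the timeout hits. */
	if (!wait_event_timeout(msm_host->pwr_irq_wait, msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
		sdhci_msm_dump_pwr_ctrl_regs(host);	/* no ack, dump the PWRCTL registers */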
1581 msm_host->offset; in sdhci_msm_dump_pwr_ctrl_regs()
1584 mmc_hostname(host->mmc), in sdhci_msm_dump_pwr_ctrl_regs()
1585 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status), in sdhci_msm_dump_pwr_ctrl_regs()
1586 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask), in sdhci_msm_dump_pwr_ctrl_regs()
1587 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl)); in sdhci_msm_dump_pwr_ctrl_regs()
1590 static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) in sdhci_msm_handle_pwr_irq()
1594 struct mmc_host *mmc = host->mmc; in sdhci_msm_handle_pwr_irq()
1596 int retry = 10, ret; in sdhci_msm_handle_pwr_irq()
1599 const struct sdhci_msm_offset *msm_offset = msm_host->offset; in sdhci_msm_handle_pwr_irq()
1602 msm_offset->core_pwrctl_status); in sdhci_msm_handle_pwr_irq()
1606 msm_offset->core_pwrctl_clear); in sdhci_msm_handle_pwr_irq()
1616 msm_offset->core_pwrctl_status)) { in sdhci_msm_handle_pwr_irq()
1619 mmc_hostname(host->mmc), irq_status); in sdhci_msm_handle_pwr_irq()
1625 msm_offset->core_pwrctl_clear); in sdhci_msm_handle_pwr_irq()
1626 retry--; in sdhci_msm_handle_pwr_irq()
1664 if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) { in sdhci_msm_handle_pwr_irq()
1665 ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios); in sdhci_msm_handle_pwr_irq()
1669 mmc->ios.signal_voltage, mmc->ios.vdd, in sdhci_msm_handle_pwr_irq()
1681 msm_offset->core_pwrctl_ctl); in sdhci_msm_handle_pwr_irq()
1687 if (msm_host->caps_0 & CORE_VOLT_SUPPORT) { in sdhci_msm_handle_pwr_irq()
1700 config = readl_relaxed(host->ioaddr + in sdhci_msm_handle_pwr_irq()
1701 msm_offset->core_vendor_spec); in sdhci_msm_handle_pwr_irq()
1705 (msm_host->caps_0 & CORE_3_0V_SUPPORT)) in sdhci_msm_handle_pwr_irq()
1708 (msm_host->caps_0 & CORE_1_8V_SUPPORT)) in sdhci_msm_handle_pwr_irq()
1712 writel_relaxed(new_config, host->ioaddr + in sdhci_msm_handle_pwr_irq()
1713 msm_offset->core_vendor_spec); in sdhci_msm_handle_pwr_irq()
1717 msm_host->curr_pwr_state = pwr_state; in sdhci_msm_handle_pwr_irq()
1719 msm_host->curr_io_level = io_level; in sdhci_msm_handle_pwr_irq()
1722 mmc_hostname(msm_host->mmc), __func__, irq, irq_status, in sdhci_msm_handle_pwr_irq()
1726 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) in sdhci_msm_pwr_irq()
1733 msm_host->pwr_irq_flag = 1; in sdhci_msm_pwr_irq()
1740 static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host) in sdhci_msm_get_max_clock()
1744 struct clk *core_clk = msm_host->bulk_clks[0].clk; in sdhci_msm_get_max_clock()
1749 static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host) in sdhci_msm_get_min_clock()
1755 * __sdhci_msm_set_clock - sdhci_msm clock control.
1762 static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) in __sdhci_msm_set_clock()
1780 /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
1781 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) in sdhci_msm_set_clock()
1787 host->mmc->actual_clock = msm_host->clk_rate = 0; in sdhci_msm_set_clock()
1823 writel((val), (host)->ice_mem + (reg))
1825 readl((host)->ice_mem + (reg))
1829 struct device *dev = mmc_dev(msm_host->mmc); in sdhci_msm_ice_supported()
1831 int major = regval >> 24; in sdhci_msm_ice_supported()
1832 int minor = (regval >> 16) & 0xFF; in sdhci_msm_ice_supported()
1833 int step = regval & 0xFFFF; in sdhci_msm_ice_supported()
1861 static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, in sdhci_msm_ice_init()
1864 struct mmc_host *mmc = msm_host->mmc; in sdhci_msm_ice_init()
1871 res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM, in sdhci_msm_ice_init()
1883 msm_host->ice_mem = devm_ioremap_resource(dev, res); in sdhci_msm_ice_init()
1884 if (IS_ERR(msm_host->ice_mem)) in sdhci_msm_ice_init()
1885 return PTR_ERR(msm_host->ice_mem); in sdhci_msm_ice_init()
1890 mmc->caps2 |= MMC_CAP2_CRYPTO; in sdhci_msm_ice_init()
1905 * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0 in sdhci_msm_ice_low_power_mode_enable()
1918 /* ICE HPG requires delay before writing */ in sdhci_msm_ice_optimization_enable()
1925 * Wait until the ICE BIST (built-in self-test) has completed.
1934 * the full storage stack anyway, and not relying on hardware-level self-tests.
1936 static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host) in sdhci_msm_ice_wait_bist_status()
1939 int err; in sdhci_msm_ice_wait_bist_status()
1941 err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS, in sdhci_msm_ice_wait_bist_status()
1945 dev_err(mmc_dev(msm_host->mmc), in sdhci_msm_ice_wait_bist_status()
1946 "Timed out waiting for ICE self-test to complete\n"); in sdhci_msm_ice_wait_bist_status()
1952 if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)) in sdhci_msm_ice_enable()
1959 static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host) in sdhci_msm_ice_resume()
1961 if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)) in sdhci_msm_ice_resume()
1968 * vendor-specific SCM calls for this; it doesn't support the standard way.
1970 static int sdhci_msm_program_key(struct cqhci_host *cq_host, in sdhci_msm_program_key()
1972 int slot) in sdhci_msm_program_key()
1974 struct device *dev = mmc_dev(cq_host->mmc); in sdhci_msm_program_key()
1980 int i; in sdhci_msm_program_key()
1981 int err; in sdhci_msm_program_key()
1983 if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)) in sdhci_msm_program_key()
1986 /* Only AES-256-XTS has been tested so far. */ in sdhci_msm_program_key()
1987 cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx]; in sdhci_msm_program_key()
1993 return -EINVAL; in sdhci_msm_program_key()
1996 memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE); in sdhci_msm_program_key()
1999 * The SCM call byte-swaps the 32-bit words of the key. So we have to in sdhci_msm_program_key()
2007 cfg->data_unit_size); in sdhci_msm_program_key()
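sdhci_msm_program_key() hands the raw AES-256-XTS key to TrustZone through qcom_scm_ice_set_key(); since that SCM interface byte-swaps every 32-bit word of the key, the driver pre-swaps it so the hardware ends up with the right material. A sketch of that step; the union layout and the final zeroing are reconstructed from the driver's general shape, not quoted from these fragments:

	union {
		u8  bytes[AES_256_XTS_KEY_SIZE];
		u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
	} key;
	int i;

	memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);

	/* Pre-swap each 32-bit word so the SCM call's own swap cancels out. */
	for (i = 0; i < ARRAY_SIZE(key.words); i++)
		__cpu_to_be32s(&key.words[i]);

	err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
				   QCOM_SCM_ICE_CIPHER_AES_256_XTS,
				   cfg->data_unit_size);
	memzero_explicit(&key, sizeof(key));	/* don't leave key material on the stack */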
2017 static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, in sdhci_msm_ice_init()
2027 static inline int __maybe_unused
2042 int cmd_error = 0; in sdhci_msm_cqe_irq()
2043 int data_error = 0; in sdhci_msm_cqe_irq()
2048 cqhci_irq(host->mmc, intmask, cmd_error, data_error); in sdhci_msm_cqe_irq()
2070 * on 16-byte descriptors in 64bit mode. in sdhci_msm_cqe_disable()
2072 if (host->flags & SDHCI_USE_64_BIT_DMA) in sdhci_msm_cqe_disable()
2073 host->desc_sz = 16; in sdhci_msm_cqe_disable()
2075 spin_lock_irqsave(&host->lock, flags); in sdhci_msm_cqe_disable()
2088 spin_unlock_irqrestore(&host->lock, flags); in sdhci_msm_cqe_disable()
2093 static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) in sdhci_msm_set_timeout() argument
2097 __sdhci_set_timeout(host, cmd); in sdhci_msm_set_timeout()
2102 * using 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock. in sdhci_msm_set_timeout()
2104 if (cmd && cmd->data && host->clock > 400000 && in sdhci_msm_set_timeout()
2105 host->clock <= 50000000 && in sdhci_msm_set_timeout()
2106 ((1 << (count + start)) > (10 * host->clock))) in sdhci_msm_set_timeout()
2107 host->data_timeout = 22LL * NSEC_PER_SEC; in sdhci_msm_set_timeout()
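As a quick check of the comment's formula with illustrative numbers: the hardware window is 4 * 2^(count + 13) / host->clock seconds, so count = 14 at a 25 MHz clock gives roughly 4 * 2^27 / 25e6, about 21.5 s, the same ballpark as the 22-second software fallback programmed at line 2107. The snippet below only restates that arithmetic; the specific count and clock values are examples, not taken from the driver:

	unsigned int count = 14;				/* example timeout count */
	unsigned int clock = 25000000;				/* 25 MHz, example only */
	u64 hw_timeout_ns = div_u64(4ULL * (1ULL << (count + 13)) * NSEC_PER_SEC, clock);
	/* -> ~21.5e9 ns, so a 22 s software timeout comfortably covers the hardware window */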
2118 static int sdhci_msm_cqe_add_host(struct sdhci_host *host, in sdhci_msm_cqe_add_host()
2126 int ret; in sdhci_msm_cqe_add_host()
2132 if (host->caps & SDHCI_CAN_64BIT) in sdhci_msm_cqe_add_host()
2133 host->alloc_desc_sz = 16; in sdhci_msm_cqe_add_host()
2142 dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); in sdhci_msm_cqe_add_host()
2146 msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; in sdhci_msm_cqe_add_host()
2147 cq_host->ops = &sdhci_msm_cqhci_ops; in sdhci_msm_cqe_add_host()
2149 dma64 = host->flags & SDHCI_USE_64_BIT_DMA; in sdhci_msm_cqe_add_host()
2155 ret = cqhci_init(cq_host, host->mmc, dma64); in sdhci_msm_cqe_add_host()
2157 dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", in sdhci_msm_cqe_add_host()
2158 mmc_hostname(host->mmc), ret); in sdhci_msm_cqe_add_host()
2173 if (host->flags & SDHCI_USE_64_BIT_DMA) in sdhci_msm_cqe_add_host()
2174 host->desc_sz = 12; in sdhci_msm_cqe_add_host()
2180 dev_info(&pdev->dev, "%s: CQE init: success\n", in sdhci_msm_cqe_add_host()
2181 mmc_hostname(host->mmc)); in sdhci_msm_cqe_add_host()
2196 static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) in __sdhci_msm_check_write()
2208 if (host->pwr && (val & SDHCI_RESET_ALL)) in __sdhci_msm_check_write()
2215 msm_host->transfer_mode = val; in __sdhci_msm_check_write()
2218 if (!msm_host->use_cdr) in __sdhci_msm_check_write()
2220 if ((msm_host->transfer_mode & SDHCI_TRNS_READ) && in __sdhci_msm_check_write()
2230 msm_host->pwr_irq_flag = 0; in __sdhci_msm_check_write()
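The COMMAND-register branch of __sdhci_msm_check_write() is mostly elided here; its job is to toggle the clock-data-recovery block per command, since CDR only helps while the controller is receiving data and must stay off for the tuning commands themselves. A sketch of that branch, reconstructed around the fragments at lines 2218-2220 (the exact tuning-opcode checks are assumptions):

	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);	/* data read: let CDR track the RX sampling point */
		else
			sdhci_msm_set_cdr(host, false);	/* write or tuning command: keep CDR off */
		break;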
2241 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) in sdhci_msm_writew()
2246 writew_relaxed(val, host->ioaddr + reg); in sdhci_msm_writew()
2253 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) in sdhci_msm_writeb()
2259 writeb_relaxed(val, host->ioaddr + reg); in sdhci_msm_writeb()
2267 struct mmc_host *mmc = msm_host->mmc; in sdhci_msm_set_regulator_caps()
2268 struct regulator *supply = mmc->supply.vqmmc; in sdhci_msm_set_regulator_caps()
2271 const struct sdhci_msm_offset *msm_offset = msm_host->offset; in sdhci_msm_set_regulator_caps()
2273 if (!IS_ERR(mmc->supply.vqmmc)) { in sdhci_msm_set_regulator_caps()
2289 u32 io_level = msm_host->curr_io_level; in sdhci_msm_set_regulator_caps()
2291 config = readl_relaxed(host->ioaddr + in sdhci_msm_set_regulator_caps()
2292 msm_offset->core_vendor_spec); in sdhci_msm_set_regulator_caps()
2301 host->ioaddr + msm_offset->core_vendor_spec); in sdhci_msm_set_regulator_caps()
2303 msm_host->caps_0 |= caps; in sdhci_msm_set_regulator_caps()
2309 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL)) in sdhci_msm_reset()
2310 cqhci_deactivate(host->mmc); in sdhci_msm_reset()
2314 static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host) in sdhci_msm_register_vreg()
2316 int ret; in sdhci_msm_register_vreg()
2318 ret = mmc_regulator_get_supply(msm_host->mmc); in sdhci_msm_register_vreg()
2327 static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc, in sdhci_msm_start_signal_voltage_switch()
2337 if (host->version < SDHCI_SPEC_300) in sdhci_msm_start_signal_voltage_switch()
2342 switch (ios->signal_voltage) { in sdhci_msm_start_signal_voltage_switch()
2344 if (!(host->flags & SDHCI_SIGNALING_330)) in sdhci_msm_start_signal_voltage_switch()
2345 return -EINVAL; in sdhci_msm_start_signal_voltage_switch()
2351 if (!(host->flags & SDHCI_SIGNALING_180)) in sdhci_msm_start_signal_voltage_switch()
2352 return -EINVAL; in sdhci_msm_start_signal_voltage_switch()
2359 return -EINVAL; in sdhci_msm_start_signal_voltage_switch()
2376 return -EAGAIN; in sdhci_msm_start_signal_voltage_switch()
2381 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
2387 const struct sdhci_msm_offset *msm_offset = msm_host->offset; in sdhci_msm_dump_vendor_regs()
2389 SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n"); in sdhci_msm_dump_vendor_regs()
2393 readl_relaxed(host->ioaddr + msm_offset->core_dll_status), in sdhci_msm_dump_vendor_regs()
2394 readl_relaxed(host->ioaddr + msm_offset->core_dll_config), in sdhci_msm_dump_vendor_regs()
2395 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2)); in sdhci_msm_dump_vendor_regs()
2398 readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3), in sdhci_msm_dump_vendor_regs()
2399 readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl), in sdhci_msm_dump_vendor_regs()
2400 readl_relaxed(host->ioaddr + msm_offset->core_ddr_config)); in sdhci_msm_dump_vendor_regs()
2403 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec), in sdhci_msm_dump_vendor_regs()
2404 readl_relaxed(host->ioaddr + in sdhci_msm_dump_vendor_regs()
2405 msm_offset->core_vendor_spec_func2), in sdhci_msm_dump_vendor_regs()
2406 readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3)); in sdhci_msm_dump_vendor_regs()
2442 {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
2443 {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
2444 {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
2445 {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
2446 {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
2480 struct device_node *node = pdev->dev.of_node; in sdhci_msm_get_of_property()
2484 if (of_property_read_u32(node, "qcom,ddr-config", in sdhci_msm_get_of_property()
2485 &msm_host->ddr_config)) in sdhci_msm_get_of_property()
2486 msm_host->ddr_config = DDR_CONFIG_POR_VAL; in sdhci_msm_get_of_property()
2488 of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); in sdhci_msm_get_of_property()
2491 static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host) in sdhci_msm_gcc_reset()
2494 int ret = 0; in sdhci_msm_gcc_reset()
2511 * The hardware requirement for delay between assert/deassert in sdhci_msm_gcc_reset()
2512 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to in sdhci_msm_gcc_reset()
2513 * ~125us (4/32768). To be on the safe side add 200us delay. in sdhci_msm_gcc_reset()
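For the delay math above: 4 cycles of the 32.768 kHz sleep clock is 4 / 32768, roughly 122 us, so 200 us leaves comfortable margin. A sketch of the assert/delay/deassert sequence that comment guards, using the standard reset-controller API (the error strings and the second settle delay are reconstructed, not quoted):

	struct reset_control *reset = reset_control_get_optional_exclusive(dev, NULL);

	if (IS_ERR(reset))
		return dev_err_probe(dev, PTR_ERR(reset), "unable to acquire core_reset\n");

	ret = reset_control_assert(reset);
	if (ret)
		return dev_err_probe(dev, ret, "core_reset assert failed\n");

	/* >= 3-4 sleep-clock cycles; 4 / 32768 Hz ~= 122 us, rounded up to 200 us */
	usleep_range(200, 210);

	ret = reset_control_deassert(reset);
	if (ret)
		return dev_err_probe(dev, ret, "core_reset deassert failed\n");

	usleep_range(200, 210);		/* same settle time after deassert */
	reset_control_put(reset);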
2529 static int sdhci_msm_probe(struct platform_device *pdev) in sdhci_msm_probe()
2535 int ret; in sdhci_msm_probe()
2541 struct device_node *node = pdev->dev.of_node; in sdhci_msm_probe()
2547 host->sdma_boundary = 0; in sdhci_msm_probe()
2550 msm_host->mmc = host->mmc; in sdhci_msm_probe()
2551 msm_host->pdev = pdev; in sdhci_msm_probe()
2553 ret = mmc_of_parse(host->mmc); in sdhci_msm_probe()
2561 var_info = of_device_get_match_data(&pdev->dev); in sdhci_msm_probe()
2563 msm_host->mci_removed = var_info->mci_removed; in sdhci_msm_probe()
2564 msm_host->restore_dll_config = var_info->restore_dll_config; in sdhci_msm_probe()
2565 msm_host->var_ops = var_info->var_ops; in sdhci_msm_probe()
2566 msm_host->offset = var_info->offset; in sdhci_msm_probe()
2568 msm_offset = msm_host->offset; in sdhci_msm_probe()
2573 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; in sdhci_msm_probe()
2575 ret = sdhci_msm_gcc_reset(&pdev->dev, host); in sdhci_msm_probe()
2580 msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); in sdhci_msm_probe()
2581 if (!IS_ERR(msm_host->bus_clk)) { in sdhci_msm_probe()
2583 ret = clk_set_rate(msm_host->bus_clk, INT_MAX); in sdhci_msm_probe()
2586 ret = clk_prepare_enable(msm_host->bus_clk); in sdhci_msm_probe()
2592 clk = devm_clk_get(&pdev->dev, "iface"); in sdhci_msm_probe()
2595 dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); in sdhci_msm_probe()
2598 msm_host->bulk_clks[1].clk = clk; in sdhci_msm_probe()
2601 clk = devm_clk_get(&pdev->dev, "core"); in sdhci_msm_probe()
2604 dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); in sdhci_msm_probe()
2607 msm_host->bulk_clks[0].clk = clk; in sdhci_msm_probe()
2610 ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL); in sdhci_msm_probe()
2614 ret = devm_pm_opp_set_clkname(&pdev->dev, "core"); in sdhci_msm_probe()
2619 ret = devm_pm_opp_of_add_table(&pdev->dev); in sdhci_msm_probe()
2620 if (ret && ret != -ENODEV) { in sdhci_msm_probe()
2621 dev_err(&pdev->dev, "Invalid OPP table in Device tree\n"); in sdhci_msm_probe()
2626 ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX); in sdhci_msm_probe()
2628 dev_warn(&pdev->dev, "core clock boost failed\n"); in sdhci_msm_probe()
2630 clk = devm_clk_get(&pdev->dev, "cal"); in sdhci_msm_probe()
2633 msm_host->bulk_clks[2].clk = clk; in sdhci_msm_probe()
2635 clk = devm_clk_get(&pdev->dev, "sleep"); in sdhci_msm_probe()
2638 msm_host->bulk_clks[3].clk = clk; in sdhci_msm_probe()
2640 clk = sdhci_msm_ice_get_clk(&pdev->dev); in sdhci_msm_probe()
2643 msm_host->bulk_clks[4].clk = clk; in sdhci_msm_probe()
2645 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), in sdhci_msm_probe()
2646 msm_host->bulk_clks); in sdhci_msm_probe()
2654 msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo"); in sdhci_msm_probe()
2655 if (IS_ERR(msm_host->xo_clk)) { in sdhci_msm_probe()
2656 ret = PTR_ERR(msm_host->xo_clk); in sdhci_msm_probe()
2657 dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret); in sdhci_msm_probe()
2660 if (!msm_host->mci_removed) { in sdhci_msm_probe()
2661 msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); in sdhci_msm_probe()
2662 if (IS_ERR(msm_host->core_mem)) { in sdhci_msm_probe()
2663 ret = PTR_ERR(msm_host->core_mem); in sdhci_msm_probe()
2670 host->ioaddr + msm_offset->core_vendor_spec); in sdhci_msm_probe()
2672 if (!msm_host->mci_removed) { in sdhci_msm_probe()
2675 msm_offset->core_hc_mode); in sdhci_msm_probe()
2677 msm_offset->core_hc_mode); in sdhci_msm_probe()
2680 msm_offset->core_hc_mode); in sdhci_msm_probe()
2683 host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); in sdhci_msm_probe()
2684 dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", in sdhci_msm_probe()
2689 msm_offset->core_mci_version); in sdhci_msm_probe()
2693 dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n", in sdhci_msm_probe()
2697 msm_host->use_14lpp_dll_reset = true; in sdhci_msm_probe()
2704 msm_host->use_cdclp533 = true; in sdhci_msm_probe()
2711 config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); in sdhci_msm_probe()
2713 writel_relaxed(config, host->ioaddr + in sdhci_msm_probe()
2714 msm_offset->core_vendor_spec_capabilities0); in sdhci_msm_probe()
2718 msm_host->updated_ddr_cfg = true; in sdhci_msm_probe()
2721 msm_host->uses_tassadar_dll = true; in sdhci_msm_probe()
2743 msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); in sdhci_msm_probe()
2744 if (msm_host->pwr_irq < 0) { in sdhci_msm_probe()
2745 ret = msm_host->pwr_irq; in sdhci_msm_probe()
2752 msm_offset->core_pwrctl_mask); in sdhci_msm_probe()
2754 ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, in sdhci_msm_probe()
2756 dev_name(&pdev->dev), host); in sdhci_msm_probe()
2758 dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); in sdhci_msm_probe()
2762 msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; in sdhci_msm_probe()
2765 host->max_timeout_count = 0xF; in sdhci_msm_probe()
2767 pm_runtime_get_noresume(&pdev->dev); in sdhci_msm_probe()
2768 pm_runtime_set_active(&pdev->dev); in sdhci_msm_probe()
2769 pm_runtime_enable(&pdev->dev); in sdhci_msm_probe()
2770 pm_runtime_set_autosuspend_delay(&pdev->dev, in sdhci_msm_probe()
2772 pm_runtime_use_autosuspend(&pdev->dev); in sdhci_msm_probe()
2774 host->mmc_host_ops.start_signal_voltage_switch = in sdhci_msm_probe()
2776 host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; in sdhci_msm_probe()
2777 if (of_property_read_bool(node, "supports-cqe")) in sdhci_msm_probe()
2784 pm_runtime_mark_last_busy(&pdev->dev); in sdhci_msm_probe()
2785 pm_runtime_put_autosuspend(&pdev->dev); in sdhci_msm_probe()
2790 pm_runtime_disable(&pdev->dev); in sdhci_msm_probe()
2791 pm_runtime_set_suspended(&pdev->dev); in sdhci_msm_probe()
2792 pm_runtime_put_noidle(&pdev->dev); in sdhci_msm_probe()
2794 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), in sdhci_msm_probe()
2795 msm_host->bulk_clks); in sdhci_msm_probe()
2797 if (!IS_ERR(msm_host->bus_clk)) in sdhci_msm_probe()
2798 clk_disable_unprepare(msm_host->bus_clk); in sdhci_msm_probe()
2804 static int sdhci_msm_remove(struct platform_device *pdev) in sdhci_msm_remove()
2809 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == in sdhci_msm_remove()
2814 pm_runtime_get_sync(&pdev->dev); in sdhci_msm_remove()
2815 pm_runtime_disable(&pdev->dev); in sdhci_msm_remove()
2816 pm_runtime_put_noidle(&pdev->dev); in sdhci_msm_remove()
2818 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), in sdhci_msm_remove()
2819 msm_host->bulk_clks); in sdhci_msm_remove()
2820 if (!IS_ERR(msm_host->bus_clk)) in sdhci_msm_remove()
2821 clk_disable_unprepare(msm_host->bus_clk); in sdhci_msm_remove()
2826 static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev) in sdhci_msm_runtime_suspend()
2834 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), in sdhci_msm_runtime_suspend()
2835 msm_host->bulk_clks); in sdhci_msm_runtime_suspend()
2840 static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev) in sdhci_msm_runtime_resume()
2845 int ret; in sdhci_msm_runtime_resume()
2847 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), in sdhci_msm_runtime_resume()
2848 msm_host->bulk_clks); in sdhci_msm_runtime_resume()
2852 * Whenever core-clock is gated dynamically, it's needed to in sdhci_msm_runtime_resume()
2855 if (msm_host->restore_dll_config && msm_host->clk_rate) { in sdhci_msm_runtime_resume()
2861 dev_pm_opp_set_rate(dev, msm_host->clk_rate); in sdhci_msm_runtime_resume()