Lines matching +full:host +full:-controller in the Qualcomm UFS host controller driver (ufs-qcom.c)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
15 #include <linux/reset-controller.h>
19 #include "ufshcd-pltfrm.h"
21 #include "ufs-qcom.h"
46 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
75 if (optional && err == -ENOENT) { in ufs_qcom_host_clk_get()
80 if (err != -EPROBE_DEFER) in ufs_qcom_host_clk_get()
98 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) in ufs_qcom_disable_lane_clks() argument
100 if (!host->is_lane_clks_enabled) in ufs_qcom_disable_lane_clks()
103 clk_disable_unprepare(host->tx_l1_sync_clk); in ufs_qcom_disable_lane_clks()
104 clk_disable_unprepare(host->tx_l0_sync_clk); in ufs_qcom_disable_lane_clks()
105 clk_disable_unprepare(host->rx_l1_sync_clk); in ufs_qcom_disable_lane_clks()
106 clk_disable_unprepare(host->rx_l0_sync_clk); in ufs_qcom_disable_lane_clks()
108 host->is_lane_clks_enabled = false; in ufs_qcom_disable_lane_clks()
111 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) in ufs_qcom_enable_lane_clks() argument
114 struct device *dev = host->hba->dev; in ufs_qcom_enable_lane_clks()
116 if (host->is_lane_clks_enabled) in ufs_qcom_enable_lane_clks()
120 host->rx_l0_sync_clk); in ufs_qcom_enable_lane_clks()
125 host->tx_l0_sync_clk); in ufs_qcom_enable_lane_clks()
130 host->rx_l1_sync_clk); in ufs_qcom_enable_lane_clks()
135 host->tx_l1_sync_clk); in ufs_qcom_enable_lane_clks()
139 host->is_lane_clks_enabled = true; in ufs_qcom_enable_lane_clks()
143 clk_disable_unprepare(host->rx_l1_sync_clk); in ufs_qcom_enable_lane_clks()
145 clk_disable_unprepare(host->tx_l0_sync_clk); in ufs_qcom_enable_lane_clks()
147 clk_disable_unprepare(host->rx_l0_sync_clk); in ufs_qcom_enable_lane_clks()
152 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) in ufs_qcom_init_lane_clks() argument
155 struct device *dev = host->hba->dev; in ufs_qcom_init_lane_clks()
161 &host->rx_l0_sync_clk, false); in ufs_qcom_init_lane_clks()
166 &host->tx_l0_sync_clk, false); in ufs_qcom_init_lane_clks()
171 if (host->hba->lanes_per_direction > 1) { in ufs_qcom_init_lane_clks()
173 &host->rx_l1_sync_clk, false); in ufs_qcom_init_lane_clks()
178 &host->tx_l1_sync_clk, true); in ufs_qcom_init_lane_clks()
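The clock-name string arguments to ufs_qcom_host_clk_get() fall outside the matched context above. Assuming the conventional Qualcomm UFS lane-clock names ("rx_lane0_sync_clk", "tx_lane0_sync_clk", "rx_lane1_sync_clk", "tx_lane1_sync_clk"), a representative pair of calls is sketched below; the second-lane TX clock is treated as optional because not every platform provides it, which is what the final boolean argument (visible above) controls:

	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
				    &host->rx_l0_sync_clk, false);
	if (err)
		return err;

	/* Second-lane TX clock is optional, hence the final "true" */
	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
				    &host->tx_l1_sync_clk, true);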
213 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", in ufs_qcom_check_hibern8()
217 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", in ufs_qcom_check_hibern8()
224 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) in ufs_qcom_select_unipro_mode() argument
226 ufshcd_rmwl(host->hba, QUNIPRO_SEL, in ufs_qcom_select_unipro_mode()
227 ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0, in ufs_qcom_select_unipro_mode()
234 * ufs_qcom_host_reset - reset host controller and PHY
239 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_host_reset() local
242 if (!host->core_reset) { in ufs_qcom_host_reset()
243 dev_warn(hba->dev, "%s: reset control not set\n", __func__); in ufs_qcom_host_reset()
247 reenable_intr = hba->is_irq_enabled; in ufs_qcom_host_reset()
248 disable_irq(hba->irq); in ufs_qcom_host_reset()
249 hba->is_irq_enabled = false; in ufs_qcom_host_reset()
251 ret = reset_control_assert(host->core_reset); in ufs_qcom_host_reset()
253 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", in ufs_qcom_host_reset()
260 * is at least 3-4 sleep clock (32.7 kHz) cycles, which comes to in ufs_qcom_host_reset()
265 ret = reset_control_deassert(host->core_reset); in ufs_qcom_host_reset()
267 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", in ufs_qcom_host_reset()
273 enable_irq(hba->irq); in ufs_qcom_host_reset()
274 hba->is_irq_enabled = true; in ufs_qcom_host_reset()
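The delay between reset_control_assert() and reset_control_deassert() does not match the search terms, but its magnitude follows from the comment at line 260: one 32.7 kHz sleep-clock cycle is about 30.5 us, so 3-4 cycles come to roughly 92-122 us. A minimal sketch of the elided step; the concrete usleep_range() bounds are an assumption about the margin used, not a quote from this file:

	/*
	 * 1 / 32768 Hz ~= 30.5 us per sleep-clock cycle; 3-4 cycles is
	 * ~92-122 us, so sleeping ~200 us leaves comfortable headroom
	 * (assumed bounds).
	 */
	usleep_range(200, 210);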
283 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_power_up_sequence() local
284 struct phy *phy = host->generic_phy; in ufs_qcom_power_up_sequence()
288 /* Reset UFS Host Controller and PHY */ in ufs_qcom_power_up_sequence()
291 dev_warn(hba->dev, "%s: host reset returned %d\n", in ufs_qcom_power_up_sequence()
297 /* phy initialization - calibrate the phy */ in ufs_qcom_power_up_sequence()
300 dev_err(hba->dev, "%s: phy init failed, ret = %d\n", in ufs_qcom_power_up_sequence()
305 /* power on phy - start serdes and phy's power and clocks */ in ufs_qcom_power_up_sequence()
308 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", in ufs_qcom_power_up_sequence()
313 ufs_qcom_select_unipro_mode(host); in ufs_qcom_power_up_sequence()
324 * The UTP controller has a number of internal clock gating cells (CGCs).
325 * Internal hardware sub-modules within the UTP controller control the CGCs.
326 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
327 * in a specific operation. UTP controller CGCs are disabled by default and
344 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_hce_enable_notify() local
355 err = ufs_qcom_enable_lane_clks(host); in ufs_qcom_hce_enable_notify()
361 ufs_qcom_ice_enable(host); in ufs_qcom_hce_enable_notify()
364 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); in ufs_qcom_hce_enable_notify()
365 err = -EINVAL; in ufs_qcom_hce_enable_notify()
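The hardware clock-gating enable helper documented by the CGC comment above never mentions "host", so its body is filtered out of this listing. A minimal sketch of what it presumably does, assuming REG_UFS_CFG2 carries the per-sub-module CGC enable bits and that ufs-qcom.h collects them in a REG_UFS_CFG2_CGC_EN_ALL mask:

static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	/* Turn on every sub-module CGC in a single read-modify-write */
	ufshcd_writel(hba,
		      ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		      REG_UFS_CFG2);

	/* Make sure the write lands before link startup proceeds */
	mb();
}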
372 * Returns zero for success and non-zero in case of a failure
378 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_cfg_timers() local
405 * The Qunipro controller does not use the following registers: in ufs_qcom_cfg_timers()
408 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt in ufs_qcom_cfg_timers()
411 if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) in ufs_qcom_cfg_timers()
415 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); in ufs_qcom_cfg_timers()
419 list_for_each_entry(clki, &hba->clk_list_head, list) { in ufs_qcom_cfg_timers()
420 if (!strcmp(clki->name, "core_clk")) in ufs_qcom_cfg_timers()
421 core_clk_rate = clk_get_rate(clki->clk); in ufs_qcom_cfg_timers()
438 if (ufs_qcom_cap_qunipro(host)) in ufs_qcom_cfg_timers()
450 dev_err(hba->dev, in ufs_qcom_cfg_timers()
456 tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1]; in ufs_qcom_cfg_timers()
459 dev_err(hba->dev, in ufs_qcom_cfg_timers()
465 tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1]; in ufs_qcom_cfg_timers()
467 dev_err(hba->dev, "%s: invalid rate = %d\n", in ufs_qcom_cfg_timers()
475 dev_err(hba->dev, in ufs_qcom_cfg_timers()
481 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1]; in ufs_qcom_cfg_timers()
485 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); in ufs_qcom_cfg_timers()
513 ret = -EINVAL; in ufs_qcom_cfg_timers()
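Most of the arithmetic in ufs_qcom_cfg_timers() is hidden by the match filter, but its shape follows from the lines above: the core clock rate read from "core_clk" fixes how many core-clock cycles make up one microsecond, and the gear/rate tables (hs_fr_table_rA, hs_fr_table_rB, pwm_fr_table) supply the TX symbol-clock cycles per microsecond. A sketch of the SYS1CLK_1US part, assuming the register macro is named REG_UFS_SYS1CLK_1US as in ufs-qcom.h and reusing hba and core_clk_rate from the function above:

	u32 core_clk_cycles_per_us;

	/* Example: core_clk at 150 MHz gives 150000000 / USEC_PER_SEC = 150 */
	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us)
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);

The value picked from hs_fr_table_rA[gear-1][1] (or its rate-B and PWM counterparts) is then programmed into the TX symbol-clock register in the same read-compare-write fashion.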
522 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_link_startup_notify() local
528 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", in ufs_qcom_link_startup_notify()
530 err = -EINVAL; in ufs_qcom_link_startup_notify()
534 if (ufs_qcom_cap_qunipro(host)) in ufs_qcom_link_startup_notify()
543 * Some UFS devices (and maybe the host) have issues if LCC is in ufs_qcom_link_startup_notify()
545 * before link startup which will make sure that both host in ufs_qcom_link_startup_notify()
563 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_device_reset_ctrl() local
566 if (!host->device_reset) in ufs_qcom_device_reset_ctrl()
569 gpiod_set_value_cansleep(host->device_reset, asserted); in ufs_qcom_device_reset_ctrl()
575 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_suspend() local
576 struct phy *phy = host->generic_phy; in ufs_qcom_suspend()
587 ufs_qcom_disable_lane_clks(host); in ufs_qcom_suspend()
594 ufs_qcom_disable_lane_clks(host); in ufs_qcom_suspend()
602 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_resume() local
603 struct phy *phy = host->generic_phy; in ufs_qcom_resume()
609 dev_err(hba->dev, "%s: failed PHY power on: %d\n", in ufs_qcom_resume()
614 err = ufs_qcom_enable_lane_clks(host); in ufs_qcom_resume()
619 err = ufs_qcom_enable_lane_clks(host); in ufs_qcom_resume()
624 return ufs_qcom_ice_resume(host); in ufs_qcom_resume()
627 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) in ufs_qcom_dev_ref_clk_ctrl() argument
629 if (host->dev_ref_clk_ctrl_mmio && in ufs_qcom_dev_ref_clk_ctrl()
630 (enable ^ host->is_dev_ref_clk_enabled)) { in ufs_qcom_dev_ref_clk_ctrl()
631 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio); in ufs_qcom_dev_ref_clk_ctrl()
634 temp |= host->dev_ref_clk_en_mask; in ufs_qcom_dev_ref_clk_ctrl()
636 temp &= ~host->dev_ref_clk_en_mask; in ufs_qcom_dev_ref_clk_ctrl()
647 gating_wait = host->hba->dev_info.clk_gating_wait_us; in ufs_qcom_dev_ref_clk_ctrl()
655 * HS-MODE to LS-MODE or HIBERN8 state. Give it in ufs_qcom_dev_ref_clk_ctrl()
663 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); in ufs_qcom_dev_ref_clk_ctrl()
669 readl(host->dev_ref_clk_ctrl_mmio); in ufs_qcom_dev_ref_clk_ctrl()
679 host->is_dev_ref_clk_enabled = enable; in ufs_qcom_dev_ref_clk_ctrl()
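The bare readl() at line 669 is the standard way to flush the preceding writel_relaxed(): relaxed MMIO writes are posted, and reading any register back from the same block guarantees the new reference-clock state has actually reached the hardware before the caller goes on to, for example, gate the parent clock. A generic sketch of the idiom (base and REG_CTRL are hypothetical names, not from this driver):

	writel_relaxed(val, base + REG_CTRL);	/* posted; may still be in flight */
	readl(base + REG_CTRL);			/* read-back forces the write to complete */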
688 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_pwr_change_notify() local
694 ret = -EINVAL; in ufs_qcom_pwr_change_notify()
703 if (host->hw_ver.major == 0x1) { in ufs_qcom_pwr_change_notify()
705 * HS-G3 operations may not reliably work on legacy QCOM in ufs_qcom_pwr_change_notify()
706 * UFS host controller hardware even though capability in ufs_qcom_pwr_change_notify()
709 * Hence downgrade the maximum supported gear to HS-G2. in ufs_qcom_pwr_change_notify()
727 if (!ufshcd_is_hs_mode(&hba->pwr_info) && in ufs_qcom_pwr_change_notify()
729 ufs_qcom_dev_ref_clk_ctrl(host, true); in ufs_qcom_pwr_change_notify()
731 if (host->hw_ver.major >= 0x4) { in ufs_qcom_pwr_change_notify()
733 dev_req_params->gear_tx, in ufs_qcom_pwr_change_notify()
738 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, in ufs_qcom_pwr_change_notify()
739 dev_req_params->pwr_rx, in ufs_qcom_pwr_change_notify()
740 dev_req_params->hs_rate, false)) { in ufs_qcom_pwr_change_notify()
741 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", in ufs_qcom_pwr_change_notify()
748 ret = -EINVAL; in ufs_qcom_pwr_change_notify()
752 memcpy(&host->dev_req_params, in ufs_qcom_pwr_change_notify()
756 if (ufshcd_is_hs_mode(&hba->pwr_info) && in ufs_qcom_pwr_change_notify()
758 ufs_qcom_dev_ref_clk_ctrl(host, false); in ufs_qcom_pwr_change_notify()
761 ret = -EINVAL; in ufs_qcom_pwr_change_notify()
790 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) in ufs_qcom_apply_dev_quirks()
793 if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) in ufs_qcom_apply_dev_quirks()
794 hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; in ufs_qcom_apply_dev_quirks()
801 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_get_ufs_hci_version() local
803 if (host->hw_ver.major == 0x1) in ufs_qcom_get_ufs_hci_version()
810 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
811 * @hba: host controller instance
813 * The QCOM UFS host controller might have some non-standard behaviours (quirks) in ufs_qcom_advertise_quirks()
815 * quirks to the standard UFS host controller driver so that the standard driver takes them into in ufs_qcom_advertise_quirks()
820 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_advertise_quirks() local
822 if (host->hw_ver.major == 0x01) { in ufs_qcom_advertise_quirks()
823 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS in ufs_qcom_advertise_quirks()
827 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001) in ufs_qcom_advertise_quirks()
828 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR; in ufs_qcom_advertise_quirks()
830 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; in ufs_qcom_advertise_quirks()
833 if (host->hw_ver.major == 0x2) { in ufs_qcom_advertise_quirks()
834 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; in ufs_qcom_advertise_quirks()
836 if (!ufs_qcom_cap_qunipro(host)) in ufs_qcom_advertise_quirks()
838 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS in ufs_qcom_advertise_quirks()
846 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_set_caps() local
848 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; in ufs_qcom_set_caps()
849 hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING; in ufs_qcom_set_caps()
850 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; in ufs_qcom_set_caps()
851 hba->caps |= UFSHCD_CAP_WB_EN; in ufs_qcom_set_caps()
852 hba->caps |= UFSHCD_CAP_CRYPTO; in ufs_qcom_set_caps()
853 hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; in ufs_qcom_set_caps()
854 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; in ufs_qcom_set_caps()
856 if (host->hw_ver.major >= 0x2) { in ufs_qcom_set_caps()
857 host->caps = UFS_QCOM_CAP_QUNIPRO | in ufs_qcom_set_caps()
863 * ufs_qcom_setup_clocks - enables/disables clocks in ufs_qcom_setup_clocks()
864 * @hba: host controller instance
868 * Returns 0 on success, non-zero on failure.
873 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_setup_clocks() local
880 if (!host) in ufs_qcom_setup_clocks()
888 ufs_qcom_dev_ref_clk_ctrl(host, false); in ufs_qcom_setup_clocks()
895 if (ufshcd_is_hs_mode(&hba->pwr_info)) in ufs_qcom_setup_clocks()
896 ufs_qcom_dev_ref_clk_ctrl(host, true); in ufs_qcom_setup_clocks()
907 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); in ufs_qcom_reset_assert() local
911 ufs_qcom_assert_reset(host->hba); in ufs_qcom_reset_assert()
920 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); in ufs_qcom_reset_deassert() local
924 ufs_qcom_deassert_reset(host->hba); in ufs_qcom_reset_deassert()
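ufs_qcom_reset_assert() and ufs_qcom_reset_deassert() are hooked into the reset-controller framework through an ops table that does not itself match the search terms; judging from its use at line 979 below, it presumably looks like this sketch:

static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};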
940 * ufs_qcom_init - bind phy with controller
941 * @hba: host controller instance
943 * Binds the PHY with the controller and powers up the PHY, enabling clocks in ufs_qcom_init()
946 * Returns -EPROBE_DEFER if binding fails, returns negative error
952 struct device *dev = hba->dev; in ufs_qcom_init()
954 struct ufs_qcom_host *host; in ufs_qcom_init() local
958 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); in ufs_qcom_init()
959 if (!host) { in ufs_qcom_init()
960 err = -ENOMEM; in ufs_qcom_init()
961 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__); in ufs_qcom_init()
965 /* Make a two-way bind between the qcom host and the hba */ in ufs_qcom_init()
966 host->hba = hba; in ufs_qcom_init()
967 ufshcd_set_variant(hba, host); in ufs_qcom_init()
970 host->core_reset = devm_reset_control_get_optional(hba->dev, "rst"); in ufs_qcom_init()
971 if (IS_ERR(host->core_reset)) { in ufs_qcom_init()
972 err = dev_err_probe(dev, PTR_ERR(host->core_reset), in ufs_qcom_init()
977 /* Fire up the reset controller. Failure here is non-fatal. */ in ufs_qcom_init()
978 host->rcdev.of_node = dev->of_node; in ufs_qcom_init()
979 host->rcdev.ops = &ufs_qcom_reset_ops; in ufs_qcom_init()
980 host->rcdev.owner = dev->driver->owner; in ufs_qcom_init()
981 host->rcdev.nr_resets = 1; in ufs_qcom_init()
982 err = devm_reset_controller_register(dev, &host->rcdev); in ufs_qcom_init()
984 dev_warn(dev, "Failed to register reset controller\n"); in ufs_qcom_init()
989 host->generic_phy = devm_phy_get(dev, "ufsphy"); in ufs_qcom_init()
990 if (IS_ERR(host->generic_phy)) { in ufs_qcom_init()
991 err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n"); in ufs_qcom_init()
996 host->device_reset = devm_gpiod_get_optional(dev, "reset", in ufs_qcom_init()
998 if (IS_ERR(host->device_reset)) { in ufs_qcom_init()
999 err = PTR_ERR(host->device_reset); in ufs_qcom_init()
1000 if (err != -EPROBE_DEFER) in ufs_qcom_init()
1005 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, in ufs_qcom_init()
1006 &host->hw_ver.minor, &host->hw_ver.step); in ufs_qcom_init()
1010 * moved inside the UFS controller register address space itself. in ufs_qcom_init()
1012 if (host->hw_ver.major >= 0x02) { in ufs_qcom_init()
1013 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; in ufs_qcom_init()
1014 host->dev_ref_clk_en_mask = BIT(26); in ufs_qcom_init()
1020 host->dev_ref_clk_ctrl_mmio = in ufs_qcom_init()
1022 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) in ufs_qcom_init()
1023 host->dev_ref_clk_ctrl_mmio = NULL; in ufs_qcom_init()
1024 host->dev_ref_clk_en_mask = BIT(5); in ufs_qcom_init()
1028 list_for_each_entry(clki, &hba->clk_list_head, list) { in ufs_qcom_init()
1029 if (!strcmp(clki->name, "core_clk_unipro")) in ufs_qcom_init()
1030 clki->keep_link_active = true; in ufs_qcom_init()
1033 err = ufs_qcom_init_lane_clks(host); in ufs_qcom_init()
1040 err = ufs_qcom_ice_init(host); in ufs_qcom_init()
1046 if (hba->dev->id < MAX_UFS_QCOM_HOSTS) in ufs_qcom_init()
1047 ufs_qcom_hosts[hba->dev->id] = host; in ufs_qcom_init()
1049 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN; in ufs_qcom_init()
1050 ufs_qcom_get_default_testbus_cfg(host); in ufs_qcom_init()
1051 err = ufs_qcom_testbus_config(host); in ufs_qcom_init()
1068 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_exit() local
1070 ufs_qcom_disable_lane_clks(host); in ufs_qcom_exit()
1071 phy_power_off(host->generic_phy); in ufs_qcom_exit()
1072 phy_exit(host->generic_phy); in ufs_qcom_exit()
1082 return -EINVAL; in ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div()
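Only the range check of ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div() survives the filter; the rest is a read-modify-write of the vendor-specific core-clock-control DME attribute via ufshcd_dme_get()/ufshcd_dme_set(). A sketch under the assumption that the attribute and field macros (DME_VS_CORE_CLK_CTRL plus its MAX_CORE_CLK_1US_CYCLES mask and CORE_CLK_DIV_EN bit) come from ufs-qcom.h, and that clk_cycles is the function's divider argument:

	u32 core_clk_ctrl_reg;
	int err;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);
	if (err)
		return err;

	/* Program the new 1us-cycle count and clear the clock-divider enable */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	return ufshcd_dme_set(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			      core_clk_ctrl_reg);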
1111 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_up_post_change() local
1113 if (!ufs_qcom_cap_qunipro(host)) in ufs_qcom_clk_scale_up_post_change()
1122 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_down_pre_change() local
1126 if (!ufs_qcom_cap_qunipro(host)) in ufs_qcom_clk_scale_down_pre_change()
1147 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_down_post_change() local
1149 if (!ufs_qcom_cap_qunipro(host)) in ufs_qcom_clk_scale_down_post_change()
1159 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_notify() local
1160 struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; in ufs_qcom_clk_scale_notify()
1187 dev_req_params->gear_rx, in ufs_qcom_clk_scale_notify()
1188 dev_req_params->pwr_rx, in ufs_qcom_clk_scale_notify()
1189 dev_req_params->hs_rate, in ufs_qcom_clk_scale_notify()
1203 struct ufs_qcom_host *host; in ufs_qcom_print_hw_debug_reg_all() local
1210 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); in ufs_qcom_print_hw_debug_reg_all()
1214 host = ufshcd_get_variant(hba); in ufs_qcom_print_hw_debug_reg_all()
1215 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN)) in ufs_qcom_print_hw_debug_reg_all()
1218 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); in ufs_qcom_print_hw_debug_reg_all()
1225 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); in ufs_qcom_print_hw_debug_reg_all()
1228 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); in ufs_qcom_print_hw_debug_reg_all()
1231 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); in ufs_qcom_print_hw_debug_reg_all()
1234 /* clear bit 17 - UTP_DBG_RAMS_EN */ in ufs_qcom_print_hw_debug_reg_all()
1237 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); in ufs_qcom_print_hw_debug_reg_all()
1240 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); in ufs_qcom_print_hw_debug_reg_all()
1243 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC); in ufs_qcom_print_hw_debug_reg_all()
1246 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); in ufs_qcom_print_hw_debug_reg_all()
1249 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); in ufs_qcom_print_hw_debug_reg_all()
1252 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT); in ufs_qcom_print_hw_debug_reg_all()
1255 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); in ufs_qcom_print_hw_debug_reg_all()
1259 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) in ufs_qcom_enable_test_bus() argument
1261 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) { in ufs_qcom_enable_test_bus()
1262 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, in ufs_qcom_enable_test_bus()
1264 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); in ufs_qcom_enable_test_bus()
1266 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); in ufs_qcom_enable_test_bus()
1267 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); in ufs_qcom_enable_test_bus()
1271 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) in ufs_qcom_get_default_testbus_cfg() argument
1274 host->testbus.select_major = TSTBUS_UNIPRO; in ufs_qcom_get_default_testbus_cfg()
1275 host->testbus.select_minor = 37; in ufs_qcom_get_default_testbus_cfg()
1278 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) in ufs_qcom_testbus_cfg_is_ok() argument
1280 if (host->testbus.select_major >= TSTBUS_MAX) { in ufs_qcom_testbus_cfg_is_ok()
1281 dev_err(host->hba->dev, in ufs_qcom_testbus_cfg_is_ok()
1283 __func__, host->testbus.select_major); in ufs_qcom_testbus_cfg_is_ok()
1290 int ufs_qcom_testbus_config(struct ufs_qcom_host *host) in ufs_qcom_testbus_config() argument
1296 if (!host) in ufs_qcom_testbus_config()
1297 return -EINVAL; in ufs_qcom_testbus_config()
1299 if (!ufs_qcom_testbus_cfg_is_ok(host)) in ufs_qcom_testbus_config()
1300 return -EPERM; in ufs_qcom_testbus_config()
1302 switch (host->testbus.select_major) { in ufs_qcom_testbus_config()
1359 ufshcd_rmwl(host->hba, TEST_BUS_SEL, in ufs_qcom_testbus_config()
1360 (u32)host->testbus.select_major << 19, in ufs_qcom_testbus_config()
1362 ufshcd_rmwl(host->hba, mask, in ufs_qcom_testbus_config()
1363 (u32)host->testbus.select_minor << offset, in ufs_qcom_testbus_config()
1365 ufs_qcom_enable_test_bus(host); in ufs_qcom_testbus_config()
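Putting ufs_qcom_get_default_testbus_cfg() and ufs_qcom_testbus_config() together, routing a signal group onto the test bus amounts to choosing a major/minor selector and calling the config function, much as the init path at lines 1050-1051 does. A minimal usage sketch:

	/* Route UniPro test-bus signal group 37 to the test bus output */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;

	err = ufs_qcom_testbus_config(host);
	if (err)
		dev_err(host->hba->dev, "%s: testbus config failed: %d\n",
			__func__, err);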
1384 * ufs_qcom_device_reset() - toggle the (optional) device reset line
1385 * @hba: per-adapter instance
1391 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_device_reset() local
1394 if (!host->device_reset) in ufs_qcom_device_reset()
1395 return -EOPNOTSUPP; in ufs_qcom_device_reset()
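The remainder of ufs_qcom_device_reset() is filtered out; given ufs_qcom_device_reset_ctrl() above, it reduces to pulsing the GPIO. A sketch, where the ~10 us hold time on each edge is an assumed safety margin over the roughly 1 us pulse the device needs to detect, not a quote from this file:

	/* Assert, hold, then deassert the reset line (assumed timings) */
	ufs_qcom_device_reset_ctrl(hba, true);
	usleep_range(10, 15);

	ufs_qcom_device_reset_ctrl(hba, false);
	usleep_range(10, 15);

	return 0;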
1415 p->polling_ms = 60; in ufs_qcom_config_scaling_param()
1416 d->upthreshold = 70; in ufs_qcom_config_scaling_param()
1417 d->downdifferential = 5; in ufs_qcom_config_scaling_param()
1428 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1430 * The variant operations configure the necessary controller and PHY
1453 * ufs_qcom_probe - probe routine of the driver
1456 * Return zero for success and non-zero for failure
1461 struct device *dev = &pdev->dev; in ufs_qcom_probe()
1472 * ufs_qcom_remove - set driver_data of the device to NULL
1481 pm_runtime_get_sync(&(pdev)->dev); in ufs_qcom_remove()
1512 .name = "ufshcd-qcom",