Lines Matching full:smu
57 static int smu_force_smuclk_levels(struct smu_context *smu,
60 static int smu_handle_task(struct smu_context *smu,
63 static int smu_reset(struct smu_context *smu);
68 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74 struct smu_context *smu = handle; in smu_sys_get_pp_feature_mask() local
76 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_feature_mask()
79 return smu_get_pp_feature_mask(smu, buf); in smu_sys_get_pp_feature_mask()
85 struct smu_context *smu = handle; in smu_sys_set_pp_feature_mask() local
87 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_feature_mask()
90 return smu_set_pp_feature_mask(smu, new_mask); in smu_sys_set_pp_feature_mask()
93 int smu_set_residency_gfxoff(struct smu_context *smu, bool value) in smu_set_residency_gfxoff() argument
95 if (!smu->ppt_funcs->set_gfx_off_residency) in smu_set_residency_gfxoff()
98 return smu_set_gfx_off_residency(smu, value); in smu_set_residency_gfxoff()
101 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value) in smu_get_residency_gfxoff() argument
103 if (!smu->ppt_funcs->get_gfx_off_residency) in smu_get_residency_gfxoff()
106 return smu_get_gfx_off_residency(smu, value); in smu_get_residency_gfxoff()
109 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value) in smu_get_entrycount_gfxoff() argument
111 if (!smu->ppt_funcs->get_gfx_off_entrycount) in smu_get_entrycount_gfxoff()
114 return smu_get_gfx_off_entrycount(smu, value); in smu_get_entrycount_gfxoff()
117 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) in smu_get_status_gfxoff() argument
119 if (!smu->ppt_funcs->get_gfx_off_status) in smu_get_status_gfxoff()
122 *value = smu_get_gfx_off_status(smu); in smu_get_status_gfxoff()
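
The four gfxoff wrappers above share one guard-then-dispatch shape. A minimal sketch of that pattern follows; "query_something" is a placeholder name for illustration, not a real ppt_funcs member:

    /* Return -EINVAL when the ASIC-specific hook is absent, otherwise
     * forward the call to the ppt_funcs table. "query_something" is a
     * hypothetical callback used only to show the shape.
     */
    int smu_query_something(struct smu_context *smu, uint32_t *value)
    {
            if (!smu->ppt_funcs->query_something)
                    return -EINVAL;

            return smu->ppt_funcs->query_something(smu, value);
    }
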
127 int smu_set_soft_freq_range(struct smu_context *smu, in smu_set_soft_freq_range() argument
134 if (smu->ppt_funcs->set_soft_freq_limited_range) in smu_set_soft_freq_range()
135 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, in smu_set_soft_freq_range()
143 int smu_get_dpm_freq_range(struct smu_context *smu, in smu_get_dpm_freq_range() argument
153 if (smu->ppt_funcs->get_dpm_ultimate_freq) in smu_get_dpm_freq_range()
154 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, in smu_get_dpm_freq_range()
162 int smu_set_gfx_power_up_by_imu(struct smu_context *smu) in smu_set_gfx_power_up_by_imu() argument
164 if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu) in smu_set_gfx_power_up_by_imu()
167 return smu->ppt_funcs->set_gfx_power_up_by_imu(smu); in smu_set_gfx_power_up_by_imu()
172 struct smu_context *smu = handle; in smu_get_mclk() local
176 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, in smu_get_mclk()
186 struct smu_context *smu = handle; in smu_get_sclk() local
190 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, in smu_get_sclk()
198 static int smu_dpm_set_vcn_enable(struct smu_context *smu, in smu_dpm_set_vcn_enable() argument
201 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_vcn_enable()
205 if (!smu->ppt_funcs->dpm_set_vcn_enable) in smu_dpm_set_vcn_enable()
211 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); in smu_dpm_set_vcn_enable()
218 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, in smu_dpm_set_jpeg_enable() argument
221 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_jpeg_enable()
225 if (!smu->ppt_funcs->dpm_set_jpeg_enable) in smu_dpm_set_jpeg_enable()
231 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); in smu_dpm_set_jpeg_enable()
245 * This API uses no smu->mutex lock protection due to:
249 * Under this case, the smu->mutex lock protection is already enforced on
256 struct smu_context *smu = handle; in smu_dpm_set_power_gate() local
259 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { in smu_dpm_set_power_gate()
260 dev_WARN(smu->adev->dev, in smu_dpm_set_power_gate()
261 "SMU uninitialized but power %s requested for %u!\n", in smu_dpm_set_power_gate()
273 ret = smu_dpm_set_vcn_enable(smu, !gate); in smu_dpm_set_power_gate()
275 dev_err(smu->adev->dev, "Failed to power %s VCN!\n", in smu_dpm_set_power_gate()
279 ret = smu_gfx_off_control(smu, gate); in smu_dpm_set_power_gate()
281 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", in smu_dpm_set_power_gate()
285 ret = smu_powergate_sdma(smu, gate); in smu_dpm_set_power_gate()
287 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", in smu_dpm_set_power_gate()
291 ret = smu_dpm_set_jpeg_enable(smu, !gate); in smu_dpm_set_power_gate()
293 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", in smu_dpm_set_power_gate()
297 dev_err(smu->adev->dev, "Unsupported block type!\n"); in smu_dpm_set_power_gate()
307 * @smu: smu_context pointer
312 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) in smu_set_user_clk_dependencies() argument
314 if (smu->adev->in_suspend) in smu_set_user_clk_dependencies()
318 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
319 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
322 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
325 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
326 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
329 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
332 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
333 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); in smu_set_user_clk_dependencies()
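
The clk_dependency bitmask set above is consumed at restore time: a clock is only replayed if no other forced clock claimed it as a dependency. A condensed sketch of that gate, taken from the smu_restore_dpm_user_profile() hits below:

    /* Skip clocks that another forced clock already constrains. */
    if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
        smu->user_dpm_profile.clk_mask[clk_type])
            smu_force_smuclk_levels(smu, clk_type,
                                    smu->user_dpm_profile.clk_mask[clk_type]);
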
342 * @smu: smu_context pointer
347 static void smu_restore_dpm_user_profile(struct smu_context *smu) in smu_restore_dpm_user_profile() argument
349 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_restore_dpm_user_profile()
352 if (!smu->adev->in_suspend) in smu_restore_dpm_user_profile()
355 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_restore_dpm_user_profile()
359 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
362 if (smu->user_dpm_profile.power_limit) { in smu_restore_dpm_user_profile()
363 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); in smu_restore_dpm_user_profile()
365 dev_err(smu->adev->dev, "Failed to set power limit value\n"); in smu_restore_dpm_user_profile()
374 * Iterate over smu clk type and force the saved user clk in smu_restore_dpm_user_profile()
377 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && in smu_restore_dpm_user_profile()
378 smu->user_dpm_profile.clk_mask[clk_type]) { in smu_restore_dpm_user_profile()
379 ret = smu_force_smuclk_levels(smu, clk_type, in smu_restore_dpm_user_profile()
380 smu->user_dpm_profile.clk_mask[clk_type]); in smu_restore_dpm_user_profile()
382 dev_err(smu->adev->dev, in smu_restore_dpm_user_profile()
389 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || in smu_restore_dpm_user_profile()
390 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { in smu_restore_dpm_user_profile()
391 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); in smu_restore_dpm_user_profile()
393 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_restore_dpm_user_profile()
394 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_restore_dpm_user_profile()
395 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; in smu_restore_dpm_user_profile()
396 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); in smu_restore_dpm_user_profile()
399 if (smu->user_dpm_profile.fan_speed_pwm) { in smu_restore_dpm_user_profile()
400 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); in smu_restore_dpm_user_profile()
402 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); in smu_restore_dpm_user_profile()
405 if (smu->user_dpm_profile.fan_speed_rpm) { in smu_restore_dpm_user_profile()
406 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); in smu_restore_dpm_user_profile()
408 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); in smu_restore_dpm_user_profile()
413 if (smu->user_dpm_profile.user_od) { in smu_restore_dpm_user_profile()
414 if (smu->ppt_funcs->restore_user_od_settings) { in smu_restore_dpm_user_profile()
415 ret = smu->ppt_funcs->restore_user_od_settings(smu); in smu_restore_dpm_user_profile()
417 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n"); in smu_restore_dpm_user_profile()
422 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
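
smu_restore_dpm_user_profile() brackets the whole replay with SMU_DPM_USER_PROFILE_RESTORE so the individual setters, which normally cache user input, do not re-save the values being replayed. Condensed from the hits above:

    smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

    if (smu->user_dpm_profile.power_limit)
            smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
    /* ... forced clock levels, fan mode/speed, OD settings ... */

    smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
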
453 struct smu_context *smu = adev->powerplay.pp_handle; in is_support_cclk_dpm() local
455 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) in is_support_cclk_dpm()
465 struct smu_context *smu = handle; in smu_sys_get_pp_table() local
466 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_get_pp_table()
468 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_table()
486 struct smu_context *smu = handle; in smu_sys_set_pp_table() local
487 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_set_pp_table()
491 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_table()
495 dev_err(smu->adev->dev, "pp table size not matched !\n"); in smu_sys_set_pp_table()
513 smu->uploading_custom_pp_table = true; in smu_sys_set_pp_table()
515 ret = smu_reset(smu); in smu_sys_set_pp_table()
517 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret); in smu_sys_set_pp_table()
519 smu->uploading_custom_pp_table = false; in smu_sys_set_pp_table()
524 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) in smu_get_driver_allowed_feature_mask() argument
526 struct smu_feature *feature = &smu->smu_feature; in smu_get_driver_allowed_feature_mask()
537 if (smu->adev->scpm_enabled) { in smu_get_driver_allowed_feature_mask()
544 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, in smu_get_driver_allowed_feature_mask()
558 struct smu_context *smu = adev->powerplay.pp_handle; in smu_set_funcs() local
561 smu->od_enabled = true; in smu_set_funcs()
567 navi10_set_ppt_funcs(smu); in smu_set_funcs()
573 sienna_cichlid_set_ppt_funcs(smu); in smu_set_funcs()
577 renoir_set_ppt_funcs(smu); in smu_set_funcs()
580 vangogh_set_ppt_funcs(smu); in smu_set_funcs()
585 yellow_carp_set_ppt_funcs(smu); in smu_set_funcs()
588 smu_v13_0_4_set_ppt_funcs(smu); in smu_set_funcs()
591 smu_v13_0_5_set_ppt_funcs(smu); in smu_set_funcs()
594 cyan_skillfish_set_ppt_funcs(smu); in smu_set_funcs()
598 arcturus_set_ppt_funcs(smu); in smu_set_funcs()
600 smu->od_enabled = false; in smu_set_funcs()
603 aldebaran_set_ppt_funcs(smu); in smu_set_funcs()
605 smu->od_enabled = true; in smu_set_funcs()
609 smu_v13_0_0_set_ppt_funcs(smu); in smu_set_funcs()
612 smu_v13_0_7_set_ppt_funcs(smu); in smu_set_funcs()
624 struct smu_context *smu; in smu_early_init() local
626 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); in smu_early_init()
627 if (!smu) in smu_early_init()
630 smu->adev = adev; in smu_early_init()
631 smu->pm_enabled = !!amdgpu_dpm; in smu_early_init()
632 smu->is_apu = false; in smu_early_init()
633 smu->smu_baco.state = SMU_BACO_STATE_EXIT; in smu_early_init()
634 smu->smu_baco.platform_support = false; in smu_early_init()
635 smu->user_dpm_profile.fan_mode = -1; in smu_early_init()
637 mutex_init(&smu->message_lock); in smu_early_init()
639 adev->powerplay.pp_handle = smu; in smu_early_init()
645 static int smu_set_default_dpm_table(struct smu_context *smu) in smu_set_default_dpm_table() argument
647 struct smu_power_context *smu_power = &smu->smu_power; in smu_set_default_dpm_table()
652 if (!smu->ppt_funcs->set_default_dpm_table) in smu_set_default_dpm_table()
658 ret = smu_dpm_set_vcn_enable(smu, true); in smu_set_default_dpm_table()
662 ret = smu_dpm_set_jpeg_enable(smu, true); in smu_set_default_dpm_table()
666 ret = smu->ppt_funcs->set_default_dpm_table(smu); in smu_set_default_dpm_table()
668 dev_err(smu->adev->dev, in smu_set_default_dpm_table()
671 smu_dpm_set_jpeg_enable(smu, !jpeg_gate); in smu_set_default_dpm_table()
673 smu_dpm_set_vcn_enable(smu, !vcn_gate); in smu_set_default_dpm_table()
677 static int smu_apply_default_config_table_settings(struct smu_context *smu) in smu_apply_default_config_table_settings() argument
679 struct amdgpu_device *adev = smu->adev; in smu_apply_default_config_table_settings()
682 ret = smu_get_default_config_table_settings(smu, in smu_apply_default_config_table_settings()
687 return smu_set_config_table(smu, &adev->pm.config_table); in smu_apply_default_config_table_settings()
693 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_init() local
696 smu_set_fine_grain_gfx_freq_parameters(smu); in smu_late_init()
698 if (!smu->pm_enabled) in smu_late_init()
701 ret = smu_post_init(smu); in smu_late_init()
703 dev_err(adev->dev, "Failed to post smu init!\n"); in smu_late_init()
711 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { in smu_late_init()
712 ret = smu_set_default_od_settings(smu); in smu_late_init()
719 ret = smu_populate_umd_state_clk(smu); in smu_late_init()
725 ret = smu_get_asic_power_limits(smu, in smu_late_init()
726 &smu->current_power_limit, in smu_late_init()
727 &smu->default_power_limit, in smu_late_init()
728 &smu->max_power_limit); in smu_late_init()
735 smu_get_unique_id(smu); in smu_late_init()
737 smu_get_fan_parameters(smu); in smu_late_init()
739 smu_handle_task(smu, in smu_late_init()
740 smu->smu_dpm.dpm_level, in smu_late_init()
743 ret = smu_apply_default_config_table_settings(smu); in smu_late_init()
749 smu_restore_dpm_user_profile(smu); in smu_late_init()
754 static int smu_init_fb_allocations(struct smu_context *smu) in smu_init_fb_allocations() argument
756 struct amdgpu_device *adev = smu->adev; in smu_init_fb_allocations()
757 struct smu_table_context *smu_table = &smu->smu_table; in smu_init_fb_allocations()
812 static int smu_fini_fb_allocations(struct smu_context *smu) in smu_fini_fb_allocations() argument
814 struct smu_table_context *smu_table = &smu->smu_table; in smu_fini_fb_allocations()
833 * @smu: smu_context pointer
840 static int smu_alloc_memory_pool(struct smu_context *smu) in smu_alloc_memory_pool() argument
842 struct amdgpu_device *adev = smu->adev; in smu_alloc_memory_pool()
843 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_memory_pool()
845 uint64_t pool_size = smu->pool_size; in smu_alloc_memory_pool()
877 static int smu_free_memory_pool(struct smu_context *smu) in smu_free_memory_pool() argument
879 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_memory_pool()
894 static int smu_alloc_dummy_read_table(struct smu_context *smu) in smu_alloc_dummy_read_table() argument
896 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_dummy_read_table()
899 struct amdgpu_device *adev = smu->adev; in smu_alloc_dummy_read_table()
919 static void smu_free_dummy_read_table(struct smu_context *smu) in smu_free_dummy_read_table() argument
921 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_dummy_read_table()
933 static int smu_smc_table_sw_init(struct smu_context *smu) in smu_smc_table_sw_init() argument
941 ret = smu_init_smc_tables(smu); in smu_smc_table_sw_init()
943 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); in smu_smc_table_sw_init()
951 ret = smu_init_power(smu); in smu_smc_table_sw_init()
953 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n"); in smu_smc_table_sw_init()
960 ret = smu_init_fb_allocations(smu); in smu_smc_table_sw_init()
964 ret = smu_alloc_memory_pool(smu); in smu_smc_table_sw_init()
968 ret = smu_alloc_dummy_read_table(smu); in smu_smc_table_sw_init()
972 ret = smu_i2c_init(smu); in smu_smc_table_sw_init()
979 static int smu_smc_table_sw_fini(struct smu_context *smu) in smu_smc_table_sw_fini() argument
983 smu_i2c_fini(smu); in smu_smc_table_sw_fini()
985 smu_free_dummy_read_table(smu); in smu_smc_table_sw_fini()
987 ret = smu_free_memory_pool(smu); in smu_smc_table_sw_fini()
991 ret = smu_fini_fb_allocations(smu); in smu_smc_table_sw_fini()
995 ret = smu_fini_power(smu); in smu_smc_table_sw_fini()
997 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n"); in smu_smc_table_sw_fini()
1001 ret = smu_fini_smc_tables(smu); in smu_smc_table_sw_fini()
1003 dev_err(smu->adev->dev, "Failed to fini smc tables!\n"); in smu_smc_table_sw_fini()
1012 struct smu_context *smu = container_of(work, struct smu_context, in smu_throttling_logging_work_fn() local
1015 smu_log_thermal_throttling(smu); in smu_throttling_logging_work_fn()
1020 struct smu_context *smu = container_of(work, struct smu_context, in smu_interrupt_work_fn() local
1023 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) in smu_interrupt_work_fn()
1024 smu->ppt_funcs->interrupt_work(smu); in smu_interrupt_work_fn()
1030 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_init() local
1033 smu->pool_size = adev->pm.smu_prv_buffer_size; in smu_sw_init()
1034 smu->smu_feature.feature_num = SMU_FEATURE_MAX; in smu_sw_init()
1035 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); in smu_sw_init()
1036 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); in smu_sw_init()
1038 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); in smu_sw_init()
1039 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); in smu_sw_init()
1040 atomic64_set(&smu->throttle_int_counter, 0); in smu_sw_init()
1041 smu->watermarks_bitmap = 0; in smu_sw_init()
1042 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1043 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1045 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); in smu_sw_init()
1046 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); in smu_sw_init()
1048 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; in smu_sw_init()
1049 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; in smu_sw_init()
1050 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; in smu_sw_init()
1051 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; in smu_sw_init()
1052 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; in smu_sw_init()
1053 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; in smu_sw_init()
1054 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; in smu_sw_init()
1055 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; in smu_sw_init()
1057 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; in smu_sw_init()
1058 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; in smu_sw_init()
1059 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; in smu_sw_init()
1060 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; in smu_sw_init()
1061 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; in smu_sw_init()
1062 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; in smu_sw_init()
1063 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; in smu_sw_init()
1064 smu->display_config = &adev->pm.pm_display_cfg; in smu_sw_init()
1066 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1067 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1069 ret = smu_init_microcode(smu); in smu_sw_init()
1071 dev_err(adev->dev, "Failed to load smu firmware!\n"); in smu_sw_init()
1075 ret = smu_smc_table_sw_init(smu); in smu_sw_init()
1082 ret = smu_get_vbios_bootup_values(smu); in smu_sw_init()
1088 ret = smu_init_pptable_microcode(smu); in smu_sw_init()
1094 ret = smu_register_irq_handler(smu); in smu_sw_init()
1101 if (!smu->ppt_funcs->get_fan_control_mode) in smu_sw_init()
1102 smu->adev->pm.no_fan = true; in smu_sw_init()
1110 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_fini() local
1113 ret = smu_smc_table_sw_fini(smu); in smu_sw_fini()
1119 smu_fini_microcode(smu); in smu_sw_fini()
1124 static int smu_get_thermal_temperature_range(struct smu_context *smu) in smu_get_thermal_temperature_range() argument
1126 struct amdgpu_device *adev = smu->adev; in smu_get_thermal_temperature_range()
1128 &smu->thermal_range; in smu_get_thermal_temperature_range()
1131 if (!smu->ppt_funcs->get_thermal_temperature_range) in smu_get_thermal_temperature_range()
1134 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); in smu_get_thermal_temperature_range()
1151 static int smu_smc_hw_setup(struct smu_context *smu) in smu_smc_hw_setup() argument
1153 struct smu_feature *feature = &smu->smu_feature; in smu_smc_hw_setup()
1154 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_setup()
1164 if (adev->in_suspend && smu_is_dpm_running(smu)) { in smu_smc_hw_setup()
1166 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1176 ret = smu_init_display_count(smu, 0); in smu_smc_hw_setup()
1182 ret = smu_set_driver_table_location(smu); in smu_smc_hw_setup()
1191 ret = smu_set_tool_table_location(smu); in smu_smc_hw_setup()
1201 ret = smu_notify_memory_pool_location(smu); in smu_smc_hw_setup()
1207 ret = smu_setup_pptable(smu); in smu_smc_hw_setup()
1213 /* smu_dump_pptable(smu); */ in smu_smc_hw_setup()
1217 * (to SMU). Driver involvement is neither needed nor permitted. in smu_smc_hw_setup()
1221 * Copy pptable bo in the vram to smc with SMU MSGs such as in smu_smc_hw_setup()
1224 ret = smu_write_pptable(smu); in smu_smc_hw_setup()
1232 ret = smu_run_btc(smu); in smu_smc_hw_setup()
1241 ret = smu_feature_set_allowed_mask(smu); in smu_smc_hw_setup()
1248 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1254 ret = smu_feature_get_enabled_mask(smu, &features_supported); in smu_smc_hw_setup()
1263 if (!smu_is_dpm_running(smu)) in smu_smc_hw_setup()
1271 ret = smu_set_default_dpm_table(smu); in smu_smc_hw_setup()
1302 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); in smu_smc_hw_setup()
1308 ret = smu_get_thermal_temperature_range(smu); in smu_smc_hw_setup()
1314 ret = smu_enable_thermal_alert(smu); in smu_smc_hw_setup()
1320 ret = smu_notify_display_change(smu); in smu_smc_hw_setup()
1330 ret = smu_set_min_dcef_deep_sleep(smu, in smu_smc_hw_setup()
1331 smu->smu_table.boot_values.dcefclk / 100); in smu_smc_hw_setup()
1336 static int smu_start_smc_engine(struct smu_context *smu) in smu_start_smc_engine() argument
1338 struct amdgpu_device *adev = smu->adev; in smu_start_smc_engine()
1343 if (smu->ppt_funcs->load_microcode) { in smu_start_smc_engine()
1344 ret = smu->ppt_funcs->load_microcode(smu); in smu_start_smc_engine()
1351 if (smu->ppt_funcs->check_fw_status) { in smu_start_smc_engine()
1352 ret = smu->ppt_funcs->check_fw_status(smu); in smu_start_smc_engine()
1363 ret = smu_check_fw_version(smu); in smu_start_smc_engine()
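
smu_start_smc_engine() chains up to three optional steps: load the microcode, verify firmware status, then run the driver/firmware version handshake. A trimmed sketch, assuming the real function's extra gating on firmware load type and ASIC generation is elided:

    static int smu_start_smc_engine(struct smu_context *smu)
    {
            int ret;

            if (smu->ppt_funcs->load_microcode) {
                    ret = smu->ppt_funcs->load_microcode(smu);
                    if (ret)
                            return ret;
            }

            if (smu->ppt_funcs->check_fw_status) {
                    ret = smu->ppt_funcs->check_fw_status(smu);
                    if (ret)
                            return ret;
            }

            /* Compare driver and PMFW interface versions. */
            return smu_check_fw_version(smu);
    }
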
1374 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_init() local
1377 smu->pm_enabled = false; in smu_hw_init()
1381 ret = smu_start_smc_engine(smu); in smu_hw_init()
1387 if (smu->is_apu) { in smu_hw_init()
1388 if ((smu->ppt_funcs->set_gfx_power_up_by_imu) && in smu_hw_init()
1390 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); in smu_hw_init()
1397 smu_dpm_set_vcn_enable(smu, true); in smu_hw_init()
1398 smu_dpm_set_jpeg_enable(smu, true); in smu_hw_init()
1399 smu_set_gfx_cgpg(smu, true); in smu_hw_init()
1402 if (!smu->pm_enabled) in smu_hw_init()
1405 ret = smu_get_driver_allowed_feature_mask(smu); in smu_hw_init()
1409 ret = smu_smc_hw_setup(smu); in smu_hw_init()
1418 * 2. DAL settings come between .hw_init and .late_init of SMU. in smu_hw_init()
1422 ret = smu_init_max_sustainable_clocks(smu); in smu_hw_init()
1430 dev_info(adev->dev, "SMU is initialized successfully!\n"); in smu_hw_init()
1435 static int smu_disable_dpms(struct smu_context *smu) in smu_disable_dpms() argument
1437 struct amdgpu_device *adev = smu->adev; in smu_disable_dpms()
1439 bool use_baco = !smu->is_apu && in smu_disable_dpms()
1445 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others) in smu_disable_dpms()
1464 * - SMU firmware can handle the DPM reenablement in smu_disable_dpms()
1467 if (smu->uploading_custom_pp_table) { in smu_disable_dpms()
1504 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { in smu_disable_dpms()
1505 ret = smu_disable_all_features_with_exception(smu, in smu_disable_dpms()
1508 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); in smu_disable_dpms()
1512 ret = smu_system_features_control(smu, false); in smu_disable_dpms()
1514 dev_err(adev->dev, "Failed to disable smu features.\n"); in smu_disable_dpms()
1525 static int smu_smc_hw_cleanup(struct smu_context *smu) in smu_smc_hw_cleanup() argument
1527 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_cleanup()
1530 cancel_work_sync(&smu->throttling_logging_work); in smu_smc_hw_cleanup()
1531 cancel_work_sync(&smu->interrupt_work); in smu_smc_hw_cleanup()
1533 ret = smu_disable_thermal_alert(smu); in smu_smc_hw_cleanup()
1539 ret = smu_disable_dpms(smu); in smu_smc_hw_cleanup()
1551 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_fini() local
1556 smu_dpm_set_vcn_enable(smu, false); in smu_hw_fini()
1557 smu_dpm_set_jpeg_enable(smu, false); in smu_hw_fini()
1562 if (!smu->pm_enabled) in smu_hw_fini()
1567 return smu_smc_hw_cleanup(smu); in smu_hw_fini()
1573 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_fini() local
1575 kfree(smu); in smu_late_fini()
1578 static int smu_reset(struct smu_context *smu) in smu_reset() argument
1580 struct amdgpu_device *adev = smu->adev; in smu_reset()
1601 struct smu_context *smu = adev->powerplay.pp_handle; in smu_suspend() local
1608 if (!smu->pm_enabled) in smu_suspend()
1613 ret = smu_smc_hw_cleanup(smu); in smu_suspend()
1617 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); in smu_suspend()
1619 smu_set_gfx_cgpg(smu, false); in smu_suspend()
1625 ret = smu_get_entrycount_gfxoff(smu, &count); in smu_suspend()
1636 struct smu_context *smu = adev->powerplay.pp_handle; in smu_resume() local
1641 if (!smu->pm_enabled) in smu_resume()
1644 dev_info(adev->dev, "SMU is resuming...\n"); in smu_resume()
1646 ret = smu_start_smc_engine(smu); in smu_resume()
1652 ret = smu_smc_hw_setup(smu); in smu_resume()
1658 smu_set_gfx_cgpg(smu, true); in smu_resume()
1660 smu->disable_uclk_switch = 0; in smu_resume()
1664 dev_info(adev->dev, "SMU is resumed successfully!\n"); in smu_resume()
1672 struct smu_context *smu = handle; in smu_display_configuration_change() local
1676 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_configuration_change()
1682 smu_set_min_dcef_deep_sleep(smu, in smu_display_configuration_change()
1713 struct smu_context *smu = (struct smu_context *)handle; in smu_enable_umd_pstate() local
1714 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_enable_umd_pstate()
1716 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_enable_umd_pstate()
1723 smu_gpo_control(smu, false); in smu_enable_umd_pstate()
1724 smu_gfx_ulv_control(smu, false); in smu_enable_umd_pstate()
1725 smu_deep_sleep_control(smu, false); in smu_enable_umd_pstate()
1726 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); in smu_enable_umd_pstate()
1733 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); in smu_enable_umd_pstate()
1734 smu_deep_sleep_control(smu, true); in smu_enable_umd_pstate()
1735 smu_gfx_ulv_control(smu, true); in smu_enable_umd_pstate()
1736 smu_gpo_control(smu, true); in smu_enable_umd_pstate()
1743 static int smu_bump_power_profile_mode(struct smu_context *smu, in smu_bump_power_profile_mode() argument
1749 if (smu->ppt_funcs->set_power_profile_mode) in smu_bump_power_profile_mode()
1750 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); in smu_bump_power_profile_mode()
1755 static int smu_adjust_power_state_dynamic(struct smu_context *smu, in smu_adjust_power_state_dynamic() argument
1762 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_adjust_power_state_dynamic()
1765 ret = smu_display_config_changed(smu); in smu_adjust_power_state_dynamic()
1767 dev_err(smu->adev->dev, "Failed to change display config!"); in smu_adjust_power_state_dynamic()
1772 ret = smu_apply_clocks_adjust_rules(smu); in smu_adjust_power_state_dynamic()
1774 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); in smu_adjust_power_state_dynamic()
1779 ret = smu_notify_smc_display_config(smu); in smu_adjust_power_state_dynamic()
1781 dev_err(smu->adev->dev, "Failed to notify smc display config!"); in smu_adjust_power_state_dynamic()
1787 ret = smu_asic_set_performance_level(smu, level); in smu_adjust_power_state_dynamic()
1789 dev_err(smu->adev->dev, "Failed to set performance level!"); in smu_adjust_power_state_dynamic()
1799 index = fls(smu->workload_mask); in smu_adjust_power_state_dynamic()
1801 workload = smu->workload_setting[index]; in smu_adjust_power_state_dynamic()
1803 if (smu->power_profile_mode != workload) in smu_adjust_power_state_dynamic()
1804 smu_bump_power_profile_mode(smu, &workload, 0); in smu_adjust_power_state_dynamic()
1810 static int smu_handle_task(struct smu_context *smu, in smu_handle_task() argument
1816 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_handle_task()
1821 ret = smu_pre_display_config_changed(smu); in smu_handle_task()
1824 ret = smu_adjust_power_state_dynamic(smu, level, false); in smu_handle_task()
1828 ret = smu_adjust_power_state_dynamic(smu, level, true); in smu_handle_task()
1841 struct smu_context *smu = handle; in smu_handle_dpm_task() local
1842 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; in smu_handle_dpm_task()
1844 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); in smu_handle_dpm_task()
1852 struct smu_context *smu = handle; in smu_switch_power_profile() local
1853 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_switch_power_profile()
1857 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_switch_power_profile()
1864 smu->workload_mask &= ~(1 << smu->workload_prority[type]); in smu_switch_power_profile()
1865 index = fls(smu->workload_mask); in smu_switch_power_profile()
1867 workload = smu->workload_setting[index]; in smu_switch_power_profile()
1869 smu->workload_mask |= (1 << smu->workload_prority[type]); in smu_switch_power_profile()
1870 index = fls(smu->workload_mask); in smu_switch_power_profile()
1872 workload = smu->workload_setting[index]; in smu_switch_power_profile()
1877 smu_bump_power_profile_mode(smu, &workload, 0); in smu_switch_power_profile()
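
Profile switching above is priority-driven: each profile owns one bit of workload_mask (bit position = priority), and fls() returns the highest set bit, so the highest-priority active profile wins. A sketch, assuming the index bounds check that the search hits elide:

    smu->workload_mask |= (1 << smu->workload_prority[type]);  /* request profile */
    index = fls(smu->workload_mask);             /* 1-based highest set bit */
    workload = smu->workload_setting[index - 1]; /* bounds-checked in the real code */
    smu_bump_power_profile_mode(smu, &workload, 0);
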
1884 struct smu_context *smu = handle; in smu_get_performance_level() local
1885 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_get_performance_level()
1887 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_performance_level()
1890 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_get_performance_level()
1899 struct smu_context *smu = handle; in smu_force_performance_level() local
1900 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_performance_level()
1903 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_performance_level()
1906 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_force_performance_level()
1909 ret = smu_enable_umd_pstate(smu, &level); in smu_force_performance_level()
1913 ret = smu_handle_task(smu, level, in smu_force_performance_level()
1918 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); in smu_force_performance_level()
1919 smu->user_dpm_profile.clk_dependency = 0; in smu_force_performance_level()
1927 struct smu_context *smu = handle; in smu_set_display_count() local
1929 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_display_count()
1932 return smu_init_display_count(smu, count); in smu_set_display_count()
1935 static int smu_force_smuclk_levels(struct smu_context *smu, in smu_force_smuclk_levels() argument
1939 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_smuclk_levels()
1942 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_smuclk_levels()
1946 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); in smu_force_smuclk_levels()
1950 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { in smu_force_smuclk_levels()
1951 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); in smu_force_smuclk_levels()
1952 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_force_smuclk_levels()
1953 smu->user_dpm_profile.clk_mask[clk_type] = mask; in smu_force_smuclk_levels()
1954 smu_set_user_clk_dependencies(smu, clk_type); in smu_force_smuclk_levels()
1965 struct smu_context *smu = handle; in smu_force_ppclk_levels() local
1997 return smu_force_smuclk_levels(smu, clk_type, mask); in smu_force_ppclk_levels()
2002 * flag will be cleared. So that those SMU services which
2010 struct smu_context *smu = handle; in smu_set_mp1_state() local
2013 if (!smu->pm_enabled) in smu_set_mp1_state()
2016 if (smu->ppt_funcs && in smu_set_mp1_state()
2017 smu->ppt_funcs->set_mp1_state) in smu_set_mp1_state()
2018 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); in smu_set_mp1_state()
2026 struct smu_context *smu = handle; in smu_set_df_cstate() local
2029 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_df_cstate()
2032 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) in smu_set_df_cstate()
2035 ret = smu->ppt_funcs->set_df_cstate(smu, state); in smu_set_df_cstate()
2037 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); in smu_set_df_cstate()
2042 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) in smu_allow_xgmi_power_down() argument
2046 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_allow_xgmi_power_down()
2049 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) in smu_allow_xgmi_power_down()
2052 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); in smu_allow_xgmi_power_down()
2054 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); in smu_allow_xgmi_power_down()
2059 int smu_write_watermarks_table(struct smu_context *smu) in smu_write_watermarks_table() argument
2061 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_write_watermarks_table()
2064 return smu_set_watermarks_table(smu, NULL); in smu_write_watermarks_table()
2070 struct smu_context *smu = handle; in smu_set_watermarks_for_clock_ranges() local
2072 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_watermarks_for_clock_ranges()
2075 if (smu->disable_watermark) in smu_set_watermarks_for_clock_ranges()
2078 return smu_set_watermarks_table(smu, clock_ranges); in smu_set_watermarks_for_clock_ranges()
2081 int smu_set_ac_dc(struct smu_context *smu) in smu_set_ac_dc() argument
2085 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_ac_dc()
2089 if (smu->dc_controlled_by_gpio) in smu_set_ac_dc()
2092 ret = smu_set_power_source(smu, in smu_set_ac_dc()
2093 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : in smu_set_ac_dc()
2096 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", in smu_set_ac_dc()
2097 smu->adev->pm.ac_power ? "AC" : "DC"); in smu_set_ac_dc()
2103 .name = "smu",
2150 struct smu_context *smu = handle; in smu_load_microcode() local
2151 struct amdgpu_device *adev = smu->adev; in smu_load_microcode()
2154 if (!smu->pm_enabled) in smu_load_microcode()
2161 if (smu->ppt_funcs->load_microcode) { in smu_load_microcode()
2162 ret = smu->ppt_funcs->load_microcode(smu); in smu_load_microcode()
2169 if (smu->ppt_funcs->check_fw_status) { in smu_load_microcode()
2170 ret = smu->ppt_funcs->check_fw_status(smu); in smu_load_microcode()
2180 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) in smu_set_gfx_cgpg() argument
2184 if (smu->ppt_funcs->set_gfx_cgpg) in smu_set_gfx_cgpg()
2185 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); in smu_set_gfx_cgpg()
2192 struct smu_context *smu = handle; in smu_set_fan_speed_rpm() local
2195 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_rpm()
2198 if (!smu->ppt_funcs->set_fan_speed_rpm) in smu_set_fan_speed_rpm()
2204 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); in smu_set_fan_speed_rpm()
2205 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_rpm()
2206 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_rpm()
2207 smu->user_dpm_profile.fan_speed_rpm = speed; in smu_set_fan_speed_rpm()
2210 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_rpm()
2211 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_speed_rpm()
2218 * smu_get_power_limit - Request one of the SMU Power Limits
2220 * @handle: pointer to smu context
2232 struct smu_context *smu = handle; in smu_get_power_limit() local
2233 struct amdgpu_device *adev = smu->adev; in smu_get_power_limit()
2238 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_power_limit()
2270 if (smu->ppt_funcs->get_ppt_limit) in smu_get_power_limit()
2271 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); in smu_get_power_limit()
2281 ret = smu_get_asic_power_limits(smu, in smu_get_power_limit()
2282 &smu->current_power_limit, in smu_get_power_limit()
2289 *limit = smu->current_power_limit; in smu_get_power_limit()
2292 *limit = smu->default_power_limit; in smu_get_power_limit()
2295 *limit = smu->max_power_limit; in smu_get_power_limit()
2307 struct smu_context *smu = handle; in smu_set_power_limit() local
2311 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_power_limit()
2316 if (smu->ppt_funcs->set_power_limit) in smu_set_power_limit()
2317 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2319 if (limit > smu->max_power_limit) { in smu_set_power_limit()
2320 dev_err(smu->adev->dev, in smu_set_power_limit()
2322 limit, smu->max_power_limit); in smu_set_power_limit()
2327 limit = smu->current_power_limit; in smu_set_power_limit()
2329 if (smu->ppt_funcs->set_power_limit) { in smu_set_power_limit()
2330 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2331 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) in smu_set_power_limit()
2332 smu->user_dpm_profile.power_limit = limit; in smu_set_power_limit()
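
smu_set_power_limit() is the canonical clamp-then-cache setter: reject values above max_power_limit, dispatch to the ASIC hook, and record the value in user_dpm_profile only when not inside a restore replay. Condensed from the hits above:

    if (limit > smu->max_power_limit)
            return -EINVAL;

    if (!limit)
            limit = smu->current_power_limit;

    if (smu->ppt_funcs->set_power_limit) {
            ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
            if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
                    smu->user_dpm_profile.power_limit = limit; /* replayed on resume */
    }
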
2338 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) in smu_print_smuclk_levels() argument
2342 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_print_smuclk_levels()
2345 if (smu->ppt_funcs->print_clk_levels) in smu_print_smuclk_levels()
2346 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); in smu_print_smuclk_levels()
2395 struct smu_context *smu = handle; in smu_print_ppclk_levels() local
2402 return smu_print_smuclk_levels(smu, clk_type, buf); in smu_print_ppclk_levels()
2407 struct smu_context *smu = handle; in smu_emit_ppclk_levels() local
2414 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_emit_ppclk_levels()
2417 if (!smu->ppt_funcs->emit_clk_levels) in smu_emit_ppclk_levels()
2420 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); in smu_emit_ppclk_levels()
2428 struct smu_context *smu = handle; in smu_od_edit_dpm_table() local
2431 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_od_edit_dpm_table()
2434 if (smu->ppt_funcs->od_edit_dpm_table) { in smu_od_edit_dpm_table()
2435 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); in smu_od_edit_dpm_table()
2446 struct smu_context *smu = handle; in smu_read_sensor() local
2448 &smu->pstate_table; in smu_read_sensor()
2452 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_read_sensor()
2461 if (smu->ppt_funcs->read_sensor) in smu_read_sensor()
2462 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) in smu_read_sensor()
2475 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); in smu_read_sensor()
2479 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; in smu_read_sensor()
2483 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; in smu_read_sensor()
2487 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1; in smu_read_sensor()
2509 struct smu_context *smu = handle; in smu_get_power_profile_mode() local
2511 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_get_power_profile_mode()
2512 !smu->ppt_funcs->get_power_profile_mode) in smu_get_power_profile_mode()
2517 return smu->ppt_funcs->get_power_profile_mode(smu, buf); in smu_get_power_profile_mode()
2524 struct smu_context *smu = handle; in smu_set_power_profile_mode() local
2526 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_set_power_profile_mode()
2527 !smu->ppt_funcs->set_power_profile_mode) in smu_set_power_profile_mode()
2530 return smu_bump_power_profile_mode(smu, param, param_size); in smu_set_power_profile_mode()
2535 struct smu_context *smu = handle; in smu_get_fan_control_mode() local
2537 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_control_mode()
2540 if (!smu->ppt_funcs->get_fan_control_mode) in smu_get_fan_control_mode()
2546 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); in smu_get_fan_control_mode()
2553 struct smu_context *smu = handle; in smu_set_fan_control_mode() local
2556 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_control_mode()
2559 if (!smu->ppt_funcs->set_fan_control_mode) in smu_set_fan_control_mode()
2565 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); in smu_set_fan_control_mode()
2569 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_control_mode()
2570 smu->user_dpm_profile.fan_mode = value; in smu_set_fan_control_mode()
2574 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_control_mode()
2575 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_control_mode()
2576 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); in smu_set_fan_control_mode()
2586 struct smu_context *smu = handle; in smu_get_fan_speed_pwm() local
2589 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_pwm()
2592 if (!smu->ppt_funcs->get_fan_speed_pwm) in smu_get_fan_speed_pwm()
2598 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); in smu_get_fan_speed_pwm()
2605 struct smu_context *smu = handle; in smu_set_fan_speed_pwm() local
2608 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_pwm()
2611 if (!smu->ppt_funcs->set_fan_speed_pwm) in smu_set_fan_speed_pwm()
2617 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); in smu_set_fan_speed_pwm()
2618 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_pwm()
2619 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_pwm()
2620 smu->user_dpm_profile.fan_speed_pwm = speed; in smu_set_fan_speed_pwm()
2623 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_pwm()
2624 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_speed_pwm()
2632 struct smu_context *smu = handle; in smu_get_fan_speed_rpm() local
2635 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_rpm()
2638 if (!smu->ppt_funcs->get_fan_speed_rpm) in smu_get_fan_speed_rpm()
2644 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); in smu_get_fan_speed_rpm()
2651 struct smu_context *smu = handle; in smu_set_deep_sleep_dcefclk() local
2653 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_deep_sleep_dcefclk()
2656 return smu_set_min_dcef_deep_sleep(smu, clk); in smu_set_deep_sleep_dcefclk()
2663 struct smu_context *smu = handle; in smu_get_clock_by_type_with_latency() local
2667 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_clock_by_type_with_latency()
2670 if (smu->ppt_funcs->get_clock_by_type_with_latency) { in smu_get_clock_by_type_with_latency()
2685 dev_err(smu->adev->dev, "Invalid clock type!\n"); in smu_get_clock_by_type_with_latency()
2689 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); in smu_get_clock_by_type_with_latency()
2698 struct smu_context *smu = handle; in smu_display_clock_voltage_request() local
2701 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_clock_voltage_request()
2704 if (smu->ppt_funcs->display_clock_voltage_request) in smu_display_clock_voltage_request()
2705 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); in smu_display_clock_voltage_request()
2714 struct smu_context *smu = handle; in smu_display_disable_memory_clock_switch() local
2717 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_disable_memory_clock_switch()
2720 if (smu->ppt_funcs->display_disable_memory_clock_switch) in smu_display_disable_memory_clock_switch()
2721 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); in smu_display_disable_memory_clock_switch()
2729 struct smu_context *smu = handle; in smu_set_xgmi_pstate() local
2732 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_xgmi_pstate()
2735 if (smu->ppt_funcs->set_xgmi_pstate) in smu_set_xgmi_pstate()
2736 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); in smu_set_xgmi_pstate()
2739 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); in smu_set_xgmi_pstate()
2746 struct smu_context *smu = handle; in smu_get_baco_capability() local
2750 if (!smu->pm_enabled) in smu_get_baco_capability()
2753 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) in smu_get_baco_capability()
2754 *cap = smu->ppt_funcs->baco_is_support(smu); in smu_get_baco_capability()
2761 struct smu_context *smu = handle; in smu_baco_set_state() local
2764 if (!smu->pm_enabled) in smu_baco_set_state()
2768 if (smu->ppt_funcs->baco_exit) in smu_baco_set_state()
2769 ret = smu->ppt_funcs->baco_exit(smu); in smu_baco_set_state()
2771 if (smu->ppt_funcs->baco_enter) in smu_baco_set_state()
2772 ret = smu->ppt_funcs->baco_enter(smu); in smu_baco_set_state()
2778 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", in smu_baco_set_state()
2784 bool smu_mode1_reset_is_support(struct smu_context *smu) in smu_mode1_reset_is_support() argument
2788 if (!smu->pm_enabled) in smu_mode1_reset_is_support()
2791 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) in smu_mode1_reset_is_support()
2792 ret = smu->ppt_funcs->mode1_reset_is_support(smu); in smu_mode1_reset_is_support()
2797 bool smu_mode2_reset_is_support(struct smu_context *smu) in smu_mode2_reset_is_support() argument
2801 if (!smu->pm_enabled) in smu_mode2_reset_is_support()
2804 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) in smu_mode2_reset_is_support()
2805 ret = smu->ppt_funcs->mode2_reset_is_support(smu); in smu_mode2_reset_is_support()
2810 int smu_mode1_reset(struct smu_context *smu) in smu_mode1_reset() argument
2814 if (!smu->pm_enabled) in smu_mode1_reset()
2817 if (smu->ppt_funcs->mode1_reset) in smu_mode1_reset()
2818 ret = smu->ppt_funcs->mode1_reset(smu); in smu_mode1_reset()
2825 struct smu_context *smu = handle; in smu_mode2_reset() local
2828 if (!smu->pm_enabled) in smu_mode2_reset()
2831 if (smu->ppt_funcs->mode2_reset) in smu_mode2_reset()
2832 ret = smu->ppt_funcs->mode2_reset(smu); in smu_mode2_reset()
2835 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); in smu_mode2_reset()
2843 struct smu_context *smu = handle; in smu_get_max_sustainable_clocks_by_dc() local
2846 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_max_sustainable_clocks_by_dc()
2849 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) in smu_get_max_sustainable_clocks_by_dc()
2850 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); in smu_get_max_sustainable_clocks_by_dc()
2859 struct smu_context *smu = handle; in smu_get_uclk_dpm_states() local
2862 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_uclk_dpm_states()
2865 if (smu->ppt_funcs->get_uclk_dpm_states) in smu_get_uclk_dpm_states()
2866 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); in smu_get_uclk_dpm_states()
2873 struct smu_context *smu = handle; in smu_get_current_power_state() local
2876 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_current_power_state()
2879 if (smu->ppt_funcs->get_current_power_state) in smu_get_current_power_state()
2880 pm_state = smu->ppt_funcs->get_current_power_state(smu); in smu_get_current_power_state()
2888 struct smu_context *smu = handle; in smu_get_dpm_clock_table() local
2891 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_dpm_clock_table()
2894 if (smu->ppt_funcs->get_dpm_clock_table) in smu_get_dpm_clock_table()
2895 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); in smu_get_dpm_clock_table()
2902 struct smu_context *smu = handle; in smu_sys_get_gpu_metrics() local
2904 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_gpu_metrics()
2907 if (!smu->ppt_funcs->get_gpu_metrics) in smu_sys_get_gpu_metrics()
2910 return smu->ppt_funcs->get_gpu_metrics(smu, table); in smu_sys_get_gpu_metrics()
2915 struct smu_context *smu = handle; in smu_enable_mgpu_fan_boost() local
2918 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_enable_mgpu_fan_boost()
2921 if (smu->ppt_funcs->enable_mgpu_fan_boost) in smu_enable_mgpu_fan_boost()
2922 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); in smu_enable_mgpu_fan_boost()
2930 struct smu_context *smu = handle; in smu_gfx_state_change_set() local
2933 if (smu->ppt_funcs->gfx_state_change_set) in smu_gfx_state_change_set()
2934 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); in smu_gfx_state_change_set()
2939 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) in smu_handle_passthrough_sbr() argument
2943 if (smu->ppt_funcs->smu_handle_passthrough_sbr) in smu_handle_passthrough_sbr()
2944 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); in smu_handle_passthrough_sbr()
2949 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) in smu_get_ecc_info() argument
2953 if (smu->ppt_funcs && in smu_get_ecc_info()
2954 smu->ppt_funcs->get_ecc_info) in smu_get_ecc_info()
2955 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); in smu_get_ecc_info()
2963 struct smu_context *smu = handle; in smu_get_prv_buffer_details() local
2964 struct smu_table_context *smu_table = &smu->smu_table; in smu_get_prv_buffer_details()
3035 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, in smu_wait_for_event() argument
3040 if (smu->ppt_funcs->wait_for_event) in smu_wait_for_event()
3041 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); in smu_wait_for_event()
3046 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) in smu_stb_collect_info() argument
3049 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) in smu_stb_collect_info()
3053 if (size != smu->stb_context.stb_buf_size) in smu_stb_collect_info()
3057 * No need to lock smu mutex as we access STB directly through MMIO in smu_stb_collect_info()
3058 * and not going through SMU messaging route (for now at least). in smu_stb_collect_info()
3061 return smu->ppt_funcs->stb_collect_info(smu, buf, size); in smu_stb_collect_info()
3069 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_open() local
3073 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); in smu_stb_debugfs_open()
3077 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); in smu_stb_debugfs_open()
3094 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_read() local
3103 smu->stb_context.stb_buf_size); in smu_stb_debugfs_read()
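
The STB debugfs path snapshots the whole buffer once at open: allocate stb_buf_size bytes, pull the contents through smu_stb_collect_info(), then let reads copy from that snapshot. Condensed from the hits above, with the error path assumed:

    buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
    if (r)
            goto out;  /* the real handler frees buf on failure */
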
3135 struct smu_context *smu = adev->powerplay.pp_handle; in amdgpu_smu_stb_debug_fs_init() local
3137 if (!smu || (!smu->stb_context.stb_buf_size)) in amdgpu_smu_stb_debug_fs_init()
3145 smu->stb_context.stb_buf_size); in amdgpu_smu_stb_debug_fs_init()
3149 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_pages_num() argument
3153 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) in smu_send_hbm_bad_pages_num()
3154 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); in smu_send_hbm_bad_pages_num()
3159 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_channel_flag() argument
3163 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) in smu_send_hbm_bad_channel_flag()
3164 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); in smu_send_hbm_bad_channel_flag()