Lines Matching full:smu

62 static const char *smu_get_message_name(struct smu_context *smu, in smu_get_message_name() argument
66 return "unknown smu message"; in smu_get_message_name()
71 static void smu_cmn_read_arg(struct smu_context *smu, in smu_cmn_read_arg() argument
74 struct amdgpu_device *adev = smu->adev; in smu_cmn_read_arg()
79 /* Redefine the SMU error codes here.
82 * when the SMU has exported a unified header file containing these
83 * macros, which header file we can just include and use the SMU's
84 * macros. At the moment, these error codes are defined by the SMU
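The redefined response codes themselves do not contain "smu" and so are absent from this listing; judging from the statuses documented for __smu_cmn_poll_stat() and decoded in __smu_cmn_reg_print_error() below, the definitions look roughly like this (a sketch, with macro names assumed, not the verbatim source):

/* Sketch of the response codes the comment above describes; the values
 * follow the statuses documented for __smu_cmn_poll_stat() and printed
 * by __smu_cmn_reg_print_error(). Macro names are assumptions. */
#define SMU_RESP_NONE           0x0    /* no response yet: SMU still busy */
#define SMU_RESP_OK             0x1    /* command completed, result: success */
#define SMU_RESP_CMD_FAIL       0xFF   /* command completed, result: failure */
#define SMU_RESP_CMD_UNKNOWN    0xFE   /* unknown command */
#define SMU_RESP_CMD_BAD_PREREQ 0xFD   /* valid command, bad prerequisites */
#define SMU_RESP_BUSY_OTHER     0xFC   /* rejected: SMU busy with other work */
#define SMU_RESP_DEBUG_END      0xFB   /* end of a debug data dump */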
96 * __smu_cmn_poll_stat -- poll for a status from the SMU
97 * @smu: a pointer to SMU context
99 * Returns the status of the SMU, which could be,
100 * 0, the SMU is busy with your previous command;
105 * 0xFC, the command was rejected as the SMU is busy;
110 * maintained by the SMU FW team, so that we're impervious to firmware
115 static u32 __smu_cmn_poll_stat(struct smu_context *smu) in __smu_cmn_poll_stat() argument
117 struct amdgpu_device *adev = smu->adev; in __smu_cmn_poll_stat()
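Only the signature and the adev local of __smu_cmn_poll_stat() match here; the body is a bounded busy-wait on the MP1 C2PMSG_90 response register (implied by the reg_c2pmsg_90 parameter of __smu_cmn_reg2errno() below). A minimal sketch, assuming the usual SOC15 register accessors and a timeout derived from adev->usec_timeout:

static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;	/* scaling factor assumed */
	u32 reg;

	for (; timeout > 0; timeout--) {
		reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;	/* the SMU posted a response */
		udelay(1);
	}

	return reg;	/* 0 means the SMU is still busy */
}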
132 static void __smu_cmn_reg_print_error(struct smu_context *smu, in __smu_cmn_reg_print_error() argument
138 struct amdgpu_device *adev = smu->adev; in __smu_cmn_reg_print_error()
139 const char *message = smu_get_message_name(smu, msg); in __smu_cmn_reg_print_error()
144 "SMU: I'm not done with your previous command!"); in __smu_cmn_reg_print_error()
147 /* The SMU executed the command. It completed with a in __smu_cmn_reg_print_error()
152 /* The SMU executed the command. It completed with an in __smu_cmn_reg_print_error()
158 "SMU: unknown command: index:%d param:0x%08X message:%s", in __smu_cmn_reg_print_error()
163 "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s", in __smu_cmn_reg_print_error()
168 "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", in __smu_cmn_reg_print_error()
173 "SMU: I'm debugging!"); in __smu_cmn_reg_print_error()
177 "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", in __smu_cmn_reg_print_error()
183 static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90) in __smu_cmn_reg2errno() argument
189 /* The SMU is busy--still executing your command. in __smu_cmn_reg2errno()
203 /* Unknown command--ignored by the SMU. in __smu_cmn_reg2errno()
213 /* The SMU is busy with other commands. The client in __smu_cmn_reg2errno()
219 /* Unknown or debug response from the SMU. in __smu_cmn_reg2errno()
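Putting the in-function comments above together, __smu_cmn_reg2errno() is a switch that maps each response code to a Linux errno. A sketch consistent with the fragments; the errno chosen per case is an assumption, except -EREMOTEIO, which the kernel-doc for smu_cmn_send_smc_msg_with_param() below names for unclassified SMU errors:

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	switch (reg_c2pmsg_90) {
	case 0x0:
		/* The SMU is busy--still executing your command. */
		return -ETIME;
	case 0x1:
		/* Command completed, execution result: success. */
		return 0;
	case 0xFF:
		/* Command completed, execution result: failure. */
		return -EIO;
	case 0xFE:
		/* Unknown command--ignored by the SMU. */
		return -EOPNOTSUPP;
	case 0xFD:
		/* Valid command, but bad prerequisites. */
		return -EINVAL;
	case 0xFC:
		/* The SMU is busy with other commands; the client should retry. */
		return -EBUSY;
	default:
		/* Unknown or debug response from the SMU. */
		return -EREMOTEIO;
	}
}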
228 static void __smu_cmn_send_msg(struct smu_context *smu, in __smu_cmn_send_msg() argument
232 struct amdgpu_device *adev = smu->adev; in __smu_cmn_send_msg()
241 * @smu: pointer to an SMU context
243 * @param: message parameter to send to the SMU
245 * Send a message to the SMU with the parameter passed. Do not wait
252 int smu_cmn_send_msg_without_waiting(struct smu_context *smu, in smu_cmn_send_msg_without_waiting() argument
259 if (smu->adev->no_hw_access) in smu_cmn_send_msg_without_waiting()
262 reg = __smu_cmn_poll_stat(smu); in smu_cmn_send_msg_without_waiting()
263 res = __smu_cmn_reg2errno(smu, reg); in smu_cmn_send_msg_without_waiting()
268 __smu_cmn_send_msg(smu, msg_index, param); in smu_cmn_send_msg_without_waiting()
275 * smu_cmn_wait_for_response -- wait for response from the SMU
276 * @smu: pointer to an SMU context
278 * Wait for status from the SMU.
284 int smu_cmn_wait_for_response(struct smu_context *smu) in smu_cmn_wait_for_response() argument
288 reg = __smu_cmn_poll_stat(smu); in smu_cmn_wait_for_response()
289 return __smu_cmn_reg2errno(smu, reg); in smu_cmn_wait_for_response()
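Together these two helpers split a transaction in half: smu_cmn_send_msg_without_waiting() confirms the SMU is idle and fires the message, and smu_cmn_wait_for_response() later reaps the status. A hypothetical caller (msg_index would come from smu_cmn_to_asic_specific_index()):

	/* Hypothetical fire-and-reap usage of the two helpers above. */
	int ret;

	ret = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
	if (ret)
		return ret;

	/* ... unrelated setup work while the SMU executes ... */

	ret = smu_cmn_wait_for_response(smu);
	if (ret)
		dev_err(smu->adev->dev, "SMU message %u failed: %d\n",
			msg_index, ret);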
294 * @smu: pointer to an SMU context
296 * @param: parameter to send to the SMU
297 * @read_arg: pointer to u32 to return a value from the SMU back
300 * Send the message @msg with parameter @param to the SMU, wait for
301 * completion of the command, and return back a value from the SMU in
308 * If we weren't able to send the message to the SMU, we also print
312 * -EREMOTEIO, indicating that the SMU returned back an
322 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, in smu_cmn_send_smc_msg_with_param() argument
330 if (smu->adev->no_hw_access) in smu_cmn_send_smc_msg_with_param()
333 index = smu_cmn_to_asic_specific_index(smu, in smu_cmn_send_smc_msg_with_param()
339 mutex_lock(&smu->message_lock); in smu_cmn_send_smc_msg_with_param()
340 reg = __smu_cmn_poll_stat(smu); in smu_cmn_send_smc_msg_with_param()
341 res = __smu_cmn_reg2errno(smu, reg); in smu_cmn_send_smc_msg_with_param()
345 __smu_cmn_reg_print_error(smu, reg, index, param, msg); in smu_cmn_send_smc_msg_with_param()
348 __smu_cmn_send_msg(smu, (uint16_t) index, param); in smu_cmn_send_smc_msg_with_param()
349 reg = __smu_cmn_poll_stat(smu); in smu_cmn_send_smc_msg_with_param()
350 res = __smu_cmn_reg2errno(smu, reg); in smu_cmn_send_smc_msg_with_param()
352 __smu_cmn_reg_print_error(smu, reg, index, param, msg); in smu_cmn_send_smc_msg_with_param()
354 smu_cmn_read_arg(smu, read_arg); in smu_cmn_send_smc_msg_with_param()
356 mutex_unlock(&smu->message_lock); in smu_cmn_send_smc_msg_with_param()
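The matches above show the whole contract: take message_lock, verify the SMU is free, send, poll for completion, and optionally read the argument register back. A usage example modeled on smu_cmn_get_enabled_32_bits_mask() further down:

	/* Ask for word 0 of the enabled-features mask and read the reply
	 * back through @read_arg, as smu_cmn_get_enabled_32_bits_mask()
	 * does below. */
	uint32_t mask_low;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetEnabledSmuFeatures,
					      0,	/* word index */
					      &mask_low);
	if (ret)
		return ret;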
360 int smu_cmn_send_smc_msg(struct smu_context *smu, in smu_cmn_send_smc_msg() argument
364 return smu_cmn_send_smc_msg_with_param(smu, in smu_cmn_send_smc_msg()
370 int smu_cmn_to_asic_specific_index(struct smu_context *smu, in smu_cmn_to_asic_specific_index() argument
380 !smu->message_map) in smu_cmn_to_asic_specific_index()
383 msg_mapping = smu->message_map[index]; in smu_cmn_to_asic_specific_index()
387 if (amdgpu_sriov_vf(smu->adev) && in smu_cmn_to_asic_specific_index()
395 !smu->clock_map) in smu_cmn_to_asic_specific_index()
398 mapping = smu->clock_map[index]; in smu_cmn_to_asic_specific_index()
406 !smu->feature_map) in smu_cmn_to_asic_specific_index()
409 mapping = smu->feature_map[index]; in smu_cmn_to_asic_specific_index()
417 !smu->table_map) in smu_cmn_to_asic_specific_index()
420 mapping = smu->table_map[index]; in smu_cmn_to_asic_specific_index()
428 !smu->pwr_src_map) in smu_cmn_to_asic_specific_index()
431 mapping = smu->pwr_src_map[index]; in smu_cmn_to_asic_specific_index()
439 !smu->workload_map) in smu_cmn_to_asic_specific_index()
442 mapping = smu->workload_map[index]; in smu_cmn_to_asic_specific_index()
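Each mapping case above follows the same shape: bounds-check the common index, require the per-ASIC map, and reject entries without a valid mapping; the message case additionally honors the amdgpu_sriov_vf() restriction visible in the matches. A sketch of the message case (the enumerator and field names are assumptions):

	case CMN2ASIC_MAPPING_MSG:	/* mapping-type enumerator assumed */
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		/* Some messages are not valid when running as an SR-IOV VF;
		 * see the amdgpu_sriov_vf() check in the matches above. */
		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;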
453 int smu_cmn_feature_is_supported(struct smu_context *smu, in smu_cmn_feature_is_supported() argument
456 struct smu_feature *feature = &smu->smu_feature; in smu_cmn_feature_is_supported()
460 feature_id = smu_cmn_to_asic_specific_index(smu, in smu_cmn_feature_is_supported()
475 int smu_cmn_feature_is_enabled(struct smu_context *smu, in smu_cmn_feature_is_enabled() argument
478 struct smu_feature *feature = &smu->smu_feature; in smu_cmn_feature_is_enabled()
479 struct amdgpu_device *adev = smu->adev; in smu_cmn_feature_is_enabled()
483 if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH) in smu_cmn_feature_is_enabled()
486 feature_id = smu_cmn_to_asic_specific_index(smu, in smu_cmn_feature_is_enabled()
501 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, in smu_cmn_clk_dpm_is_enabled() argument
522 if (!smu_cmn_feature_is_enabled(smu, feature_id)) in smu_cmn_clk_dpm_is_enabled()
528 int smu_cmn_get_enabled_mask(struct smu_context *smu, in smu_cmn_get_enabled_mask() argument
533 struct smu_feature *feature = &smu->smu_feature; in smu_cmn_get_enabled_mask()
540 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); in smu_cmn_get_enabled_mask()
544 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); in smu_cmn_get_enabled_mask()
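The two replies then plausibly land in the caller's buffer as the low and high words of the 64-bit feature mask (a sketch; the output layout is an assumption):

	feature_mask[0] = feature_mask_low;	/* bits  0..31 */
	feature_mask[1] = feature_mask_high;	/* bits 32..63 */
	return 0;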
558 int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu, in smu_cmn_get_enabled_32_bits_mask() argument
564 struct smu_feature *feature = &smu->smu_feature; in smu_cmn_get_enabled_32_bits_mask()
571 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0, in smu_cmn_get_enabled_32_bits_mask()
577 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1, in smu_cmn_get_enabled_32_bits_mask()
608 int smu_cmn_feature_update_enable_state(struct smu_context *smu, in smu_cmn_feature_update_enable_state() argument
612 struct smu_feature *feature = &smu->smu_feature; in smu_cmn_feature_update_enable_state()
616 ret = smu_cmn_send_smc_msg_with_param(smu, in smu_cmn_feature_update_enable_state()
622 ret = smu_cmn_send_smc_msg_with_param(smu, in smu_cmn_feature_update_enable_state()
629 ret = smu_cmn_send_smc_msg_with_param(smu, in smu_cmn_feature_update_enable_state()
635 ret = smu_cmn_send_smc_msg_with_param(smu, in smu_cmn_feature_update_enable_state()
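The four sends above pair off as Enable/Disable x Low/High: the 64-bit mask is split into 32-bit halves, one message per half. A sketch of the enable path, assuming the SMU_MSG_EnableSmuFeaturesLow/High message names and the kernel's lower_32_bits()/upper_32_bits() helpers:

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		/* same pattern with SMU_MSG_DisableSmuFeaturesLow/High */
	}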
655 int smu_cmn_feature_set_enabled(struct smu_context *smu, in smu_cmn_feature_set_enabled() argument
659 struct smu_feature *feature = &smu->smu_feature; in smu_cmn_feature_set_enabled()
662 feature_id = smu_cmn_to_asic_specific_index(smu, in smu_cmn_feature_set_enabled()
670 return smu_cmn_feature_update_enable_state(smu, in smu_cmn_feature_set_enabled()
681 static const char *smu_get_feature_name(struct smu_context *smu, in smu_get_feature_name() argument
685 return "unknown smu feature"; in smu_get_feature_name()
689 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, in smu_cmn_get_pp_feature_mask() argument
699 if (!smu->is_apu) { in smu_cmn_get_pp_feature_mask()
700 ret = smu_cmn_get_enabled_mask(smu, in smu_cmn_get_pp_feature_mask()
706 ret = smu_cmn_get_enabled_32_bits_mask(smu, in smu_cmn_get_pp_feature_mask()
719 feature_index = smu_cmn_to_asic_specific_index(smu, in smu_cmn_get_pp_feature_mask()
737 smu_get_feature_name(smu, sort_feature[i]), in smu_cmn_get_pp_feature_mask()
739 !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ? in smu_cmn_get_pp_feature_mask()
746 int smu_cmn_set_pp_feature_mask(struct smu_context *smu, in smu_cmn_set_pp_feature_mask() argument
755 ret = smu_cmn_get_enabled_mask(smu, in smu_cmn_set_pp_feature_mask()
768 ret = smu_cmn_feature_update_enable_state(smu, in smu_cmn_set_pp_feature_mask()
775 ret = smu_cmn_feature_update_enable_state(smu, in smu_cmn_set_pp_feature_mask()
790 * @smu: smu_context pointer
801 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, in smu_cmn_disable_all_features_with_exception() argument
805 struct smu_feature *feature = &smu->smu_feature; in smu_cmn_disable_all_features_with_exception()
810 skipped_feature_id = smu_cmn_to_asic_specific_index(smu, in smu_cmn_disable_all_features_with_exception()
827 return smu_cmn_feature_update_enable_state(smu, in smu_cmn_disable_all_features_with_exception()
833 int smu_cmn_get_smc_version(struct smu_context *smu, in smu_cmn_get_smc_version() argument
842 if (smu->smc_fw_if_version && smu->smc_fw_version) in smu_cmn_get_smc_version()
845 *if_version = smu->smc_fw_if_version; in smu_cmn_get_smc_version()
848 *smu_version = smu->smc_fw_version; in smu_cmn_get_smc_version()
854 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); in smu_cmn_get_smc_version()
858 smu->smc_fw_if_version = *if_version; in smu_cmn_get_smc_version()
862 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); in smu_cmn_get_smc_version()
866 smu->smc_fw_version = *smu_version; in smu_cmn_get_smc_version()
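The early-return pair above is a cache: once both versions have been read from the SMU they are served from smu_context on later calls. A hypothetical caller:

	uint32_t if_version, smu_version;
	int ret;

	/* First call queries the SMU; later calls hit the cached copies. */
	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;
	dev_info(smu->adev->dev, "SMU FW 0x%08x, driver IF 0x%08x\n",
		 smu_version, if_version);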
872 int smu_cmn_update_table(struct smu_context *smu, in smu_cmn_update_table() argument
878 struct smu_table_context *smu_table = &smu->smu_table; in smu_cmn_update_table()
879 struct amdgpu_device *adev = smu->adev; in smu_cmn_update_table()
881 int table_id = smu_cmn_to_asic_specific_index(smu, in smu_cmn_update_table()
900 ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? in smu_cmn_update_table()
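The ternary above selects the transfer direction; the full send plausibly packs the ASIC table id and the caller's argument into the single message parameter (a sketch; the exact packing is an assumption):

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);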
916 int smu_cmn_write_watermarks_table(struct smu_context *smu) in smu_cmn_write_watermarks_table() argument
918 void *watermarks_table = smu->smu_table.watermarks_table; in smu_cmn_write_watermarks_table()
923 return smu_cmn_update_table(smu, in smu_cmn_write_watermarks_table()
930 int smu_cmn_write_pptable(struct smu_context *smu) in smu_cmn_write_pptable() argument
932 void *pptable = smu->smu_table.driver_pptable; in smu_cmn_write_pptable()
934 return smu_cmn_update_table(smu, in smu_cmn_write_pptable()
941 int smu_cmn_get_metrics_table_locked(struct smu_context *smu, in smu_cmn_get_metrics_table_locked() argument
945 struct smu_table_context *smu_table = &smu->smu_table; in smu_cmn_get_metrics_table_locked()
953 ret = smu_cmn_update_table(smu, in smu_cmn_get_metrics_table_locked()
959 dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); in smu_cmn_get_metrics_table_locked()
971 int smu_cmn_get_metrics_table(struct smu_context *smu, in smu_cmn_get_metrics_table() argument
977 mutex_lock(&smu->metrics_lock); in smu_cmn_get_metrics_table()
978 ret = smu_cmn_get_metrics_table_locked(smu, in smu_cmn_get_metrics_table()
981 mutex_unlock(&smu->metrics_lock); in smu_cmn_get_metrics_table()
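As the matches show, smu_cmn_get_metrics_table() is just the locked variant bracketed by metrics_lock. A hypothetical caller fetching a fresh copy:

	SmuMetrics_t metrics;	/* per-ASIC metrics struct; name illustrative */
	int ret;

	ret = smu_cmn_get_metrics_table(smu, &metrics,
					true /* bypass the cached table */);
	if (ret)
		return ret;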
1029 int smu_cmn_set_mp1_state(struct smu_context *smu, in smu_cmn_set_mp1_state() argument
1050 ret = smu_cmn_send_smc_msg(smu, msg, NULL); in smu_cmn_set_mp1_state()
1052 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); in smu_cmn_set_mp1_state()
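The "[PrepareMp1]" failure message implies the SMU_MSG_PrepareMp1For* family; the msg selection before the send is plausibly a switch over the requested MP1 state (a sketch; the case list is an assumption):

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;	/* nothing to prepare */
	}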