1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/delay.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27
28 #include "hwmgr.h"
29 #include "amd_powerplay.h"
30 #include "vega20_smumgr.h"
31 #include "hardwaremanager.h"
32 #include "ppatomfwctrl.h"
33 #include "atomfirmware.h"
34 #include "cgs_common.h"
35 #include "vega20_powertune.h"
36 #include "vega20_inc.h"
37 #include "pppcielanes.h"
38 #include "vega20_hwmgr.h"
39 #include "vega20_processpptables.h"
40 #include "vega20_pptable.h"
41 #include "vega20_thermal.h"
42 #include "vega20_ppsmc.h"
43 #include "pp_debug.h"
44 #include "amd_pcie_helpers.h"
45 #include "ppinterrupt.h"
46 #include "pp_overdriver.h"
47 #include "pp_thermal.h"
48 #include "soc15_common.h"
49 #include "vega20_baco.h"
50 #include "smuio/smuio_9_0_offset.h"
51 #include "smuio/smuio_9_0_sh_mask.h"
52 #include "nbio/nbio_7_4_sh_mask.h"
53
54 #define smnPCIE_LC_SPEED_CNTL 0x11140290
55 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
56
57 #define LINK_WIDTH_MAX 6
58 #define LINK_SPEED_MAX 3
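/* Decode tables for the reported PCIe link state: link_width[] holds lane
 * counts and link_speed[] holds link rates in 0.1 GT/s (2.5/5.0/8.0/16.0
 * GT/s for Gen1-Gen4).
 */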
59 static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
60 static const int link_speed[] = {25, 50, 80, 160};
61
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
63 {
64 struct vega20_hwmgr *data =
65 (struct vega20_hwmgr *)(hwmgr->backend);
66
67 data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
68 data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
69 data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
70 data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
71 data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;
72
73 data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
74 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
75 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
76 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
77 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
78 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
79 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
80 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
81 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
82 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
83 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
84 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
85 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
86
/*
 * Disable the following features for now:
 *   GFXCLK DS
 *   SOCCLK DS
 *   LCLK DS
 *   DCEFCLK DS
 *   FCLK DS
 *   MP1CLK DS
 *   MP0CLK DS
 */
97 data->registry_data.disallowed_features = 0xE0041C00;
98 /* ECC feature should be disabled on old SMUs */
99 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
100 if (hwmgr->smu_version < 0x282100)
101 data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
102
103 if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
104 data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;
105
106 if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
107 data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;
108
109 if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
110 data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;
111
112 if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
113 data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;
114
115 if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
116 data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;
117
118 if (!(hwmgr->feature_mask & PP_ULV_MASK))
119 data->registry_data.disallowed_features |= FEATURE_ULV_MASK;
120
121 if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
122 data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;
123
124 data->registry_data.od_state_in_dc_support = 0;
125 data->registry_data.thermal_support = 1;
126 data->registry_data.skip_baco_hardware = 0;
127
128 data->registry_data.log_avfs_param = 0;
129 data->registry_data.sclk_throttle_low_notification = 1;
130 data->registry_data.force_dpm_high = 0;
131 data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
132
133 data->registry_data.didt_support = 0;
134 if (data->registry_data.didt_support) {
135 data->registry_data.didt_mode = 6;
136 data->registry_data.sq_ramping_support = 1;
137 data->registry_data.db_ramping_support = 0;
138 data->registry_data.td_ramping_support = 0;
139 data->registry_data.tcp_ramping_support = 0;
140 data->registry_data.dbr_ramping_support = 0;
141 data->registry_data.edc_didt_support = 1;
142 data->registry_data.gc_didt_support = 0;
143 data->registry_data.psm_didt_support = 0;
144 }
145
146 data->registry_data.pcie_lane_override = 0xff;
147 data->registry_data.pcie_speed_override = 0xff;
148 data->registry_data.pcie_clock_override = 0xffffffff;
149 data->registry_data.regulator_hot_gpio_support = 1;
150 data->registry_data.ac_dc_switch_gpio_support = 0;
151 data->registry_data.quick_transition_support = 0;
152 data->registry_data.zrpm_start_temp = 0xffff;
153 data->registry_data.zrpm_stop_temp = 0xffff;
154 data->registry_data.od8_feature_enable = 1;
155 data->registry_data.disable_water_mark = 0;
156 data->registry_data.disable_pp_tuning = 0;
157 data->registry_data.disable_xlpp_tuning = 0;
158 data->registry_data.disable_workload_policy = 0;
159 data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
160 data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
161 data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
162 data->registry_data.force_workload_policy_mask = 0;
163 data->registry_data.disable_3d_fs_detection = 0;
164 data->registry_data.fps_support = 1;
165 data->registry_data.disable_auto_wattman = 1;
166 data->registry_data.auto_wattman_debug = 0;
167 data->registry_data.auto_wattman_sample_period = 100;
168 data->registry_data.fclk_gfxclk_ratio = 0;
169 data->registry_data.auto_wattman_threshold = 50;
170 data->registry_data.gfxoff_controlled_by_driver = 1;
171 data->gfxoff_allowed = false;
172 data->counter_gfxoff = 0;
173 data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
174 }
175
static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
177 {
178 struct vega20_hwmgr *data =
179 (struct vega20_hwmgr *)(hwmgr->backend);
180 struct amdgpu_device *adev = hwmgr->adev;
181
182 if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
183 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
184 PHM_PlatformCaps_ControlVDDCI);
185
186 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
187 PHM_PlatformCaps_TablelessHardwareInterface);
188
189 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
190 PHM_PlatformCaps_BACO);
191
192 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
193 PHM_PlatformCaps_EnableSMU7ThermalManagement);
194
195 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
196 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
197 PHM_PlatformCaps_UVDPowerGating);
198
199 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
200 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
201 PHM_PlatformCaps_VCEPowerGating);
202
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_UnTabledHardwareInterface);
205
206 if (data->registry_data.od8_feature_enable)
207 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
208 PHM_PlatformCaps_OD8inACSupport);
209
210 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
211 PHM_PlatformCaps_ActivityReporting);
212 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
213 PHM_PlatformCaps_FanSpeedInTableIsRPM);
214
215 if (data->registry_data.od_state_in_dc_support) {
216 if (data->registry_data.od8_feature_enable)
217 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
218 PHM_PlatformCaps_OD8inDCSupport);
219 }
220
221 if (data->registry_data.thermal_support &&
222 data->registry_data.fuzzy_fan_control_support &&
223 hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
224 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
225 PHM_PlatformCaps_ODFuzzyFanControlSupport);
226
227 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_DynamicPowerManagement);
229 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_SMC);
231 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_ThermalPolicyDelay);
233
234 if (data->registry_data.force_dpm_high)
235 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
237
238 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_DynamicUVDState);
240
241 if (data->registry_data.sclk_throttle_low_notification)
242 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
243 PHM_PlatformCaps_SclkThrottleLowNotification);
244
245 /* power tune caps */
246 /* assume disabled */
247 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
248 PHM_PlatformCaps_PowerContainment);
249 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
250 PHM_PlatformCaps_DiDtSupport);
251 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
252 PHM_PlatformCaps_SQRamping);
253 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
254 PHM_PlatformCaps_DBRamping);
255 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
256 PHM_PlatformCaps_TDRamping);
257 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
258 PHM_PlatformCaps_TCPRamping);
259 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
260 PHM_PlatformCaps_DBRRamping);
261 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
262 PHM_PlatformCaps_DiDtEDCEnable);
263 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
264 PHM_PlatformCaps_GCEDC);
265 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
266 PHM_PlatformCaps_PSM);
267
268 if (data->registry_data.didt_support) {
269 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
270 PHM_PlatformCaps_DiDtSupport);
271 if (data->registry_data.sq_ramping_support)
272 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
273 PHM_PlatformCaps_SQRamping);
274 if (data->registry_data.db_ramping_support)
275 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
276 PHM_PlatformCaps_DBRamping);
277 if (data->registry_data.td_ramping_support)
278 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
279 PHM_PlatformCaps_TDRamping);
280 if (data->registry_data.tcp_ramping_support)
281 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
282 PHM_PlatformCaps_TCPRamping);
283 if (data->registry_data.dbr_ramping_support)
284 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
285 PHM_PlatformCaps_DBRRamping);
286 if (data->registry_data.edc_didt_support)
287 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
288 PHM_PlatformCaps_DiDtEDCEnable);
289 if (data->registry_data.gc_didt_support)
290 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
291 PHM_PlatformCaps_GCEDC);
292 if (data->registry_data.psm_didt_support)
293 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
294 PHM_PlatformCaps_PSM);
295 }
296
297 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
298 PHM_PlatformCaps_RegulatorHot);
299
300 if (data->registry_data.ac_dc_switch_gpio_support) {
301 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
302 PHM_PlatformCaps_AutomaticDCTransition);
303 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
304 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
305 }
306
307 if (data->registry_data.quick_transition_support) {
308 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
309 PHM_PlatformCaps_AutomaticDCTransition);
310 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
311 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
312 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
313 PHM_PlatformCaps_Falcon_QuickTransition);
314 }
315
316 if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
317 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
318 PHM_PlatformCaps_LowestUclkReservedForUlv);
319 if (data->lowest_uclk_reserved_for_ulv == 1)
320 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
321 PHM_PlatformCaps_LowestUclkReservedForUlv);
322 }
323
324 if (data->registry_data.custom_fan_support)
325 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
326 PHM_PlatformCaps_CustomFanControlSupport);
327
328 return 0;
329 }
330
static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
332 {
333 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
334 struct amdgpu_device *adev = hwmgr->adev;
335 uint32_t top32, bottom32;
336 int i;
337
338 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
339 FEATURE_DPM_PREFETCHER_BIT;
340 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
341 FEATURE_DPM_GFXCLK_BIT;
342 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
343 FEATURE_DPM_UCLK_BIT;
344 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
345 FEATURE_DPM_SOCCLK_BIT;
346 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
347 FEATURE_DPM_UVD_BIT;
348 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
349 FEATURE_DPM_VCE_BIT;
350 data->smu_features[GNLD_ULV].smu_feature_id =
351 FEATURE_ULV_BIT;
352 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
353 FEATURE_DPM_MP0CLK_BIT;
354 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
355 FEATURE_DPM_LINK_BIT;
356 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
357 FEATURE_DPM_DCEFCLK_BIT;
358 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
359 FEATURE_DS_GFXCLK_BIT;
360 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
361 FEATURE_DS_SOCCLK_BIT;
362 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
363 FEATURE_DS_LCLK_BIT;
364 data->smu_features[GNLD_PPT].smu_feature_id =
365 FEATURE_PPT_BIT;
366 data->smu_features[GNLD_TDC].smu_feature_id =
367 FEATURE_TDC_BIT;
368 data->smu_features[GNLD_THERMAL].smu_feature_id =
369 FEATURE_THERMAL_BIT;
370 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
371 FEATURE_GFX_PER_CU_CG_BIT;
372 data->smu_features[GNLD_RM].smu_feature_id =
373 FEATURE_RM_BIT;
374 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
375 FEATURE_DS_DCEFCLK_BIT;
376 data->smu_features[GNLD_ACDC].smu_feature_id =
377 FEATURE_ACDC_BIT;
378 data->smu_features[GNLD_VR0HOT].smu_feature_id =
379 FEATURE_VR0HOT_BIT;
380 data->smu_features[GNLD_VR1HOT].smu_feature_id =
381 FEATURE_VR1HOT_BIT;
382 data->smu_features[GNLD_FW_CTF].smu_feature_id =
383 FEATURE_FW_CTF_BIT;
384 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
385 FEATURE_LED_DISPLAY_BIT;
386 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
387 FEATURE_FAN_CONTROL_BIT;
388 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
389 data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
390 data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
391 data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
392 data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
393 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
394 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
395 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
396 data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
397
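/* Build each feature's 64-bit enable bitmap and mark it allowed unless the
 * corresponding bit is set in the disallowed_features mask.
 */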
398 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
399 data->smu_features[i].smu_feature_bitmap =
400 (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
401 data->smu_features[i].allowed =
402 ((data->registry_data.disallowed_features >> i) & 1) ?
403 false : true;
404 }
405
406 /* Get the SN to turn into a Unique ID */
407 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
408 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
409
410 adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
411 }
412
static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
414 {
415 return 0;
416 }
417
static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
419 {
420 kfree(hwmgr->backend);
421 hwmgr->backend = NULL;
422
423 return 0;
424 }
425
static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
427 {
428 struct vega20_hwmgr *data;
429 struct amdgpu_device *adev = hwmgr->adev;
430
431 data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
432 if (data == NULL)
433 return -ENOMEM;
434
435 hwmgr->backend = data;
436
437 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
438 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
439 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
440
441 vega20_set_default_registry_data(hwmgr);
442
443 data->disable_dpm_mask = 0xff;
444
445 /* need to set voltage control types before EVV patching */
446 data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
447 data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
448 data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;
449
450 data->water_marks_bitmap = 0;
451 data->avfs_exist = false;
452
453 vega20_set_features_platform_caps(hwmgr);
454
455 vega20_init_dpm_defaults(hwmgr);
456
457 /* Parse pptable data read from VBIOS */
458 vega20_set_private_data_based_on_pptable(hwmgr);
459
460 data->is_tlu_enabled = false;
461
462 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
463 VEGA20_MAX_HARDWARE_POWERLEVELS;
464 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
465 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
466
467 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
468 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
469 hwmgr->platform_descriptor.clockStep.engineClock = 500;
470 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
471
472 data->total_active_cus = adev->gfx.cu_info.number;
473 data->is_custom_profile_set = false;
474
475 return 0;
476 }
477
static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
479 {
480 struct vega20_hwmgr *data =
481 (struct vega20_hwmgr *)(hwmgr->backend);
482
483 data->low_sclk_interrupt_threshold = 0;
484
485 return 0;
486 }
487
static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
489 {
490 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
491 int ret = 0;
492 bool use_baco = (amdgpu_in_reset(adev) &&
493 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
494 (adev->in_runpm && amdgpu_asic_supports_baco(adev));
495
496 ret = vega20_init_sclk_threshold(hwmgr);
497 PP_ASSERT_WITH_CODE(!ret,
498 "Failed to init sclk threshold!",
499 return ret);
500
501 if (use_baco) {
502 ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
503 if (ret)
504 pr_err("Failed to apply vega20 baco workaround!\n");
505 }
506
507 return ret;
508 }
509
/*
 * @fn vega20_init_dpm_state
 * @brief Initialize the Soft Min/Max and Hard Min/Max levels of a DPM state:
 *        minimums to 0 and maximums to VG20_CLOCK_MAX_DEFAULT.
 *
 * @param dpm_state - the address of the DPM table to initialize.
 * @return None.
 */
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
518 {
519 dpm_state->soft_min_level = 0x0;
520 dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
521 dpm_state->hard_min_level = 0x0;
522 dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
523 }
524
static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
527 {
528 int ret = 0;
529
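/* An index of 0xFF in GetDpmFreqByIndex requests the number of DPM levels
 * for the given clock rather than a specific level's frequency.
 */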
530 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
531 PPSMC_MSG_GetDpmFreqByIndex,
532 (clk_id << 16 | 0xFF),
533 num_of_levels);
534 PP_ASSERT_WITH_CODE(!ret,
535 "[GetNumOfDpmLevel] failed to get dpm levels!",
536 return ret);
537
538 return ret;
539 }
540
static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
543 {
544 int ret = 0;
545
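/* Message argument layout: clock ID in bits 31:16, level index in bits 15:0. */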
546 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
547 PPSMC_MSG_GetDpmFreqByIndex,
548 (clk_id << 16 | index),
549 clk);
550 PP_ASSERT_WITH_CODE(!ret,
551 "[GetDpmFreqByIndex] failed to get dpm freq by index!",
552 return ret);
553
554 return ret;
555 }
556
static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
559 {
560 int ret = 0;
561 uint32_t i, num_of_levels, clk;
562
563 ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
564 PP_ASSERT_WITH_CODE(!ret,
565 "[SetupSingleDpmTable] failed to get clk levels!",
566 return ret);
567
568 dpm_table->count = num_of_levels;
569
570 for (i = 0; i < num_of_levels; i++) {
571 ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
572 PP_ASSERT_WITH_CODE(!ret,
573 "[SetupSingleDpmTable] failed to get clk of specific level!",
574 return ret);
575 dpm_table->dpm_levels[i].value = clk;
576 dpm_table->dpm_levels[i].enabled = true;
577 }
578
579 return ret;
580 }
581
static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
583 {
584 struct vega20_hwmgr *data =
585 (struct vega20_hwmgr *)(hwmgr->backend);
586 struct vega20_single_dpm_table *dpm_table;
587 int ret = 0;
588
589 dpm_table = &(data->dpm_table.gfx_table);
590 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
591 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
592 PP_ASSERT_WITH_CODE(!ret,
593 "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
594 return ret);
595 } else {
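/* GFXCLK DPM disabled: expose a single level at the VBIOS boot frequency
 * (boot clocks are reported in 10 kHz, DPM level values in MHz).
 */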
596 dpm_table->count = 1;
597 dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
598 }
599
600 return ret;
601 }
602
static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
604 {
605 struct vega20_hwmgr *data =
606 (struct vega20_hwmgr *)(hwmgr->backend);
607 struct vega20_single_dpm_table *dpm_table;
608 int ret = 0;
609
610 dpm_table = &(data->dpm_table.mem_table);
611 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
612 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
613 PP_ASSERT_WITH_CODE(!ret,
614 "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
615 return ret);
616 } else {
617 dpm_table->count = 1;
618 dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
619 }
620
621 return ret;
622 }
623
/*
 * Initialize all DPM state tables for the SMU based on the dependency table.
 * The dynamic state patching function will then trim these state tables to
 * the allowed range based on the power policy or external client requests,
 * such as UVD requests, etc.
 */
static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
633 {
634 struct vega20_hwmgr *data =
635 (struct vega20_hwmgr *)(hwmgr->backend);
636 struct vega20_single_dpm_table *dpm_table;
637 int ret = 0;
638
639 memset(&data->dpm_table, 0, sizeof(data->dpm_table));
640
641 /* socclk */
642 dpm_table = &(data->dpm_table.soc_table);
643 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
644 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
645 PP_ASSERT_WITH_CODE(!ret,
646 "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
647 return ret);
648 } else {
649 dpm_table->count = 1;
650 dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
651 }
652 vega20_init_dpm_state(&(dpm_table->dpm_state));
653
654 /* gfxclk */
655 dpm_table = &(data->dpm_table.gfx_table);
656 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
657 if (ret)
658 return ret;
659 vega20_init_dpm_state(&(dpm_table->dpm_state));
660
661 /* memclk */
662 dpm_table = &(data->dpm_table.mem_table);
663 ret = vega20_setup_memclk_dpm_table(hwmgr);
664 if (ret)
665 return ret;
666 vega20_init_dpm_state(&(dpm_table->dpm_state));
667
668 /* eclk */
669 dpm_table = &(data->dpm_table.eclk_table);
670 if (data->smu_features[GNLD_DPM_VCE].enabled) {
671 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
672 PP_ASSERT_WITH_CODE(!ret,
673 "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
674 return ret);
675 } else {
676 dpm_table->count = 1;
677 dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
678 }
679 vega20_init_dpm_state(&(dpm_table->dpm_state));
680
681 /* vclk */
682 dpm_table = &(data->dpm_table.vclk_table);
683 if (data->smu_features[GNLD_DPM_UVD].enabled) {
684 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
685 PP_ASSERT_WITH_CODE(!ret,
686 "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
687 return ret);
688 } else {
689 dpm_table->count = 1;
690 dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
691 }
692 vega20_init_dpm_state(&(dpm_table->dpm_state));
693
694 /* dclk */
695 dpm_table = &(data->dpm_table.dclk_table);
696 if (data->smu_features[GNLD_DPM_UVD].enabled) {
697 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
698 PP_ASSERT_WITH_CODE(!ret,
699 "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
700 return ret);
701 } else {
702 dpm_table->count = 1;
703 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
704 }
705 vega20_init_dpm_state(&(dpm_table->dpm_state));
706
707 /* dcefclk */
708 dpm_table = &(data->dpm_table.dcef_table);
709 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
710 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
711 PP_ASSERT_WITH_CODE(!ret,
712 "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
713 return ret);
714 } else {
715 dpm_table->count = 1;
716 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
717 }
718 vega20_init_dpm_state(&(dpm_table->dpm_state));
719
720 /* pixclk */
721 dpm_table = &(data->dpm_table.pixel_table);
722 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
723 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
724 PP_ASSERT_WITH_CODE(!ret,
725 "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
726 return ret);
727 } else
728 dpm_table->count = 0;
729 vega20_init_dpm_state(&(dpm_table->dpm_state));
730
731 /* dispclk */
732 dpm_table = &(data->dpm_table.display_table);
733 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
734 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
735 PP_ASSERT_WITH_CODE(!ret,
736 "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
737 return ret);
738 } else
739 dpm_table->count = 0;
740 vega20_init_dpm_state(&(dpm_table->dpm_state));
741
742 /* phyclk */
743 dpm_table = &(data->dpm_table.phy_table);
744 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
745 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
746 PP_ASSERT_WITH_CODE(!ret,
747 "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
748 return ret);
749 } else
750 dpm_table->count = 0;
751 vega20_init_dpm_state(&(dpm_table->dpm_state));
752
753 /* fclk */
754 dpm_table = &(data->dpm_table.fclk_table);
755 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
756 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
757 PP_ASSERT_WITH_CODE(!ret,
758 "[SetupDefaultDpmTable] failed to get fclk dpm levels!",
759 return ret);
760 } else {
761 dpm_table->count = 1;
762 dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
763 }
764 vega20_init_dpm_state(&(dpm_table->dpm_state));
765
766 /* save a copy of the default DPM table */
767 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
768 sizeof(struct vega20_dpm_table));
769
770 return 0;
771 }
772
/**
 * vega20_init_smc_table - Initializes the SMC table and uploads it
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: 0 on success, a negative error code on failure.
 */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
780 {
781 int result;
782 struct vega20_hwmgr *data =
783 (struct vega20_hwmgr *)(hwmgr->backend);
784 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
785 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
786 struct phm_ppt_v3_information *pptable_information =
787 (struct phm_ppt_v3_information *)hwmgr->pptable;
788
789 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
790 PP_ASSERT_WITH_CODE(!result,
791 "[InitSMCTable] Failed to get vbios bootup values!",
792 return result);
793
794 data->vbios_boot_state.vddc = boot_up_values.usVddc;
795 data->vbios_boot_state.vddci = boot_up_values.usVddci;
796 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
797 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
798 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
799 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
800 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
801 data->vbios_boot_state.eclock = boot_up_values.ulEClk;
802 data->vbios_boot_state.vclock = boot_up_values.ulVClk;
803 data->vbios_boot_state.dclock = boot_up_values.ulDClk;
804 data->vbios_boot_state.fclock = boot_up_values.ulFClk;
805 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
806
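/* VBIOS boot-up clocks are in 10 kHz units; divide by 100 to pass the
 * minimum deep-sleep DCEFCLK to the SMU in MHz.
 */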
807 smum_send_msg_to_smc_with_parameter(hwmgr,
808 PPSMC_MSG_SetMinDeepSleepDcefclk,
809 (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
810 NULL);
811
812 memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
813
814 result = smum_smc_table_manager(hwmgr,
815 (uint8_t *)pp_table, TABLE_PPTABLE, false);
816 PP_ASSERT_WITH_CODE(!result,
817 "[InitSMCTable] Failed to upload PPtable!",
818 return result);
819
820 return 0;
821 }
822
/*
 * Override the PCIe link speed and link width for DPM Level 1. The PPTable
 * entries reflect the ASIC capabilities, not the system capabilities. For
 * example, with a Vega20 board in a PCIe Gen3 system, when the SMU tries to
 * switch to DPM1 it fails because the system does not support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
830 {
831 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
832 struct vega20_hwmgr *data =
833 (struct vega20_hwmgr *)(hwmgr->backend);
834 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
835 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
836 int i;
837 int ret;
838
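/* Cap the PPTable's PCIe gen/width settings at what the platform actually
 * supports, as reported in adev->pm.pcie_gen_mask and pcie_mlw_mask.
 */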
839 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
840 pcie_gen = 3;
841 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
842 pcie_gen = 2;
843 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
844 pcie_gen = 1;
845 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
846 pcie_gen = 0;
847
848 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
849 pcie_width = 6;
850 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
851 pcie_width = 5;
852 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
853 pcie_width = 4;
854 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
855 pcie_width = 3;
856 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
857 pcie_width = 2;
858 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
859 pcie_width = 1;
860
/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
 */
865 for (i = 0; i < NUM_LINK_LEVELS; i++) {
866 pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
867 pp_table->PcieGenSpeed[i];
868 pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
869 pp_table->PcieLaneCount[i];
870
871 if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
872 pp_table->PcieLaneCount[i]) {
873 smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
874 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
875 PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
876 NULL);
877 PP_ASSERT_WITH_CODE(!ret,
878 "[OverridePcieParameters] Attempt to override pcie params failed!",
879 return ret);
880 }
881
882 /* update the pptable */
883 pp_table->PcieGenSpeed[i] = pcie_gen_arg;
884 pp_table->PcieLaneCount[i] = pcie_width_arg;
885 }
886
/* override to the highest if link DPM is disabled via ppfeaturemask */
888 if (data->registry_data.pcie_dpm_key_disabled) {
889 for (i = 0; i < NUM_LINK_LEVELS; i++) {
890 smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
891 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
892 PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
893 NULL);
894 PP_ASSERT_WITH_CODE(!ret,
895 "[OverridePcieParameters] Attempt to override pcie params failed!",
896 return ret);
897
898 pp_table->PcieGenSpeed[i] = pcie_gen;
899 pp_table->PcieLaneCount[i] = pcie_width;
900 }
901 ret = vega20_enable_smc_features(hwmgr,
902 false,
903 data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
904 PP_ASSERT_WITH_CODE(!ret,
905 "Attempt to Disable DPM LINK Failed!",
906 return ret);
907 data->smu_features[GNLD_DPM_LINK].enabled = false;
908 data->smu_features[GNLD_DPM_LINK].supported = false;
909 }
910
911 return 0;
912 }
913
static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
915 {
916 struct vega20_hwmgr *data =
917 (struct vega20_hwmgr *)(hwmgr->backend);
918 uint32_t allowed_features_low = 0, allowed_features_high = 0;
919 int i;
920 int ret = 0;
921
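/* Split the 64-bit allowed-feature mask into the low and high 32-bit words
 * expected by the SetAllowedFeaturesMaskLow/High messages.
 */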
922 for (i = 0; i < GNLD_FEATURES_MAX; i++)
923 if (data->smu_features[i].allowed)
924 data->smu_features[i].smu_feature_id > 31 ?
925 (allowed_features_high |=
926 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
927 & 0xFFFFFFFF)) :
928 (allowed_features_low |=
929 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
930 & 0xFFFFFFFF));
931
932 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
933 PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
934 PP_ASSERT_WITH_CODE(!ret,
935 "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
936 return ret);
937
938 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
939 PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
940 PP_ASSERT_WITH_CODE(!ret,
941 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
942 return ret);
943
944 return 0;
945 }
946
static int vega20_run_btc(struct pp_hwmgr *hwmgr)
948 {
949 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
950 }
951
static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
953 {
954 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
955 }
956
static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
958 {
959 struct vega20_hwmgr *data =
960 (struct vega20_hwmgr *)(hwmgr->backend);
961 uint64_t features_enabled;
962 int i;
963 bool enabled;
964 int ret = 0;
965
966 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
967 PPSMC_MSG_EnableAllSmuFeatures,
968 NULL)) == 0,
969 "[EnableAllSMUFeatures] Failed to enable all smu features!",
970 return ret);
971
972 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
973 PP_ASSERT_WITH_CODE(!ret,
974 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
975 return ret);
976
977 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
978 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
979 true : false;
980 data->smu_features[i].enabled = enabled;
981 data->smu_features[i].supported = enabled;
982
983 #if 0
984 if (data->smu_features[i].allowed && !enabled)
985 pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
986 else if (!data->smu_features[i].allowed && enabled)
987 pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
988 #endif
989 }
990
991 return 0;
992 }
993
static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
995 {
996 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
997
998 if (data->smu_features[GNLD_DPM_UCLK].enabled)
999 return smum_send_msg_to_smc_with_parameter(hwmgr,
1000 PPSMC_MSG_SetUclkFastSwitch,
1001 1,
1002 NULL);
1003
1004 return 0;
1005 }
1006
static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
1008 {
1009 struct vega20_hwmgr *data =
1010 (struct vega20_hwmgr *)(hwmgr->backend);
1011
1012 return smum_send_msg_to_smc_with_parameter(hwmgr,
1013 PPSMC_MSG_SetFclkGfxClkRatio,
1014 data->registry_data.fclk_gfxclk_ratio,
1015 NULL);
1016 }
1017
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
1019 {
1020 struct vega20_hwmgr *data =
1021 (struct vega20_hwmgr *)(hwmgr->backend);
1022 int i, ret = 0;
1023
1024 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
1025 PPSMC_MSG_DisableAllSmuFeatures,
1026 NULL)) == 0,
1027 "[DisableAllSMUFeatures] Failed to disable all smu features!",
1028 return ret);
1029
1030 for (i = 0; i < GNLD_FEATURES_MAX; i++)
1031 data->smu_features[i].enabled = 0;
1032
1033 return 0;
1034 }
1035
static int vega20_od8_set_feature_capabilities(
		struct pp_hwmgr *hwmgr)
1038 {
1039 struct phm_ppt_v3_information *pptable_information =
1040 (struct phm_ppt_v3_information *)hwmgr->pptable;
1041 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1042 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1043 struct vega20_od8_settings *od_settings = &(data->od8_settings);
1044
1045 od_settings->overdrive8_capabilities = 0;
1046
1047 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
1048 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
1049 pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
1050 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
1051 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
1052 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
1053 od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;
1054
1055 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
1056 (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
1057 pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
1058 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
1059 pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
1060 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
1061 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
1062 od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
1063 }
1064
1065 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
1066 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
1067 data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
1068 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
1069 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
1070 pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
1071 (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
1072 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
1073 od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
1074 }
1075
1076 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
1077 pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
1078 pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
1079 pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
1080 pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
1081 od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;
1082
1083 if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
1084 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
1085 pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
1086 pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
1087 (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
1088 pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
1089 od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;
1090
1091 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
1092 (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
1093 (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
1094 pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
1095 (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
1096 pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
1097 od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
1098 }
1099
1100 if (data->smu_features[GNLD_THERMAL].enabled) {
1101 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
1102 pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
1103 pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
1104 (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
1105 pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
1106 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;
1107
1108 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
1109 pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
1110 pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
1111 (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
1112 pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
1113 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
1114 }
1115
1116 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
1117 od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;
1118
1119 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
1120 pp_table->FanZeroRpmEnable)
1121 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
1122
1123 if (!od_settings->overdrive8_capabilities)
1124 hwmgr->od_enabled = false;
1125
1126 return 0;
1127 }
1128
static int vega20_od8_set_feature_id(
		struct pp_hwmgr *hwmgr)
1131 {
1132 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1133 struct vega20_od8_settings *od_settings = &(data->od8_settings);
1134
1135 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
1136 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
1137 OD8_GFXCLK_LIMITS;
1138 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
1139 OD8_GFXCLK_LIMITS;
1140 } else {
1141 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
1142 0;
1143 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
1144 0;
1145 }
1146
1147 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
1148 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
1149 OD8_GFXCLK_CURVE;
1150 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
1151 OD8_GFXCLK_CURVE;
1152 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
1153 OD8_GFXCLK_CURVE;
1154 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
1155 OD8_GFXCLK_CURVE;
1156 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
1157 OD8_GFXCLK_CURVE;
1158 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
1159 OD8_GFXCLK_CURVE;
1160 } else {
1161 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
1162 0;
1163 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
1164 0;
1165 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
1166 0;
1167 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
1168 0;
1169 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
1170 0;
1171 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
1172 0;
1173 }
1174
1175 if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
1176 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
1177 else
1178 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;
1179
1180 if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
1181 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
1182 else
1183 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;
1184
1185 if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
1186 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
1187 OD8_ACOUSTIC_LIMIT_SCLK;
1188 else
1189 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
1190 0;
1191
1192 if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
1193 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
1194 OD8_FAN_SPEED_MIN;
1195 else
1196 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
1197 0;
1198
1199 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
1200 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
1201 OD8_TEMPERATURE_FAN;
1202 else
1203 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
1204 0;
1205
1206 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
1207 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
1208 OD8_TEMPERATURE_SYSTEM;
1209 else
1210 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
1211 0;
1212
1213 return 0;
1214 }
1215
static int vega20_od8_get_gfx_clock_base_voltage(
		struct pp_hwmgr *hwmgr,
		uint32_t *voltage,
		uint32_t freq)
1220 {
1221 int ret = 0;
1222
1223 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1224 PPSMC_MSG_GetAVFSVoltageByDpm,
1225 ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
1226 voltage);
1227 PP_ASSERT_WITH_CODE(!ret,
1228 "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
1229 return ret);
1230
1231 *voltage = *voltage / VOLTAGE_SCALE;
1232
1233 return 0;
1234 }
1235
static int vega20_od8_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
1238 {
1239 struct phm_ppt_v3_information *pptable_information =
1240 (struct phm_ppt_v3_information *)hwmgr->pptable;
1241 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1242 struct vega20_od8_settings *od8_settings = &(data->od8_settings);
1243 OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
1244 int i, ret = 0;
1245
1246 /* Set Feature Capabilities */
1247 vega20_od8_set_feature_capabilities(hwmgr);
1248
1249 /* Map FeatureID to individual settings */
1250 vega20_od8_set_feature_id(hwmgr);
1251
1252 /* Set default values */
1253 ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
1254 PP_ASSERT_WITH_CODE(!ret,
1255 "Failed to export over drive table!",
1256 return ret);
1257
1258 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
1259 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
1260 od_table->GfxclkFmin;
1261 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
1262 od_table->GfxclkFmax;
1263 } else {
1264 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
1265 0;
1266 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
1267 0;
1268 }
1269
1270 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
1271 od_table->GfxclkFreq1 = od_table->GfxclkFmin;
1272 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1273 od_table->GfxclkFreq1;
1274
1275 od_table->GfxclkFreq3 = od_table->GfxclkFmax;
1276 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1277 od_table->GfxclkFreq3;
1278
1279 od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
1280 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1281 od_table->GfxclkFreq2;
1282
1283 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1284 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
1285 od_table->GfxclkFreq1),
1286 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1287 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
1288 od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
1289 * VOLTAGE_SCALE;
1290
1291 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1292 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
1293 od_table->GfxclkFreq2),
1294 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1295 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
1296 od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
1297 * VOLTAGE_SCALE;
1298
1299 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1300 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
1301 od_table->GfxclkFreq3),
1302 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1303 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
1304 od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
1305 * VOLTAGE_SCALE;
1306 } else {
1307 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1308 0;
1309 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
1310 0;
1311 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1312 0;
1313 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
1314 0;
1315 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1316 0;
1317 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
1318 0;
1319 }
1320
1321 if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
1322 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1323 od_table->UclkFmax;
1324 else
1325 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1326 0;
1327
1328 if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
1329 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1330 od_table->OverDrivePct;
1331 else
1332 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1333 0;
1334
1335 if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
1336 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1337 od_table->FanMaximumRpm;
1338 else
1339 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1340 0;
1341
1342 if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
1343 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1344 od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
1345 else
1346 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1347 0;
1348
1349 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
1350 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1351 od_table->FanTargetTemperature;
1352 else
1353 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1354 0;
1355
1356 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
1357 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1358 od_table->MaxOpTemp;
1359 else
1360 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1361 0;
1362
1363 for (i = 0; i < OD8_SETTING_COUNT; i++) {
1364 if (od8_settings->od8_settings_array[i].feature_id) {
1365 od8_settings->od8_settings_array[i].min_value =
1366 pptable_information->od_settings_min[i];
1367 od8_settings->od8_settings_array[i].max_value =
1368 pptable_information->od_settings_max[i];
1369 od8_settings->od8_settings_array[i].current_value =
1370 od8_settings->od8_settings_array[i].default_value;
1371 } else {
1372 od8_settings->od8_settings_array[i].min_value =
1373 0;
1374 od8_settings->od8_settings_array[i].max_value =
1375 0;
1376 od8_settings->od8_settings_array[i].current_value =
1377 0;
1378 }
1379 }
1380
1381 ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
1382 PP_ASSERT_WITH_CODE(!ret,
1383 "Failed to import over drive table!",
1384 return ret);
1385
1386 return 0;
1387 }
1388
static int vega20_od8_set_settings(
		struct pp_hwmgr *hwmgr,
		uint32_t index,
		uint32_t value)
1393 {
1394 OverDriveTable_t od_table;
1395 int ret = 0;
1396 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1397 struct vega20_od8_single_setting *od8_settings =
1398 data->od8_settings.od8_settings_array;
1399
1400 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
1401 PP_ASSERT_WITH_CODE(!ret,
1402 "Failed to export over drive table!",
1403 return ret);
1404
1405 switch (index) {
1406 case OD8_SETTING_GFXCLK_FMIN:
1407 od_table.GfxclkFmin = (uint16_t)value;
1408 break;
1409 case OD8_SETTING_GFXCLK_FMAX:
1410 if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
1411 value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
1412 return -EINVAL;
1413
1414 od_table.GfxclkFmax = (uint16_t)value;
1415 break;
1416 case OD8_SETTING_GFXCLK_FREQ1:
1417 od_table.GfxclkFreq1 = (uint16_t)value;
1418 break;
1419 case OD8_SETTING_GFXCLK_VOLTAGE1:
1420 od_table.GfxclkVolt1 = (uint16_t)value;
1421 break;
1422 case OD8_SETTING_GFXCLK_FREQ2:
1423 od_table.GfxclkFreq2 = (uint16_t)value;
1424 break;
1425 case OD8_SETTING_GFXCLK_VOLTAGE2:
1426 od_table.GfxclkVolt2 = (uint16_t)value;
1427 break;
1428 case OD8_SETTING_GFXCLK_FREQ3:
1429 od_table.GfxclkFreq3 = (uint16_t)value;
1430 break;
1431 case OD8_SETTING_GFXCLK_VOLTAGE3:
1432 od_table.GfxclkVolt3 = (uint16_t)value;
1433 break;
1434 case OD8_SETTING_UCLK_FMAX:
1435 if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
1436 value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
1437 return -EINVAL;
1438 od_table.UclkFmax = (uint16_t)value;
1439 break;
1440 case OD8_SETTING_POWER_PERCENTAGE:
1441 od_table.OverDrivePct = (int16_t)value;
1442 break;
1443 case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
1444 od_table.FanMaximumRpm = (uint16_t)value;
1445 break;
1446 case OD8_SETTING_FAN_MIN_SPEED:
1447 od_table.FanMinimumPwm = (uint16_t)value;
1448 break;
1449 case OD8_SETTING_FAN_TARGET_TEMP:
1450 od_table.FanTargetTemperature = (uint16_t)value;
1451 break;
1452 case OD8_SETTING_OPERATING_TEMP_MAX:
1453 od_table.MaxOpTemp = (uint16_t)value;
1454 break;
1455 }
1456
1457 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
1458 PP_ASSERT_WITH_CODE(!ret,
1459 			"Failed to import overdrive table!",
1460 return ret);
1461
1462 return 0;
1463 }
1464
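/*
 * Report the current sclk overdrive as a percentage above the golden
 * (default) top gfxclk DPM level.
 */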
1465 static int vega20_get_sclk_od(
1466 struct pp_hwmgr *hwmgr)
1467 {
1468 struct vega20_hwmgr *data = hwmgr->backend;
1469 struct vega20_single_dpm_table *sclk_table =
1470 &(data->dpm_table.gfx_table);
1471 struct vega20_single_dpm_table *golden_sclk_table =
1472 &(data->golden_dpm_table.gfx_table);
1473 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
1474 int golden_value = golden_sclk_table->dpm_levels
1475 [golden_sclk_table->count - 1].value;
1476
1477 /* od percentage */
1478 value -= golden_value;
1479 value = DIV_ROUND_UP(value * 100, golden_value);
1480
1481 return value;
1482 }
1483
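/*
 * Apply an sclk overdrive percentage: scale the golden top gfxclk level,
 * program it as the OD8 GfxclkFmax and re-read the gfxclk DPM table.
 */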
1484 static int vega20_set_sclk_od(
1485 struct pp_hwmgr *hwmgr, uint32_t value)
1486 {
1487 struct vega20_hwmgr *data = hwmgr->backend;
1488 struct vega20_single_dpm_table *golden_sclk_table =
1489 &(data->golden_dpm_table.gfx_table);
1490 uint32_t od_sclk;
1491 int ret = 0;
1492
1493 od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
1494 od_sclk /= 100;
1495 od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
1496
1497 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
1498 PP_ASSERT_WITH_CODE(!ret,
1499 "[SetSclkOD] failed to set od gfxclk!",
1500 return ret);
1501
1502 /* retrieve updated gfxclk table */
1503 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
1504 PP_ASSERT_WITH_CODE(!ret,
1505 "[SetSclkOD] failed to refresh gfxclk table!",
1506 return ret);
1507
1508 return 0;
1509 }
1510
1511 static int vega20_get_mclk_od(
1512 struct pp_hwmgr *hwmgr)
1513 {
1514 struct vega20_hwmgr *data = hwmgr->backend;
1515 struct vega20_single_dpm_table *mclk_table =
1516 &(data->dpm_table.mem_table);
1517 struct vega20_single_dpm_table *golden_mclk_table =
1518 &(data->golden_dpm_table.mem_table);
1519 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
1520 int golden_value = golden_mclk_table->dpm_levels
1521 [golden_mclk_table->count - 1].value;
1522
1523 /* od percentage */
1524 value -= golden_value;
1525 value = DIV_ROUND_UP(value * 100, golden_value);
1526
1527 return value;
1528 }
1529
1530 static int vega20_set_mclk_od(
1531 struct pp_hwmgr *hwmgr, uint32_t value)
1532 {
1533 struct vega20_hwmgr *data = hwmgr->backend;
1534 struct vega20_single_dpm_table *golden_mclk_table =
1535 &(data->golden_dpm_table.mem_table);
1536 uint32_t od_mclk;
1537 int ret = 0;
1538
1539 od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
1540 od_mclk /= 100;
1541 od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
1542
1543 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
1544 PP_ASSERT_WITH_CODE(!ret,
1545 "[SetMclkOD] failed to set od memclk!",
1546 return ret);
1547
1548 /* retrieve updated memclk table */
1549 ret = vega20_setup_memclk_dpm_table(hwmgr);
1550 PP_ASSERT_WITH_CODE(!ret,
1551 "[SetMclkOD] failed to refresh memclk table!",
1552 return ret);
1553
1554 return 0;
1555 }
1556
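/*
 * Cache the UMD pstate (profiling) gfxclk/memclk levels and the peak
 * clocks in hwmgr, falling back to the lowest DPM level when the tables
 * are too short.
 */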
1557 static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
1558 {
1559 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1560 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
1561 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
1562
1563 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
1564 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
1565 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
1566 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
1567 } else {
1568 hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
1569 hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
1570 }
1571
1572 hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
1573 hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
1574 }
1575
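/*
 * Query the SMU for the DC-mode maximum DPM frequency of the selected
 * clock domain; if the DC limit reads back as zero, fall back to the
 * AC limit.
 */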
1576 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
1577 PP_Clock *clock, PPCLK_e clock_select)
1578 {
1579 int ret = 0;
1580
1581 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1582 PPSMC_MSG_GetDcModeMaxDpmFreq,
1583 (clock_select << 16),
1584 clock)) == 0,
1585 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
1586 return ret);
1587
1588 /* if DC limit is zero, return AC limit */
1589 if (*clock == 0) {
1590 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1591 PPSMC_MSG_GetMaxDpmFreq,
1592 (clock_select << 16),
1593 clock)) == 0,
1594 "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
1595 return ret);
1596 }
1597
1598 return 0;
1599 }
1600
1601 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
1602 {
1603 struct vega20_hwmgr *data =
1604 (struct vega20_hwmgr *)(hwmgr->backend);
1605 struct vega20_max_sustainable_clocks *max_sustainable_clocks =
1606 &(data->max_sustainable_clocks);
1607 int ret = 0;
1608
1609 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
1610 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
1611 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
1612 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
1613 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
1614 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
1615
1616 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1617 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1618 &(max_sustainable_clocks->uclock),
1619 PPCLK_UCLK)) == 0,
1620 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
1621 return ret);
1622
1623 if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
1624 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1625 &(max_sustainable_clocks->soc_clock),
1626 PPCLK_SOCCLK)) == 0,
1627 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
1628 return ret);
1629
1630 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1631 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1632 &(max_sustainable_clocks->dcef_clock),
1633 PPCLK_DCEFCLK)) == 0,
1634 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
1635 return ret);
1636 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1637 &(max_sustainable_clocks->display_clock),
1638 PPCLK_DISPCLK)) == 0,
1639 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
1640 return ret);
1641 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1642 &(max_sustainable_clocks->phy_clock),
1643 PPCLK_PHYCLK)) == 0,
1644 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
1645 return ret);
1646 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1647 &(max_sustainable_clocks->pixel_clock),
1648 PPCLK_PIXCLK)) == 0,
1649 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
1650 return ret);
1651 }
1652
1653 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
1654 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
1655
1656 return 0;
1657 }
1658
1659 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
1660 {
1661 int result;
1662
1663 result = smum_send_msg_to_smc(hwmgr,
1664 PPSMC_MSG_SetMGpuFanBoostLimitRpm,
1665 NULL);
1666 PP_ASSERT_WITH_CODE(!result,
1667 "[EnableMgpuFan] Failed to enable mgpu fan boost!",
1668 return result);
1669
1670 return 0;
1671 }
1672
1673 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
1674 {
1675 struct vega20_hwmgr *data =
1676 (struct vega20_hwmgr *)(hwmgr->backend);
1677
1678 data->uvd_power_gated = true;
1679 data->vce_power_gated = true;
1680 }
1681
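/*
 * Main DPM bring-up sequence: program the allowed feature mask,
 * initialize the SMC table, run BTC, enable the SMU features, override
 * PCIe parameters, notify the SMU about displays, set up the default
 * and maximum sustainable DPM tables, apply the default OD8 settings
 * and fetch the default PPT limit.
 */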
1682 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1683 {
1684 int result = 0;
1685
1686 smum_send_msg_to_smc_with_parameter(hwmgr,
1687 PPSMC_MSG_NumOfDisplays, 0, NULL);
1688
1689 result = vega20_set_allowed_featuresmask(hwmgr);
1690 PP_ASSERT_WITH_CODE(!result,
1691 			"[EnableDPMTasks] Failed to set allowed featuresmask!",
1692 return result);
1693
1694 result = vega20_init_smc_table(hwmgr);
1695 PP_ASSERT_WITH_CODE(!result,
1696 "[EnableDPMTasks] Failed to initialize SMC table!",
1697 return result);
1698
1699 result = vega20_run_btc(hwmgr);
1700 PP_ASSERT_WITH_CODE(!result,
1701 "[EnableDPMTasks] Failed to run btc!",
1702 return result);
1703
1704 result = vega20_run_btc_afll(hwmgr);
1705 PP_ASSERT_WITH_CODE(!result,
1706 "[EnableDPMTasks] Failed to run btc afll!",
1707 return result);
1708
1709 result = vega20_enable_all_smu_features(hwmgr);
1710 PP_ASSERT_WITH_CODE(!result,
1711 "[EnableDPMTasks] Failed to enable all smu features!",
1712 return result);
1713
1714 result = vega20_override_pcie_parameters(hwmgr);
1715 PP_ASSERT_WITH_CODE(!result,
1716 "[EnableDPMTasks] Failed to override pcie parameters!",
1717 return result);
1718
1719 result = vega20_notify_smc_display_change(hwmgr);
1720 PP_ASSERT_WITH_CODE(!result,
1721 "[EnableDPMTasks] Failed to notify smc display change!",
1722 return result);
1723
1724 result = vega20_send_clock_ratio(hwmgr);
1725 PP_ASSERT_WITH_CODE(!result,
1726 "[EnableDPMTasks] Failed to send clock ratio!",
1727 return result);
1728
1729 /* Initialize UVD/VCE powergating state */
1730 vega20_init_powergate_state(hwmgr);
1731
1732 result = vega20_setup_default_dpm_tables(hwmgr);
1733 PP_ASSERT_WITH_CODE(!result,
1734 "[EnableDPMTasks] Failed to setup default DPM tables!",
1735 return result);
1736
1737 result = vega20_init_max_sustainable_clocks(hwmgr);
1738 PP_ASSERT_WITH_CODE(!result,
1739 "[EnableDPMTasks] Failed to get maximum sustainable clocks!",
1740 return result);
1741
1742 result = vega20_power_control_set_level(hwmgr);
1743 PP_ASSERT_WITH_CODE(!result,
1744 "[EnableDPMTasks] Failed to power control set level!",
1745 return result);
1746
1747 result = vega20_od8_initialize_default_settings(hwmgr);
1748 PP_ASSERT_WITH_CODE(!result,
1749 			"[EnableDPMTasks] Failed to initialize od8 settings!",
1750 return result);
1751
1752 vega20_populate_umdpstate_clocks(hwmgr);
1753
1754 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
1755 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
1756 PP_ASSERT_WITH_CODE(!result,
1757 "[GetPptLimit] get default PPT limit failed!",
1758 return result);
1759 hwmgr->power_limit =
1760 hwmgr->default_power_limit;
1761
1762 return 0;
1763 }
1764
1765 static uint32_t vega20_find_lowest_dpm_level(
1766 struct vega20_single_dpm_table *table)
1767 {
1768 uint32_t i;
1769
1770 for (i = 0; i < table->count; i++) {
1771 if (table->dpm_levels[i].enabled)
1772 break;
1773 }
1774 if (i >= table->count) {
1775 i = 0;
1776 table->dpm_levels[i].enabled = true;
1777 }
1778
1779 return i;
1780 }
1781
1782 static uint32_t vega20_find_highest_dpm_level(
1783 struct vega20_single_dpm_table *table)
1784 {
1785 int i = 0;
1786
1787 PP_ASSERT_WITH_CODE(table != NULL,
1788 "[FindHighestDPMLevel] DPM Table does not exist!",
1789 return 0);
1790 PP_ASSERT_WITH_CODE(table->count > 0,
1791 "[FindHighestDPMLevel] DPM Table has no entry!",
1792 return 0);
1793 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
1794 "[FindHighestDPMLevel] DPM Table has too many entries!",
1795 return MAX_REGULAR_DPM_NUMBER - 1);
1796
1797 for (i = table->count - 1; i >= 0; i--) {
1798 if (table->dpm_levels[i].enabled)
1799 break;
1800 }
1801 if (i < 0) {
1802 i = 0;
1803 table->dpm_levels[i].enabled = true;
1804 }
1805
1806 return i;
1807 }
1808
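/*
 * Push the cached soft minimum (hard minimum for DCEFCLK) frequencies to
 * the SMU for every DPM domain selected in @feature_mask that is enabled.
 */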
1809 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1810 {
1811 struct vega20_hwmgr *data =
1812 (struct vega20_hwmgr *)(hwmgr->backend);
1813 uint32_t min_freq;
1814 int ret = 0;
1815
1816 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1817 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1818 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
1819 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1820 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1821 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
1822 NULL)),
1823 				"Failed to set soft min gfxclk!",
1824 return ret);
1825 }
1826
1827 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1828 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1829 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
1830 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1831 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1832 (PPCLK_UCLK << 16) | (min_freq & 0xffff),
1833 NULL)),
1834 				"Failed to set soft min memclk!",
1835 return ret);
1836 }
1837
1838 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1839 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1840 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
1841
1842 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1843 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1844 (PPCLK_VCLK << 16) | (min_freq & 0xffff),
1845 NULL)),
1846 "Failed to set soft min vclk!",
1847 return ret);
1848
1849 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
1850
1851 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1852 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1853 (PPCLK_DCLK << 16) | (min_freq & 0xffff),
1854 NULL)),
1855 "Failed to set soft min dclk!",
1856 return ret);
1857 }
1858
1859 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1860 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1861 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
1862
1863 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1864 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1865 (PPCLK_ECLK << 16) | (min_freq & 0xffff),
1866 NULL)),
1867 "Failed to set soft min eclk!",
1868 return ret);
1869 }
1870
1871 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1872 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1873 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
1874
1875 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1876 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1877 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
1878 NULL)),
1879 "Failed to set soft min socclk!",
1880 return ret);
1881 }
1882
1883 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1884 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1885 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level;
1886
1887 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1888 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1889 (PPCLK_FCLK << 16) | (min_freq & 0xffff),
1890 NULL)),
1891 "Failed to set soft min fclk!",
1892 return ret);
1893 }
1894
1895 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled &&
1896 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
1897 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1898
1899 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1900 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1901 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
1902 NULL)),
1903 "Failed to set hard min dcefclk!",
1904 return ret);
1905 }
1906
1907 return ret;
1908 }
1909
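/*
 * Push the cached soft maximum frequencies to the SMU for every DPM
 * domain selected in @feature_mask that is enabled.
 */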
1910 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1911 {
1912 struct vega20_hwmgr *data =
1913 (struct vega20_hwmgr *)(hwmgr->backend);
1914 uint32_t max_freq;
1915 int ret = 0;
1916
1917 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1918 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1919 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
1920
1921 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1922 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1923 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
1924 NULL)),
1925 "Failed to set soft max gfxclk!",
1926 return ret);
1927 }
1928
1929 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1930 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1931 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
1932
1933 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1934 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1935 (PPCLK_UCLK << 16) | (max_freq & 0xffff),
1936 NULL)),
1937 "Failed to set soft max memclk!",
1938 return ret);
1939 }
1940
1941 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1942 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1943 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
1944
1945 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1946 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1947 (PPCLK_VCLK << 16) | (max_freq & 0xffff),
1948 NULL)),
1949 "Failed to set soft max vclk!",
1950 return ret);
1951
1952 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
1953 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1954 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1955 (PPCLK_DCLK << 16) | (max_freq & 0xffff),
1956 NULL)),
1957 "Failed to set soft max dclk!",
1958 return ret);
1959 }
1960
1961 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1962 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1963 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
1964
1965 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1966 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1967 (PPCLK_ECLK << 16) | (max_freq & 0xffff),
1968 NULL)),
1969 "Failed to set soft max eclk!",
1970 return ret);
1971 }
1972
1973 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1974 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1975 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
1976
1977 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1978 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1979 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
1980 NULL)),
1981 "Failed to set soft max socclk!",
1982 return ret);
1983 }
1984
1985 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1986 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1987 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level;
1988
1989 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1990 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1991 (PPCLK_FCLK << 16) | (max_freq & 0xffff),
1992 NULL)),
1993 "Failed to set soft max fclk!",
1994 return ret);
1995 }
1996
1997 return ret;
1998 }
1999
2000 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
2001 {
2002 struct vega20_hwmgr *data =
2003 (struct vega20_hwmgr *)(hwmgr->backend);
2004 int ret = 0;
2005
2006 if (data->smu_features[GNLD_DPM_VCE].supported) {
2007 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
2008 if (enable)
2009 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
2010 else
2011 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
2012 }
2013
2014 ret = vega20_enable_smc_features(hwmgr,
2015 enable,
2016 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
2017 PP_ASSERT_WITH_CODE(!ret,
2018 "Attempt to Enable/Disable DPM VCE Failed!",
2019 return ret);
2020 data->smu_features[GNLD_DPM_VCE].enabled = enable;
2021 }
2022
2023 return 0;
2024 }
2025
2026 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
2027 uint32_t *clock,
2028 PPCLK_e clock_select,
2029 bool max)
2030 {
2031 int ret;
2032 *clock = 0;
2033
2034 if (max) {
2035 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2036 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
2037 clock)) == 0,
2038 "[GetClockRanges] Failed to get max clock from SMC!",
2039 return ret);
2040 } else {
2041 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2042 PPSMC_MSG_GetMinDpmFreq,
2043 (clock_select << 16),
2044 clock)) == 0,
2045 "[GetClockRanges] Failed to get min clock from SMC!",
2046 return ret);
2047 }
2048
2049 return 0;
2050 }
2051
2052 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2053 {
2054 struct vega20_hwmgr *data =
2055 (struct vega20_hwmgr *)(hwmgr->backend);
2056 uint32_t gfx_clk;
2057 int ret = 0;
2058
2059 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
2060 "[GetSclks]: gfxclk dpm not enabled!\n",
2061 return -EPERM);
2062
2063 if (low) {
2064 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
2065 PP_ASSERT_WITH_CODE(!ret,
2066 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
2067 return ret);
2068 } else {
2069 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
2070 PP_ASSERT_WITH_CODE(!ret,
2071 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
2072 return ret);
2073 }
2074
2075 return (gfx_clk * 100);
2076 }
2077
2078 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2079 {
2080 struct vega20_hwmgr *data =
2081 (struct vega20_hwmgr *)(hwmgr->backend);
2082 uint32_t mem_clk;
2083 int ret = 0;
2084
2085 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
2086 			"[GetMclks]: memclk dpm not enabled!\n",
2087 return -EPERM);
2088
2089 if (low) {
2090 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
2091 PP_ASSERT_WITH_CODE(!ret,
2092 "[GetMclks]: fail to get min PPCLK_UCLK\n",
2093 return ret);
2094 } else {
2095 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
2096 PP_ASSERT_WITH_CODE(!ret,
2097 "[GetMclks]: fail to get max PPCLK_UCLK\n",
2098 return ret);
2099 }
2100
2101 return (mem_clk * 100);
2102 }
2103
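/*
 * Return a copy of the SMU metrics table. The table is cached and only
 * re-read from the SMU when the cached copy is older than 1 ms or
 * @bypass_cache is set.
 */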
2104 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr,
2105 SmuMetrics_t *metrics_table,
2106 bool bypass_cache)
2107 {
2108 struct vega20_hwmgr *data =
2109 (struct vega20_hwmgr *)(hwmgr->backend);
2110 int ret = 0;
2111
2112 if (bypass_cache ||
2113 !data->metrics_time ||
2114 time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
2115 ret = smum_smc_table_manager(hwmgr,
2116 (uint8_t *)(&data->metrics_table),
2117 TABLE_SMU_METRICS,
2118 true);
2119 if (ret) {
2120 pr_info("Failed to export SMU metrics table!\n");
2121 return ret;
2122 }
2123 data->metrics_time = jiffies;
2124 }
2125
2126 if (metrics_table)
2127 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
2128
2129 return ret;
2130 }
2131
2132 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, int idx,
2133 uint32_t *query)
2134 {
2135 int ret = 0;
2136 SmuMetrics_t metrics_table;
2137
2138 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2139 if (ret)
2140 return ret;
2141
2142 	/* Only SMU firmware 40.46 (version 0x282e00) reports the average socket power under this name; other releases changed the value name, so treat them as unsupported */
2143 switch (idx) {
2144 case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
2145 if (hwmgr->smu_version == 0x282e00)
2146 *query = metrics_table.AverageSocketPower << 8;
2147 else
2148 ret = -EOPNOTSUPP;
2149 break;
2150 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
2151 *query = metrics_table.CurrSocketPower << 8;
2152 break;
2153 }
2154
2155 return ret;
2156 }
2157
2158 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
2159 PPCLK_e clk_id, uint32_t *clk_freq)
2160 {
2161 int ret = 0;
2162
2163 *clk_freq = 0;
2164
2165 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2166 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
2167 clk_freq)) == 0,
2168 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
2169 return ret);
2170
2171 *clk_freq = *clk_freq * 100;
2172
2173 return 0;
2174 }
2175
2176 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
2177 int idx,
2178 uint32_t *activity_percent)
2179 {
2180 int ret = 0;
2181 SmuMetrics_t metrics_table;
2182
2183 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2184 if (ret)
2185 return ret;
2186
2187 switch (idx) {
2188 case AMDGPU_PP_SENSOR_GPU_LOAD:
2189 *activity_percent = metrics_table.AverageGfxActivity;
2190 break;
2191 case AMDGPU_PP_SENSOR_MEM_LOAD:
2192 *activity_percent = metrics_table.AverageUclkActivity;
2193 break;
2194 default:
2195 pr_err("Invalid index for retrieving clock activity\n");
2196 return -EINVAL;
2197 }
2198
2199 return ret;
2200 }
2201
2202 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
2203 void *value, int *size)
2204 {
2205 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2206 struct amdgpu_device *adev = hwmgr->adev;
2207 SmuMetrics_t metrics_table;
2208 uint32_t val_vid;
2209 int ret = 0;
2210
2211 switch (idx) {
2212 case AMDGPU_PP_SENSOR_GFX_SCLK:
2213 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2214 if (ret)
2215 return ret;
2216
2217 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100;
2218 *size = 4;
2219 break;
2220 case AMDGPU_PP_SENSOR_GFX_MCLK:
2221 ret = vega20_get_current_clk_freq(hwmgr,
2222 PPCLK_UCLK,
2223 (uint32_t *)value);
2224 if (!ret)
2225 *size = 4;
2226 break;
2227 case AMDGPU_PP_SENSOR_GPU_LOAD:
2228 case AMDGPU_PP_SENSOR_MEM_LOAD:
2229 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
2230 if (!ret)
2231 *size = 4;
2232 break;
2233 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
2234 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
2235 *size = 4;
2236 break;
2237 case AMDGPU_PP_SENSOR_EDGE_TEMP:
2238 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2239 if (ret)
2240 return ret;
2241
2242 *((uint32_t *)value) = metrics_table.TemperatureEdge *
2243 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2244 *size = 4;
2245 break;
2246 case AMDGPU_PP_SENSOR_MEM_TEMP:
2247 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2248 if (ret)
2249 return ret;
2250
2251 *((uint32_t *)value) = metrics_table.TemperatureHBM *
2252 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2253 *size = 4;
2254 break;
2255 case AMDGPU_PP_SENSOR_UVD_POWER:
2256 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
2257 *size = 4;
2258 break;
2259 case AMDGPU_PP_SENSOR_VCE_POWER:
2260 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
2261 *size = 4;
2262 break;
2263 case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
2264 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
2265 *size = 16;
2266 ret = vega20_get_gpu_power(hwmgr, idx, (uint32_t *)value);
2267 break;
2268 case AMDGPU_PP_SENSOR_VDDGFX:
2269 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
2270 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
2271 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
2272 *((uint32_t *)value) =
2273 (uint32_t)convert_to_vddc((uint8_t)val_vid);
2274 break;
2275 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2276 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
2277 if (!ret)
2278 *size = 8;
2279 break;
2280 default:
2281 ret = -EOPNOTSUPP;
2282 break;
2283 }
2284 return ret;
2285 }
2286
2287 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
2288 struct pp_display_clock_request *clock_req)
2289 {
2290 int result = 0;
2291 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2292 enum amd_pp_clock_type clk_type = clock_req->clock_type;
2293 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
2294 PPCLK_e clk_select = 0;
2295 uint32_t clk_request = 0;
2296
2297 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
2298 switch (clk_type) {
2299 case amd_pp_dcef_clock:
2300 clk_select = PPCLK_DCEFCLK;
2301 break;
2302 case amd_pp_disp_clock:
2303 clk_select = PPCLK_DISPCLK;
2304 break;
2305 case amd_pp_pixel_clock:
2306 clk_select = PPCLK_PIXCLK;
2307 break;
2308 case amd_pp_phy_clock:
2309 clk_select = PPCLK_PHYCLK;
2310 break;
2311 default:
2312 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
2313 result = -EINVAL;
2314 break;
2315 }
2316
2317 if (!result) {
2318 clk_request = (clk_select << 16) | clk_freq;
2319 result = smum_send_msg_to_smc_with_parameter(hwmgr,
2320 PPSMC_MSG_SetHardMinByFreq,
2321 clk_request,
2322 NULL);
2323 }
2324 }
2325
2326 return result;
2327 }
2328
2329 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
2330 PHM_PerformanceLevelDesignation designation, uint32_t index,
2331 PHM_PerformanceLevel *level)
2332 {
2333 return 0;
2334 }
2335
2336 static int vega20_notify_smc_display_config_after_ps_adjustment(
2337 struct pp_hwmgr *hwmgr)
2338 {
2339 struct vega20_hwmgr *data =
2340 (struct vega20_hwmgr *)(hwmgr->backend);
2341 struct vega20_single_dpm_table *dpm_table =
2342 &data->dpm_table.mem_table;
2343 struct PP_Clocks min_clocks = {0};
2344 struct pp_display_clock_request clock_req;
2345 int ret = 0;
2346
2347 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
2348 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
2349 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2350
2351 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
2352 clock_req.clock_type = amd_pp_dcef_clock;
2353 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
2354 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
2355 if (data->smu_features[GNLD_DS_DCEFCLK].supported)
2356 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
2357 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
2358 min_clocks.dcefClockInSR / 100,
2359 NULL)) == 0,
2360 "Attempt to set divider for DCEFCLK Failed!",
2361 return ret);
2362 } else {
2363 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
2364 }
2365 }
2366
2367 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2368 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
2369 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2370 PPSMC_MSG_SetHardMinByFreq,
2371 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
2372 NULL)),
2373 "[SetHardMinFreq] Set hard min uclk failed!",
2374 return ret);
2375 }
2376
2377 return 0;
2378 }
2379
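/*
 * Force gfxclk, memclk and socclk to their highest enabled DPM level by
 * pinning both the soft minimum and soft maximum to that level.
 */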
2380 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2381 {
2382 struct vega20_hwmgr *data =
2383 (struct vega20_hwmgr *)(hwmgr->backend);
2384 uint32_t soft_level;
2385 int ret = 0;
2386
2387 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2388
2389 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2390 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2391 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2392
2393 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2394
2395 data->dpm_table.mem_table.dpm_state.soft_min_level =
2396 data->dpm_table.mem_table.dpm_state.soft_max_level =
2397 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2398
2399 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2400
2401 data->dpm_table.soc_table.dpm_state.soft_min_level =
2402 data->dpm_table.soc_table.dpm_state.soft_max_level =
2403 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2404
2405 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2406 FEATURE_DPM_UCLK_MASK |
2407 FEATURE_DPM_SOCCLK_MASK);
2408 PP_ASSERT_WITH_CODE(!ret,
2409 "Failed to upload boot level to highest!",
2410 return ret);
2411
2412 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2413 FEATURE_DPM_UCLK_MASK |
2414 FEATURE_DPM_SOCCLK_MASK);
2415 PP_ASSERT_WITH_CODE(!ret,
2416 "Failed to upload dpm max level to highest!",
2417 return ret);
2418
2419 return 0;
2420 }
2421
2422 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2423 {
2424 struct vega20_hwmgr *data =
2425 (struct vega20_hwmgr *)(hwmgr->backend);
2426 uint32_t soft_level;
2427 int ret = 0;
2428
2429 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2430
2431 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2432 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2433 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2434
2435 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2436
2437 data->dpm_table.mem_table.dpm_state.soft_min_level =
2438 data->dpm_table.mem_table.dpm_state.soft_max_level =
2439 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2440
2441 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2442
2443 data->dpm_table.soc_table.dpm_state.soft_min_level =
2444 data->dpm_table.soc_table.dpm_state.soft_max_level =
2445 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2446
2447 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2448 FEATURE_DPM_UCLK_MASK |
2449 FEATURE_DPM_SOCCLK_MASK);
2450 PP_ASSERT_WITH_CODE(!ret,
2451 			"Failed to upload boot level to lowest!",
2452 return ret);
2453
2454 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2455 FEATURE_DPM_UCLK_MASK |
2456 FEATURE_DPM_SOCCLK_MASK);
2457 PP_ASSERT_WITH_CODE(!ret,
2458 			"Failed to upload dpm max level to lowest!",
2459 return ret);
2460
2461 return 0;
2462
2463 }
2464
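/*
 * Restore the gfxclk, memclk and socclk soft limits to the full range of
 * enabled DPM levels (lowest as soft min, highest as soft max).
 */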
2465 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2466 {
2467 struct vega20_hwmgr *data =
2468 (struct vega20_hwmgr *)(hwmgr->backend);
2469 uint32_t soft_min_level, soft_max_level;
2470 int ret = 0;
2471
2472 /* gfxclk soft min/max settings */
2473 soft_min_level =
2474 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2475 soft_max_level =
2476 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2477
2478 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2479 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2480 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2481 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2482
2483 /* uclk soft min/max settings */
2484 soft_min_level =
2485 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2486 soft_max_level =
2487 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2488
2489 data->dpm_table.mem_table.dpm_state.soft_min_level =
2490 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2491 data->dpm_table.mem_table.dpm_state.soft_max_level =
2492 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2493
2494 /* socclk soft min/max settings */
2495 soft_min_level =
2496 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2497 soft_max_level =
2498 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2499
2500 data->dpm_table.soc_table.dpm_state.soft_min_level =
2501 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2502 data->dpm_table.soc_table.dpm_state.soft_max_level =
2503 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2504
2505 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2506 FEATURE_DPM_UCLK_MASK |
2507 FEATURE_DPM_SOCCLK_MASK);
2508 PP_ASSERT_WITH_CODE(!ret,
2509 "Failed to upload DPM Bootup Levels!",
2510 return ret);
2511
2512 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2513 FEATURE_DPM_UCLK_MASK |
2514 FEATURE_DPM_SOCCLK_MASK);
2515 PP_ASSERT_WITH_CODE(!ret,
2516 "Failed to upload DPM Max Levels!",
2517 return ret);
2518
2519 return 0;
2520 }
2521
2522 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2523 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
2524 {
2525 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2526 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
2527 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
2528 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
2529
2530 *sclk_mask = 0;
2531 *mclk_mask = 0;
2532 *soc_mask = 0;
2533
2534 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
2535 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
2536 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
2537 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
2538 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
2539 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
2540 }
2541
2542 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2543 *sclk_mask = 0;
2544 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2545 *mclk_mask = 0;
2546 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2547 *sclk_mask = gfx_dpm_table->count - 1;
2548 *mclk_mask = mem_dpm_table->count - 1;
2549 *soc_mask = soc_dpm_table->count - 1;
2550 }
2551
2552 return 0;
2553 }
2554
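/*
 * Force a clock domain to the levels selected in @mask: the lowest set
 * bit becomes the soft minimum and the highest set bit the soft maximum
 * (hard minimum for DCEFCLK, minimum link index for PCIe).
 */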
2555 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2556 enum pp_clock_type type, uint32_t mask)
2557 {
2558 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2559 uint32_t soft_min_level, soft_max_level, hard_min_level;
2560 int ret = 0;
2561
2562 switch (type) {
2563 case PP_SCLK:
2564 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2565 soft_max_level = mask ? (fls(mask) - 1) : 0;
2566
2567 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2568 pr_err("Clock level specified %d is over max allowed %d\n",
2569 soft_max_level,
2570 data->dpm_table.gfx_table.count - 1);
2571 return -EINVAL;
2572 }
2573
2574 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2575 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2576 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2577 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2578
2579 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2580 PP_ASSERT_WITH_CODE(!ret,
2581 "Failed to upload boot level to lowest!",
2582 return ret);
2583
2584 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2585 PP_ASSERT_WITH_CODE(!ret,
2586 "Failed to upload dpm max level to highest!",
2587 return ret);
2588 break;
2589
2590 case PP_MCLK:
2591 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2592 soft_max_level = mask ? (fls(mask) - 1) : 0;
2593
2594 if (soft_max_level >= data->dpm_table.mem_table.count) {
2595 pr_err("Clock level specified %d is over max allowed %d\n",
2596 soft_max_level,
2597 data->dpm_table.mem_table.count - 1);
2598 return -EINVAL;
2599 }
2600
2601 data->dpm_table.mem_table.dpm_state.soft_min_level =
2602 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2603 data->dpm_table.mem_table.dpm_state.soft_max_level =
2604 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2605
2606 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2607 PP_ASSERT_WITH_CODE(!ret,
2608 "Failed to upload boot level to lowest!",
2609 return ret);
2610
2611 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2612 PP_ASSERT_WITH_CODE(!ret,
2613 "Failed to upload dpm max level to highest!",
2614 return ret);
2615
2616 break;
2617
2618 case PP_SOCCLK:
2619 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2620 soft_max_level = mask ? (fls(mask) - 1) : 0;
2621
2622 if (soft_max_level >= data->dpm_table.soc_table.count) {
2623 pr_err("Clock level specified %d is over max allowed %d\n",
2624 soft_max_level,
2625 data->dpm_table.soc_table.count - 1);
2626 return -EINVAL;
2627 }
2628
2629 data->dpm_table.soc_table.dpm_state.soft_min_level =
2630 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2631 data->dpm_table.soc_table.dpm_state.soft_max_level =
2632 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2633
2634 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2635 PP_ASSERT_WITH_CODE(!ret,
2636 "Failed to upload boot level to lowest!",
2637 return ret);
2638
2639 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2640 PP_ASSERT_WITH_CODE(!ret,
2641 "Failed to upload dpm max level to highest!",
2642 return ret);
2643
2644 break;
2645
2646 case PP_FCLK:
2647 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2648 soft_max_level = mask ? (fls(mask) - 1) : 0;
2649
2650 if (soft_max_level >= data->dpm_table.fclk_table.count) {
2651 pr_err("Clock level specified %d is over max allowed %d\n",
2652 soft_max_level,
2653 data->dpm_table.fclk_table.count - 1);
2654 return -EINVAL;
2655 }
2656
2657 data->dpm_table.fclk_table.dpm_state.soft_min_level =
2658 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value;
2659 data->dpm_table.fclk_table.dpm_state.soft_max_level =
2660 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value;
2661
2662 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2663 PP_ASSERT_WITH_CODE(!ret,
2664 "Failed to upload boot level to lowest!",
2665 return ret);
2666
2667 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2668 PP_ASSERT_WITH_CODE(!ret,
2669 "Failed to upload dpm max level to highest!",
2670 return ret);
2671
2672 break;
2673
2674 case PP_DCEFCLK:
2675 hard_min_level = mask ? (ffs(mask) - 1) : 0;
2676
2677 if (hard_min_level >= data->dpm_table.dcef_table.count) {
2678 pr_err("Clock level specified %d is over max allowed %d\n",
2679 hard_min_level,
2680 data->dpm_table.dcef_table.count - 1);
2681 return -EINVAL;
2682 }
2683
2684 data->dpm_table.dcef_table.dpm_state.hard_min_level =
2685 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
2686
2687 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK);
2688 PP_ASSERT_WITH_CODE(!ret,
2689 "Failed to upload boot level to lowest!",
2690 return ret);
2691
2692 //TODO: Setting DCEFCLK max dpm level is not supported
2693
2694 break;
2695
2696 case PP_PCIE:
2697 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2698 soft_max_level = mask ? (fls(mask) - 1) : 0;
2699 if (soft_min_level >= NUM_LINK_LEVELS ||
2700 soft_max_level >= NUM_LINK_LEVELS)
2701 return -EINVAL;
2702
2703 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2704 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
2705 NULL);
2706 PP_ASSERT_WITH_CODE(!ret,
2707 "Failed to set min link dpm level!",
2708 return ret);
2709
2710 break;
2711
2712 default:
2713 break;
2714 }
2715
2716 return 0;
2717 }
2718
2719 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
2720 enum amd_dpm_forced_level level)
2721 {
2722 int ret = 0;
2723 uint32_t sclk_mask, mclk_mask, soc_mask;
2724
2725 switch (level) {
2726 case AMD_DPM_FORCED_LEVEL_HIGH:
2727 ret = vega20_force_dpm_highest(hwmgr);
2728 break;
2729
2730 case AMD_DPM_FORCED_LEVEL_LOW:
2731 ret = vega20_force_dpm_lowest(hwmgr);
2732 break;
2733
2734 case AMD_DPM_FORCED_LEVEL_AUTO:
2735 ret = vega20_unforce_dpm_levels(hwmgr);
2736 break;
2737
2738 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2739 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2740 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2741 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2742 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
2743 if (ret)
2744 return ret;
2745 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
2746 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
2747 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask);
2748 break;
2749
2750 case AMD_DPM_FORCED_LEVEL_MANUAL:
2751 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2752 default:
2753 break;
2754 }
2755
2756 return ret;
2757 }
2758
2759 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
2760 {
2761 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2762
2763 	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
2764 return AMD_FAN_CTRL_MANUAL;
2765 else
2766 return AMD_FAN_CTRL_AUTO;
2767 }
2768
2769 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
2770 {
2771 switch (mode) {
2772 case AMD_FAN_CTRL_NONE:
2773 vega20_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
2774 break;
2775 case AMD_FAN_CTRL_MANUAL:
2776 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2777 vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
2778 break;
2779 case AMD_FAN_CTRL_AUTO:
2780 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2781 vega20_fan_ctrl_start_smc_fan_control(hwmgr);
2782 break;
2783 default:
2784 break;
2785 }
2786 }
2787
2788 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
2789 struct amd_pp_simple_clock_info *info)
2790 {
2791 #if 0
2792 struct phm_ppt_v2_information *table_info =
2793 (struct phm_ppt_v2_information *)hwmgr->pptable;
2794 struct phm_clock_and_voltage_limits *max_limits =
2795 &table_info->max_clock_voltage_on_ac;
2796
2797 info->engine_max_clock = max_limits->sclk;
2798 info->memory_max_clock = max_limits->mclk;
2799 #endif
2800 return 0;
2801 }
2802
2803
2804 static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
2805 struct pp_clock_levels_with_latency *clocks)
2806 {
2807 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2808 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2809 int i, count;
2810
2811 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
2812 return -1;
2813
2814 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2815 clocks->num_levels = count;
2816
2817 for (i = 0; i < count; i++) {
2818 clocks->data[i].clocks_in_khz =
2819 dpm_table->dpm_levels[i].value * 1000;
2820 clocks->data[i].latency_in_us = 0;
2821 }
2822
2823 return 0;
2824 }
2825
2826 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
2827 uint32_t clock)
2828 {
2829 return 25;
2830 }
2831
2832 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
2833 struct pp_clock_levels_with_latency *clocks)
2834 {
2835 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2836 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
2837 int i, count;
2838
2839 if (!data->smu_features[GNLD_DPM_UCLK].enabled)
2840 return -1;
2841
2842 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2843 clocks->num_levels = data->mclk_latency_table.count = count;
2844
2845 for (i = 0; i < count; i++) {
2846 clocks->data[i].clocks_in_khz =
2847 data->mclk_latency_table.entries[i].frequency =
2848 dpm_table->dpm_levels[i].value * 1000;
2849 clocks->data[i].latency_in_us =
2850 data->mclk_latency_table.entries[i].latency =
2851 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
2852 }
2853
2854 return 0;
2855 }
2856
2857 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
2858 struct pp_clock_levels_with_latency *clocks)
2859 {
2860 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2861 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
2862 int i, count;
2863
2864 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
2865 return -1;
2866
2867 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2868 clocks->num_levels = count;
2869
2870 for (i = 0; i < count; i++) {
2871 clocks->data[i].clocks_in_khz =
2872 dpm_table->dpm_levels[i].value * 1000;
2873 clocks->data[i].latency_in_us = 0;
2874 }
2875
2876 return 0;
2877 }
2878
2879 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
2880 struct pp_clock_levels_with_latency *clocks)
2881 {
2882 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2883 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
2884 int i, count;
2885
2886 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
2887 return -1;
2888
2889 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2890 clocks->num_levels = count;
2891
2892 for (i = 0; i < count; i++) {
2893 clocks->data[i].clocks_in_khz =
2894 dpm_table->dpm_levels[i].value * 1000;
2895 clocks->data[i].latency_in_us = 0;
2896 }
2897
2898 return 0;
2899
2900 }
2901
2902 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
2903 enum amd_pp_clock_type type,
2904 struct pp_clock_levels_with_latency *clocks)
2905 {
2906 int ret;
2907
2908 switch (type) {
2909 case amd_pp_sys_clock:
2910 ret = vega20_get_sclks(hwmgr, clocks);
2911 break;
2912 case amd_pp_mem_clock:
2913 ret = vega20_get_memclocks(hwmgr, clocks);
2914 break;
2915 case amd_pp_dcef_clock:
2916 ret = vega20_get_dcefclocks(hwmgr, clocks);
2917 break;
2918 case amd_pp_soc_clock:
2919 ret = vega20_get_socclocks(hwmgr, clocks);
2920 break;
2921 default:
2922 return -EINVAL;
2923 }
2924
2925 return ret;
2926 }
2927
2928 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
2929 enum amd_pp_clock_type type,
2930 struct pp_clock_levels_with_voltage *clocks)
2931 {
2932 clocks->num_levels = 0;
2933
2934 return 0;
2935 }
2936
2937 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
2938 void *clock_ranges)
2939 {
2940 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2941 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
2942 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
2943
2944 if (!data->registry_data.disable_water_mark &&
2945 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2946 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2947 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
2948 data->water_marks_bitmap |= WaterMarksExist;
2949 data->water_marks_bitmap &= ~WaterMarksLoaded;
2950 }
2951
2952 return 0;
2953 }
2954
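/*
 * Handle the PP_OD_* edit commands: validate and stage new sclk/mclk
 * limits and gfxclk voltage-curve points in the local OverDrive table,
 * re-read the table from the SMU on restore, and write it back (then
 * refresh the affected DPM tables) on commit.
 */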
2955 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2956 enum PP_OD_DPM_TABLE_COMMAND type,
2957 long *input, uint32_t size)
2958 {
2959 struct vega20_hwmgr *data =
2960 (struct vega20_hwmgr *)(hwmgr->backend);
2961 struct vega20_od8_single_setting *od8_settings =
2962 data->od8_settings.od8_settings_array;
2963 OverDriveTable_t *od_table =
2964 &(data->smc_state_table.overdrive_table);
2965 int32_t input_clk, input_vol, i;
2966 uint32_t input_index;
2967 int od8_id;
2968 int ret;
2969
2970 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
2971 return -EINVAL);
2972
2973 switch (type) {
2974 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2975 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
2976 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
2977 pr_info("Sclk min/max frequency overdrive not supported\n");
2978 return -EOPNOTSUPP;
2979 }
2980
2981 for (i = 0; i < size; i += 2) {
2982 if (i + 2 > size) {
2983 pr_info("invalid number of input parameters %d\n",
2984 size);
2985 return -EINVAL;
2986 }
2987
2988 input_index = input[i];
2989 input_clk = input[i + 1];
2990
2991 if (input_index != 0 && input_index != 1) {
2992 pr_info("Invalid index %d\n", input_index);
2993 				pr_info("Only min/max sclk frequency settings, indexed by 0/1, are supported\n");
2994 return -EINVAL;
2995 }
2996
2997 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
2998 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
2999 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3000 input_clk,
3001 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3002 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3003 return -EINVAL;
3004 }
3005
3006 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) ||
3007 (input_index == 1 && od_table->GfxclkFmax != input_clk))
3008 data->gfxclk_overdrive = true;
3009
3010 if (input_index == 0)
3011 od_table->GfxclkFmin = input_clk;
3012 else
3013 od_table->GfxclkFmax = input_clk;
3014 }
3015
3016 break;
3017
3018 case PP_OD_EDIT_MCLK_VDDC_TABLE:
3019 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3020 pr_info("Mclk max frequency overdrive not supported\n");
3021 return -EOPNOTSUPP;
3022 }
3023
3024 for (i = 0; i < size; i += 2) {
3025 if (i + 2 > size) {
3026 pr_info("invalid number of input parameters %d\n",
3027 size);
3028 return -EINVAL;
3029 }
3030
3031 input_index = input[i];
3032 input_clk = input[i + 1];
3033
3034 if (input_index != 1) {
3035 pr_info("Invalid index %d\n", input_index);
3036 				pr_info("Only the max mclk frequency setting, indexed by 1, is supported\n");
3037 return -EINVAL;
3038 }
3039
3040 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
3041 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
3042 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3043 input_clk,
3044 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3045 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3046 return -EINVAL;
3047 }
3048
3049 if (input_index == 1 && od_table->UclkFmax != input_clk)
3050 data->memclk_overdrive = true;
3051
3052 od_table->UclkFmax = input_clk;
3053 }
3054
3055 break;
3056
3057 case PP_OD_EDIT_VDDC_CURVE:
3058 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3059 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3060 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3061 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3062 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3063 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
3064 			pr_info("Voltage curve calibration not supported\n");
3065 return -EOPNOTSUPP;
3066 }
3067
3068 for (i = 0; i < size; i += 3) {
3069 if (i + 3 > size) {
3070 pr_info("invalid number of input parameters %d\n",
3071 size);
3072 return -EINVAL;
3073 }
3074
3075 input_index = input[i];
3076 input_clk = input[i + 1];
3077 input_vol = input[i + 2];
3078
3079 if (input_index > 2) {
3080 pr_info("Setting for point %d is not supported\n",
3081 input_index + 1);
3082 				pr_info("Only three points, indexed by 0, 1 and 2, are supported\n");
3083 return -EINVAL;
3084 }
3085
3086 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
3087 if (input_clk < od8_settings[od8_id].min_value ||
3088 input_clk > od8_settings[od8_id].max_value) {
3089 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3090 input_clk,
3091 od8_settings[od8_id].min_value,
3092 od8_settings[od8_id].max_value);
3093 return -EINVAL;
3094 }
3095
3096 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
3097 if (input_vol < od8_settings[od8_id].min_value ||
3098 input_vol > od8_settings[od8_id].max_value) {
3099 pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
3100 input_vol,
3101 od8_settings[od8_id].min_value,
3102 od8_settings[od8_id].max_value);
3103 return -EINVAL;
3104 }
3105
3106 switch (input_index) {
3107 case 0:
3108 od_table->GfxclkFreq1 = input_clk;
3109 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
3110 break;
3111 case 1:
3112 od_table->GfxclkFreq2 = input_clk;
3113 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
3114 break;
3115 case 2:
3116 od_table->GfxclkFreq3 = input_clk;
3117 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
3118 break;
3119 }
3120 }
3121 break;
3122
3123 case PP_OD_RESTORE_DEFAULT_TABLE:
3124 data->gfxclk_overdrive = false;
3125 data->memclk_overdrive = false;
3126
3127 ret = smum_smc_table_manager(hwmgr,
3128 (uint8_t *)od_table,
3129 TABLE_OVERDRIVE, true);
3130 PP_ASSERT_WITH_CODE(!ret,
3131 "Failed to export overdrive table!",
3132 return ret);
3133 break;
3134
3135 case PP_OD_COMMIT_DPM_TABLE:
3136 ret = smum_smc_table_manager(hwmgr,
3137 (uint8_t *)od_table,
3138 TABLE_OVERDRIVE, false);
3139 PP_ASSERT_WITH_CODE(!ret,
3140 "Failed to import overdrive table!",
3141 return ret);
3142
3143 /* retrieve updated gfxclk table */
3144 if (data->gfxclk_overdrive) {
3145 data->gfxclk_overdrive = false;
3146
3147 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
3148 if (ret)
3149 return ret;
3150 }
3151
3152 /* retrieve updated memclk table */
3153 if (data->memclk_overdrive) {
3154 data->memclk_overdrive = false;
3155
3156 ret = vega20_setup_memclk_dpm_table(hwmgr);
3157 if (ret)
3158 return ret;
3159 }
3160 break;
3161
3162 default:
3163 return -EINVAL;
3164 }
3165
3166 return 0;
3167 }
3168
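/*
 * Map the requested MP1 state to the matching PPSMC_MSG_PrepareMp1For*
 * message and forward it to the SMC; PP_MP1_STATE_NONE (and any unknown
 * state) is treated as a no-op.
 */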
3169 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
3170 enum pp_mp1_state mp1_state)
3171 {
3172 uint16_t msg;
3173 int ret;
3174
3175 switch (mp1_state) {
3176 case PP_MP1_STATE_SHUTDOWN:
3177 msg = PPSMC_MSG_PrepareMp1ForShutdown;
3178 break;
3179 case PP_MP1_STATE_UNLOAD:
3180 msg = PPSMC_MSG_PrepareMp1ForUnload;
3181 break;
3182 case PP_MP1_STATE_RESET:
3183 msg = PPSMC_MSG_PrepareMp1ForReset;
3184 break;
3185 case PP_MP1_STATE_NONE:
3186 default:
3187 return 0;
3188 }
3189
3190 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
3191 "[PrepareMp1] Failed!",
3192 return ret);
3193
3194 return 0;
3195 }
3196
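/*
 * Dump the SMC feature enablement state into the sysfs buffer: the combined
 * 64-bit mask first, then one line per feature with its name, bitmask and a
 * Y/N enablement flag.
 */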
3197 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3198 {
3199 static const char *ppfeature_name[] = {
3200 "DPM_PREFETCHER",
3201 "GFXCLK_DPM",
3202 "UCLK_DPM",
3203 "SOCCLK_DPM",
3204 "UVD_DPM",
3205 "VCE_DPM",
3206 "ULV",
3207 "MP0CLK_DPM",
3208 "LINK_DPM",
3209 "DCEFCLK_DPM",
3210 "GFXCLK_DS",
3211 "SOCCLK_DS",
3212 "LCLK_DS",
3213 "PPT",
3214 "TDC",
3215 "THERMAL",
3216 "GFX_PER_CU_CG",
3217 "RM",
3218 "DCEFCLK_DS",
3219 "ACDC",
3220 "VR0HOT",
3221 "VR1HOT",
3222 "FW_CTF",
3223 "LED_DISPLAY",
3224 "FAN_CONTROL",
3225 "GFX_EDC",
3226 "GFXOFF",
3227 "CG",
3228 "FCLK_DPM",
3229 "FCLK_DS",
3230 "MP1CLK_DS",
3231 "MP0CLK_DS",
3232 "XGMI",
3233 "ECC"};
3234 static const char *output_title[] = {
3235 "FEATURES",
3236 "BITMASK",
3237 "ENABLEMENT"};
3238 uint64_t features_enabled;
3239 int i;
3240 int ret = 0;
3241 int size = 0;
3242
3243 phm_get_sysfs_buf(&buf, &size);
3244
3245 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3246 PP_ASSERT_WITH_CODE(!ret,
3247 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
3248 return ret);
3249
3250 size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
3251 size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
3252 output_title[0],
3253 output_title[1],
3254 output_title[2]);
3255 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3256 size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
3257 ppfeature_name[i],
3258 1ULL << i,
3259 (features_enabled & (1ULL << i)) ? "Y" : "N");
3260 }
3261
3262 return size;
3263 }
3264
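/*
 * Reconcile the enabled SMC features with the requested mask. Features that
 * are currently enabled but cleared in the new mask are disabled, and vice
 * versa; e.g. with features_enabled == 0x5 and new_ppfeature_masks == 0x6,
 * bit 0 is disabled and bit 1 is enabled. The cached per-feature enablement
 * flags are refreshed from the SMC afterwards.
 */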
3265 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
3266 {
3267 struct vega20_hwmgr *data =
3268 (struct vega20_hwmgr *)(hwmgr->backend);
3269 uint64_t features_enabled, features_to_enable, features_to_disable;
3270 int i, ret = 0;
3271 bool enabled;
3272
3273 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
3274 return -EINVAL;
3275
3276 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3277 if (ret)
3278 return ret;
3279
3280 features_to_disable =
3281 features_enabled & ~new_ppfeature_masks;
3282 features_to_enable =
3283 ~features_enabled & new_ppfeature_masks;
3284
3285 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
3286 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
3287
3288 if (features_to_disable) {
3289 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
3290 if (ret)
3291 return ret;
3292 }
3293
3294 if (features_to_enable) {
3295 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
3296 if (ret)
3297 return ret;
3298 }
3299
3300 /* Update the cached feature enablement state */
3301 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3302 if (ret)
3303 return ret;
3304
3305 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3306 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
3307 true : false;
3308 data->smu_features[i].enabled = enabled;
3309 }
3310
3311 return 0;
3312 }
3313
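/*
 * The current PCIe link width/speed are read back from the PCIE LC control
 * registers; the raw level indexes the link_width[]/link_speed[] lookup
 * tables, with out-of-range levels falling back to index 0.
 */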
3314 static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
3315 {
3316 struct amdgpu_device *adev = hwmgr->adev;
3317
3318 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
3319 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
3320 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
3321 }
3322
3323 static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
3324 {
3325 uint32_t width_level;
3326
3327 width_level = vega20_get_current_pcie_link_width_level(hwmgr);
3328 if (width_level > LINK_WIDTH_MAX)
3329 width_level = 0;
3330
3331 return link_width[width_level];
3332 }
3333
3334 static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
3335 {
3336 struct amdgpu_device *adev = hwmgr->adev;
3337
3338 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
3339 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
3340 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
3341 }
3342
3343 static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
3344 {
3345 uint32_t speed_level;
3346
3347 speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
3348 if (speed_level > LINK_SPEED_MAX)
3349 speed_level = 0;
3350
3351 return link_speed[speed_level];
3352 }
3353
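/*
 * Pretty-print the supported levels for the requested clock/OD type, one per
 * line, marking the level that matches the current frequency with '*'. The
 * "now" value returned by vega20_get_current_clk_freq() is in 10 kHz units,
 * hence the "now * 10" (kHz) and "now / 100" (MHz) conversions below.
 */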
3354 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3355 enum pp_clock_type type, char *buf)
3356 {
3357 struct vega20_hwmgr *data =
3358 (struct vega20_hwmgr *)(hwmgr->backend);
3359 struct vega20_od8_single_setting *od8_settings =
3360 data->od8_settings.od8_settings_array;
3361 OverDriveTable_t *od_table =
3362 &(data->smc_state_table.overdrive_table);
3363 PPTable_t *pptable = &(data->smc_state_table.pp_table);
3364 struct pp_clock_levels_with_latency clocks;
3365 struct vega20_single_dpm_table *fclk_dpm_table =
3366 &(data->dpm_table.fclk_table);
3367 int i, now, size = 0;
3368 int ret = 0;
3369 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
3370
3371 switch (type) {
3372 case PP_SCLK:
3373 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
3374 PP_ASSERT_WITH_CODE(!ret,
3375 "Attempt to get current gfx clk Failed!",
3376 return ret);
3377
3378 if (vega20_get_sclks(hwmgr, &clocks)) {
3379 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3380 now / 100);
3381 break;
3382 }
3383
3384 for (i = 0; i < clocks.num_levels; i++)
3385 size += sprintf(buf + size, "%d: %uMhz %s\n",
3386 i, clocks.data[i].clocks_in_khz / 1000,
3387 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3388 break;
3389
3390 case PP_MCLK:
3391 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
3392 PP_ASSERT_WITH_CODE(!ret,
3393 "Attempt to get current mclk freq Failed!",
3394 return ret);
3395
3396 if (vega20_get_memclocks(hwmgr, &clocks)) {
3397 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3398 now / 100);
3399 break;
3400 }
3401
3402 for (i = 0; i < clocks.num_levels; i++)
3403 size += sprintf(buf + size, "%d: %uMhz %s\n",
3404 i, clocks.data[i].clocks_in_khz / 1000,
3405 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3406 break;
3407
3408 case PP_SOCCLK:
3409 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
3410 PP_ASSERT_WITH_CODE(!ret,
3411 "Attempt to get current socclk freq Failed!",
3412 return ret);
3413
3414 if (vega20_get_socclocks(hwmgr, &clocks)) {
3415 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3416 now / 100);
3417 break;
3418 }
3419
3420 for (i = 0; i < clocks.num_levels; i++)
3421 size += sprintf(buf + size, "%d: %uMhz %s\n",
3422 i, clocks.data[i].clocks_in_khz / 1000,
3423 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3424 break;
3425
3426 case PP_FCLK:
3427 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
3428 PP_ASSERT_WITH_CODE(!ret,
3429 "Attempt to get current fclk freq Failed!",
3430 return ret);
3431
3432 for (i = 0; i < fclk_dpm_table->count; i++)
3433 size += sprintf(buf + size, "%d: %uMhz %s\n",
3434 i, fclk_dpm_table->dpm_levels[i].value,
3435 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
3436 break;
3437
3438 case PP_DCEFCLK:
3439 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
3440 PP_ASSERT_WITH_CODE(!ret,
3441 "Attempt to get current dcefclk freq Failed!",
3442 return ret);
3443
3444 if (vega20_get_dcefclocks(hwmgr, &clocks)) {
3445 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3446 now / 100);
3447 break;
3448 }
3449
3450 for (i = 0; i < clocks.num_levels; i++)
3451 size += sprintf(buf + size, "%d: %uMhz %s\n",
3452 i, clocks.data[i].clocks_in_khz / 1000,
3453 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3454 break;
3455
3456 case PP_PCIE:
3457 current_gen_speed =
3458 vega20_get_current_pcie_link_speed_level(hwmgr);
3459 current_lane_width =
3460 vega20_get_current_pcie_link_width_level(hwmgr);
3461 for (i = 0; i < NUM_LINK_LEVELS; i++) {
3462 gen_speed = pptable->PcieGenSpeed[i];
3463 lane_width = pptable->PcieLaneCount[i];
3464
3465 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
3466 (gen_speed == 0) ? "2.5GT/s," :
3467 (gen_speed == 1) ? "5.0GT/s," :
3468 (gen_speed == 2) ? "8.0GT/s," :
3469 (gen_speed == 3) ? "16.0GT/s," : "",
3470 (lane_width == 1) ? "x1" :
3471 (lane_width == 2) ? "x2" :
3472 (lane_width == 3) ? "x4" :
3473 (lane_width == 4) ? "x8" :
3474 (lane_width == 5) ? "x12" :
3475 (lane_width == 6) ? "x16" : "",
3476 pptable->LclkFreq[i],
3477 (current_gen_speed == gen_speed) &&
3478 (current_lane_width == lane_width) ?
3479 "*" : "");
3480 }
3481 break;
3482
3483 case OD_SCLK:
3484 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3485 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3486 size += sprintf(buf + size, "%s:\n", "OD_SCLK");
3487 size += sprintf(buf + size, "0: %10uMhz\n",
3488 od_table->GfxclkFmin);
3489 size += sprintf(buf + size, "1: %10uMhz\n",
3490 od_table->GfxclkFmax);
3491 }
3492 break;
3493
3494 case OD_MCLK:
3495 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3496 size += sprintf(buf + size, "%s:\n", "OD_MCLK");
3497 size += sprintf(buf + size, "1: %10uMhz\n",
3498 od_table->UclkFmax);
3499 }
3500
3501 break;
3502
3503 case OD_VDDC_CURVE:
3504 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3505 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3506 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3507 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3508 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3509 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3510 size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
3511 size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
3512 od_table->GfxclkFreq1,
3513 od_table->GfxclkVolt1 / VOLTAGE_SCALE);
3514 size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
3515 od_table->GfxclkFreq2,
3516 od_table->GfxclkVolt2 / VOLTAGE_SCALE);
3517 size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
3518 od_table->GfxclkFreq3,
3519 od_table->GfxclkVolt3 / VOLTAGE_SCALE);
3520 }
3521
3522 break;
3523
3524 case OD_RANGE:
3525 size += sprintf(buf + size, "%s:\n", "OD_RANGE");
3526
3527 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3528 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3529 size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
3530 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3531 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3532 }
3533
3534 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3535 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
3536 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3537 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3538 }
3539
3540 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3541 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3542 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3543 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3544 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3545 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3546 size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
3547 od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
3548 od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
3549 size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
3550 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
3551 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
3552 size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
3553 od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
3554 od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
3555 size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
3556 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
3557 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
3558 size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
3559 od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
3560 od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
3561 size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
3562 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
3563 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
3564 }
3565
3566 break;
3567 default:
3568 break;
3569 }
3570 return size;
3571 }
3572
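/*
 * Pin UCLK to its top DPM level by raising the hard minimum. The
 * SetHardMinByFreq parameter packs the clock id in the upper 16 bits and the
 * frequency (in MHz, as stored in the DPM table) in the lower 16 bits.
 */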
3573 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
3574 struct vega20_single_dpm_table *dpm_table)
3575 {
3576 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3577 int ret = 0;
3578
3579 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
3580 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3581 				"[SetUclkToHighestDpmLevel] Dpm table has no entry!",
3582 return -EINVAL);
3583 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
3584 				"[SetUclkToHighestDpmLevel] Dpm table has too many entries!",
3585 return -EINVAL);
3586
3587 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3588 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3589 PPSMC_MSG_SetHardMinByFreq,
3590 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
3591 NULL)),
3592 				"[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
3593 return ret);
3594 }
3595
3596 return ret;
3597 }
3598
3599 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
3600 {
3601 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3602 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
3603 int ret = 0;
3604
3605 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
3606 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3607 				"[SetFclkToHighestDpmLevel] Dpm table has no entry!",
3608 return -EINVAL);
3609 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
3610 				"[SetFclkToHighestDpmLevel] Dpm table has too many entries!",
3611 return -EINVAL);
3612
3613 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3614 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3615 PPSMC_MSG_SetSoftMinByFreq,
3616 (PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level,
3617 NULL)),
3618 				"[SetFclkToHighestDpmLevel] Set soft min fclk failed!",
3619 return ret);
3620 }
3621
3622 return ret;
3623 }
3624
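/*
 * Before a display configuration change takes effect, report zero displays
 * to the SMU and temporarily force UCLK and FCLK to their highest DPM levels
 * so the reconfiguration is not limited by a low memory/fabric clock.
 */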
3625 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3626 {
3627 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3628 int ret = 0;
3629
3630 smum_send_msg_to_smc_with_parameter(hwmgr,
3631 PPSMC_MSG_NumOfDisplays, 0, NULL);
3632
3633 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
3634 &data->dpm_table.mem_table);
3635 if (ret)
3636 return ret;
3637
3638 return vega20_set_fclk_to_highest_dpm_level(hwmgr);
3639 }
3640
3641 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3642 {
3643 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3644 int result = 0;
3645 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
3646
3647 if ((data->water_marks_bitmap & WaterMarksExist) &&
3648 !(data->water_marks_bitmap & WaterMarksLoaded)) {
3649 result = smum_smc_table_manager(hwmgr,
3650 (uint8_t *)wm_table, TABLE_WATERMARKS, false);
3651 PP_ASSERT_WITH_CODE(!result,
3652 "Failed to update WMTABLE!",
3653 return result);
3654 data->water_marks_bitmap |= WaterMarksLoaded;
3655 }
3656
3657 if ((data->water_marks_bitmap & WaterMarksExist) &&
3658 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
3659 data->smu_features[GNLD_DPM_SOCCLK].supported) {
3660 result = smum_send_msg_to_smc_with_parameter(hwmgr,
3661 PPSMC_MSG_NumOfDisplays,
3662 hwmgr->display_config->num_display,
3663 NULL);
3664 }
3665
3666 return result;
3667 }
3668
3669 static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
3670 {
3671 struct vega20_hwmgr *data =
3672 (struct vega20_hwmgr *)(hwmgr->backend);
3673 int ret = 0;
3674
3675 if (data->smu_features[GNLD_DPM_UVD].supported) {
3676 if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
3677 if (enable)
3678 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
3679 else
3680 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
3681 }
3682
3683 ret = vega20_enable_smc_features(hwmgr,
3684 enable,
3685 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
3686 PP_ASSERT_WITH_CODE(!ret,
3687 "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
3688 return ret);
3689 data->smu_features[GNLD_DPM_UVD].enabled = enable;
3690 }
3691
3692 return 0;
3693 }
3694
3695 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
3696 {
3697 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3698
3699 if (data->vce_power_gated == bgate)
3700 		return;
3701
3702 data->vce_power_gated = bgate;
3703 if (bgate) {
3704 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3705 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3706 AMD_IP_BLOCK_TYPE_VCE,
3707 AMD_PG_STATE_GATE);
3708 } else {
3709 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3710 AMD_IP_BLOCK_TYPE_VCE,
3711 AMD_PG_STATE_UNGATE);
3712 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3713 }
3714
3715 }
3716
3717 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
3718 {
3719 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3720
3721 if (data->uvd_power_gated == bgate)
3722 		return;
3723
3724 data->uvd_power_gated = bgate;
3725 vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
3726 }
3727
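/*
 * Recompute the soft/hard min/max limits for every clock domain ahead of a
 * power state change: each DPM table is first reset to its full range, then
 * narrowed according to the UMD pstate caps, the forced DPM level, DAL's
 * UCLK hard minimum and the mclk/fclk switching restrictions derived from
 * the current display configuration.
 */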
3728 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3729 {
3730 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3731 struct vega20_single_dpm_table *dpm_table;
3732 bool vblank_too_short = false;
3733 bool disable_mclk_switching;
3734 bool disable_fclk_switching;
3735 uint32_t i, latency;
3736
3737 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
3738 !hwmgr->display_config->multi_monitor_in_sync) ||
3739 vblank_too_short;
3740 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3741
3742 /* gfxclk */
3743 dpm_table = &(data->dpm_table.gfx_table);
3744 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3745 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3746 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3747 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3748
3749 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3750 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
3751 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3752 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3753 }
3754
3755 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3756 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3757 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3758 }
3759
3760 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3761 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3762 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3763 }
3764 }
3765
3766 /* memclk */
3767 dpm_table = &(data->dpm_table.mem_table);
3768 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3769 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3770 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3771 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3772
3773 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3774 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
3775 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3776 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3777 }
3778
3779 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3780 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3781 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3782 }
3783
3784 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3785 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3786 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3787 }
3788 }
3789
3790 /* honour DAL's UCLK Hardmin */
3791 if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
3792 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
3793
3794 /* Hardmin is dependent on displayconfig */
3795 if (disable_mclk_switching) {
3796 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3797 for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
3798 if (data->mclk_latency_table.entries[i].latency <= latency) {
3799 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
3800 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
3801 break;
3802 }
3803 }
3804 }
3805 }
3806
3807 if (hwmgr->display_config->nb_pstate_switch_disable)
3808 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3809
3810 if ((disable_mclk_switching &&
3811 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
3812 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
3813 disable_fclk_switching = true;
3814 else
3815 disable_fclk_switching = false;
3816
3817 /* fclk */
3818 dpm_table = &(data->dpm_table.fclk_table);
3819 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3820 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3821 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3822 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3823 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
3824 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3825
3826 /* vclk */
3827 dpm_table = &(data->dpm_table.vclk_table);
3828 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3829 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3830 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3831 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3832
3833 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3834 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3835 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3836 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3837 }
3838
3839 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3840 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3841 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3842 }
3843 }
3844
3845 /* dclk */
3846 dpm_table = &(data->dpm_table.dclk_table);
3847 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3848 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3849 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3850 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3851
3852 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3853 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3854 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3855 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3856 }
3857
3858 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3859 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3860 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3861 }
3862 }
3863
3864 /* socclk */
3865 dpm_table = &(data->dpm_table.soc_table);
3866 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3867 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3868 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3869 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3870
3871 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3872 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
3873 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3874 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3875 }
3876
3877 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3878 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3879 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3880 }
3881 }
3882
3883 /* eclk */
3884 dpm_table = &(data->dpm_table.eclk_table);
3885 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3886 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3887 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3888 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3889
3890 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3891 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
3892 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3893 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3894 }
3895
3896 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3897 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3898 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3899 }
3900 }
3901
3902 return 0;
3903 }
3904
3905 static bool
3906 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
3907 {
3908 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3909 bool is_update_required = false;
3910
3911 if (data->display_timing.num_existing_displays !=
3912 hwmgr->display_config->num_display)
3913 is_update_required = true;
3914
3915 if (data->registry_data.gfx_clk_deep_sleep_support &&
3916 (data->display_timing.min_clock_in_sr !=
3917 hwmgr->display_config->min_core_set_clock_in_sr))
3918 is_update_required = true;
3919
3920 return is_update_required;
3921 }
3922
3923 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3924 {
3925 int ret = 0;
3926
3927 ret = vega20_disable_all_smu_features(hwmgr);
3928 PP_ASSERT_WITH_CODE(!ret,
3929 "[DisableDpmTasks] Failed to disable all smu features!",
3930 return ret);
3931
3932 return 0;
3933 }
3934
3935 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
3936 {
3937 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3938 int result;
3939
3940 result = vega20_disable_dpm_tasks(hwmgr);
3941 PP_ASSERT_WITH_CODE((0 == result),
3942 "[PowerOffAsic] Failed to disable DPM!",
3943 );
3944 data->water_marks_bitmap &= ~(WaterMarksLoaded);
3945
3946 return result;
3947 }
3948
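/*
 * Translate a PP_SMC_POWER_PROFILE_* index into the matching
 * WORKLOAD_PPLIB_*_BIT; unrecognized profiles fall back to 0.
 */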
3949 static int conv_power_profile_to_pplib_workload(int power_profile)
3950 {
3951 int pplib_workload = 0;
3952
3953 switch (power_profile) {
3954 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3955 pplib_workload = WORKLOAD_DEFAULT_BIT;
3956 break;
3957 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3958 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3959 break;
3960 case PP_SMC_POWER_PROFILE_POWERSAVING:
3961 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
3962 break;
3963 case PP_SMC_POWER_PROFILE_VIDEO:
3964 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
3965 break;
3966 case PP_SMC_POWER_PROFILE_VR:
3967 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
3968 break;
3969 case PP_SMC_POWER_PROFILE_COMPUTE:
3970 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
3971 break;
3972 case PP_SMC_POWER_PROFILE_CUSTOM:
3973 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
3974 break;
3975 }
3976
3977 return pplib_workload;
3978 }
3979
3980 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3981 {
3982 DpmActivityMonitorCoeffInt_t activity_monitor;
3983 uint32_t i, size = 0;
3984 uint16_t workload_type = 0;
3985 static const char *title[] = {
3986 "PROFILE_INDEX(NAME)",
3987 "CLOCK_TYPE(NAME)",
3988 "FPS",
3989 "UseRlcBusy",
3990 "MinActiveFreqType",
3991 "MinActiveFreq",
3992 "BoosterFreqType",
3993 "BoosterFreq",
3994 "PD_Data_limit_c",
3995 "PD_Data_error_coeff",
3996 "PD_Data_error_rate_coeff"};
3997 int result = 0;
3998
3999 if (!buf)
4000 return -EINVAL;
4001
4002 phm_get_sysfs_buf(&buf, &size);
4003
4004 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
4005 title[0], title[1], title[2], title[3], title[4], title[5],
4006 title[6], title[7], title[8], title[9], title[10]);
4007
4008 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
4009 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4010 workload_type = conv_power_profile_to_pplib_workload(i);
4011 result = vega20_get_activity_monitor_coeff(hwmgr,
4012 (uint8_t *)(&activity_monitor), workload_type);
4013 PP_ASSERT_WITH_CODE(!result,
4014 "[GetPowerProfile] Failed to get activity monitor!",
4015 return result);
4016
4017 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
4018 i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
4019
4020 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4021 " ",
4022 0,
4023 "GFXCLK",
4024 activity_monitor.Gfx_FPS,
4025 activity_monitor.Gfx_UseRlcBusy,
4026 activity_monitor.Gfx_MinActiveFreqType,
4027 activity_monitor.Gfx_MinActiveFreq,
4028 activity_monitor.Gfx_BoosterFreqType,
4029 activity_monitor.Gfx_BoosterFreq,
4030 activity_monitor.Gfx_PD_Data_limit_c,
4031 activity_monitor.Gfx_PD_Data_error_coeff,
4032 activity_monitor.Gfx_PD_Data_error_rate_coeff);
4033
4034 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4035 " ",
4036 1,
4037 "SOCCLK",
4038 activity_monitor.Soc_FPS,
4039 activity_monitor.Soc_UseRlcBusy,
4040 activity_monitor.Soc_MinActiveFreqType,
4041 activity_monitor.Soc_MinActiveFreq,
4042 activity_monitor.Soc_BoosterFreqType,
4043 activity_monitor.Soc_BoosterFreq,
4044 activity_monitor.Soc_PD_Data_limit_c,
4045 activity_monitor.Soc_PD_Data_error_coeff,
4046 activity_monitor.Soc_PD_Data_error_rate_coeff);
4047
4048 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4049 " ",
4050 2,
4051 "UCLK",
4052 activity_monitor.Mem_FPS,
4053 activity_monitor.Mem_UseRlcBusy,
4054 activity_monitor.Mem_MinActiveFreqType,
4055 activity_monitor.Mem_MinActiveFreq,
4056 activity_monitor.Mem_BoosterFreqType,
4057 activity_monitor.Mem_BoosterFreq,
4058 activity_monitor.Mem_PD_Data_limit_c,
4059 activity_monitor.Mem_PD_Data_error_coeff,
4060 activity_monitor.Mem_PD_Data_error_rate_coeff);
4061
4062 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4063 " ",
4064 3,
4065 "FCLK",
4066 activity_monitor.Fclk_FPS,
4067 activity_monitor.Fclk_UseRlcBusy,
4068 activity_monitor.Fclk_MinActiveFreqType,
4069 activity_monitor.Fclk_MinActiveFreq,
4070 activity_monitor.Fclk_BoosterFreqType,
4071 activity_monitor.Fclk_BoosterFreq,
4072 activity_monitor.Fclk_PD_Data_limit_c,
4073 activity_monitor.Fclk_PD_Data_error_coeff,
4074 activity_monitor.Fclk_PD_Data_error_rate_coeff);
4075 }
4076
4077 return size;
4078 }
4079
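/*
 * Apply the requested power profile mode. The caller appends the profile
 * index at input[size]; for PP_SMC_POWER_PROFILE_CUSTOM, input[0] selects
 * the clock domain (0 gfxclk, 1 socclk, 2 uclk, 3 fclk) and input[1..9]
 * carry the activity monitor coefficients. A CUSTOM request with size == 0
 * simply re-applies the previously configured custom profile.
 */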
4080 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4081 {
4082 DpmActivityMonitorCoeffInt_t activity_monitor;
4083 int workload_type, result = 0;
4084 uint32_t power_profile_mode = input[size];
4085
4086 if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
4087 pr_err("Invalid power profile mode %d\n", power_profile_mode);
4088 return -EINVAL;
4089 }
4090
4091 if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4092 struct vega20_hwmgr *data =
4093 (struct vega20_hwmgr *)(hwmgr->backend);
4094 if (size == 0 && !data->is_custom_profile_set)
4095 return -EINVAL;
4096 if (size < 10 && size != 0)
4097 return -EINVAL;
4098
4099 result = vega20_get_activity_monitor_coeff(hwmgr,
4100 (uint8_t *)(&activity_monitor),
4101 WORKLOAD_PPLIB_CUSTOM_BIT);
4102 PP_ASSERT_WITH_CODE(!result,
4103 "[SetPowerProfile] Failed to get activity monitor!",
4104 return result);
4105
4106 /* If size==0, then we want to apply the already-configured
4107 * CUSTOM profile again. Just apply it, since we checked its
4108 * validity above
4109 */
4110 if (size == 0)
4111 goto out;
4112
4113 switch (input[0]) {
4114 case 0: /* Gfxclk */
4115 activity_monitor.Gfx_FPS = input[1];
4116 activity_monitor.Gfx_UseRlcBusy = input[2];
4117 activity_monitor.Gfx_MinActiveFreqType = input[3];
4118 activity_monitor.Gfx_MinActiveFreq = input[4];
4119 activity_monitor.Gfx_BoosterFreqType = input[5];
4120 activity_monitor.Gfx_BoosterFreq = input[6];
4121 activity_monitor.Gfx_PD_Data_limit_c = input[7];
4122 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
4123 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
4124 break;
4125 case 1: /* Socclk */
4126 activity_monitor.Soc_FPS = input[1];
4127 activity_monitor.Soc_UseRlcBusy = input[2];
4128 activity_monitor.Soc_MinActiveFreqType = input[3];
4129 activity_monitor.Soc_MinActiveFreq = input[4];
4130 activity_monitor.Soc_BoosterFreqType = input[5];
4131 activity_monitor.Soc_BoosterFreq = input[6];
4132 activity_monitor.Soc_PD_Data_limit_c = input[7];
4133 activity_monitor.Soc_PD_Data_error_coeff = input[8];
4134 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
4135 break;
4136 case 2: /* Uclk */
4137 activity_monitor.Mem_FPS = input[1];
4138 activity_monitor.Mem_UseRlcBusy = input[2];
4139 activity_monitor.Mem_MinActiveFreqType = input[3];
4140 activity_monitor.Mem_MinActiveFreq = input[4];
4141 activity_monitor.Mem_BoosterFreqType = input[5];
4142 activity_monitor.Mem_BoosterFreq = input[6];
4143 activity_monitor.Mem_PD_Data_limit_c = input[7];
4144 activity_monitor.Mem_PD_Data_error_coeff = input[8];
4145 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
4146 break;
4147 case 3: /* Fclk */
4148 activity_monitor.Fclk_FPS = input[1];
4149 activity_monitor.Fclk_UseRlcBusy = input[2];
4150 activity_monitor.Fclk_MinActiveFreqType = input[3];
4151 activity_monitor.Fclk_MinActiveFreq = input[4];
4152 activity_monitor.Fclk_BoosterFreqType = input[5];
4153 activity_monitor.Fclk_BoosterFreq = input[6];
4154 activity_monitor.Fclk_PD_Data_limit_c = input[7];
4155 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
4156 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
4157 break;
4158 }
4159
4160 result = vega20_set_activity_monitor_coeff(hwmgr,
4161 (uint8_t *)(&activity_monitor),
4162 WORKLOAD_PPLIB_CUSTOM_BIT);
4163 data->is_custom_profile_set = true;
4164 PP_ASSERT_WITH_CODE(!result,
4165 "[SetPowerProfile] Failed to set activity monitor!",
4166 return result);
4167 }
4168
4169 out:
4170 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4171 workload_type =
4172 conv_power_profile_to_pplib_workload(power_profile_mode);
4173 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4174 1 << workload_type,
4175 NULL);
4176
4177 hwmgr->power_profile_mode = power_profile_mode;
4178
4179 return 0;
4180 }
4181
4182 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4183 uint32_t virtual_addr_low,
4184 uint32_t virtual_addr_hi,
4185 uint32_t mc_addr_low,
4186 uint32_t mc_addr_hi,
4187 uint32_t size)
4188 {
4189 smum_send_msg_to_smc_with_parameter(hwmgr,
4190 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4191 virtual_addr_hi,
4192 NULL);
4193 smum_send_msg_to_smc_with_parameter(hwmgr,
4194 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4195 virtual_addr_low,
4196 NULL);
4197 smum_send_msg_to_smc_with_parameter(hwmgr,
4198 PPSMC_MSG_DramLogSetDramAddrHigh,
4199 mc_addr_hi,
4200 NULL);
4201
4202 smum_send_msg_to_smc_with_parameter(hwmgr,
4203 PPSMC_MSG_DramLogSetDramAddrLow,
4204 mc_addr_low,
4205 NULL);
4206
4207 smum_send_msg_to_smc_with_parameter(hwmgr,
4208 PPSMC_MSG_DramLogSetDramSize,
4209 size,
4210 NULL);
4211 return 0;
4212 }
4213
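/*
 * Populate the thermal range from the powerplay table limits. Values are
 * scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES, and the emergency maxima
 * add the per-sensor CTF offsets on top of the edge/hotspot/HBM limits.
 */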
4214 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4215 struct PP_TemperatureRange *thermal_data)
4216 {
4217 struct phm_ppt_v3_information *pptable_information =
4218 (struct phm_ppt_v3_information *)hwmgr->pptable;
4219 struct vega20_hwmgr *data =
4220 (struct vega20_hwmgr *)(hwmgr->backend);
4221 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
4222
4223 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4224
4225 thermal_data->max = pp_table->TedgeLimit *
4226 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4227 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
4228 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4229 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
4230 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4231 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
4232 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4233 thermal_data->mem_crit_max = pp_table->ThbmLimit *
4234 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4235 	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
4236 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4237 thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
4238 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4239
4240 return 0;
4241 }
4242
4243 static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
4244 {
4245 int res;
4246
4247 	/* I2C bus access can happen very early, when the SMU is not yet loaded */
4248 if (!vega20_is_smc_ram_running(hwmgr))
4249 return 0;
4250
4251 res = smum_send_msg_to_smc_with_parameter(hwmgr,
4252 (acquire ?
4253 PPSMC_MSG_RequestI2CBus :
4254 PPSMC_MSG_ReleaseI2CBus),
4255 0,
4256 NULL);
4257
4258 PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
4259 return res;
4260 }
4261
4262 static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
4263 enum pp_df_cstate state)
4264 {
4265 int ret;
4266
4267 	/* PPSMC_MSG_DFCstateControl is supported by SMC firmware 40.50 and later */
4268 if (hwmgr->smu_version < 0x283200) {
4269 pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
4270 return -EINVAL;
4271 }
4272
4273 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
4274 NULL);
4275 if (ret)
4276 pr_err("SetDfCstate failed!\n");
4277
4278 return ret;
4279 }
4280
4281 static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
4282 uint32_t pstate)
4283 {
4284 int ret;
4285
4286 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
4287 PPSMC_MSG_SetXgmiMode,
4288 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
4289 NULL);
4290 if (ret)
4291 pr_err("SetXgmiPstate failed!\n");
4292
4293 return ret;
4294 }
4295
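/*
 * Prepare a gpu_metrics_v1_0 structure: the buffer is pre-filled with 0xFF
 * so any field not explicitly written by the caller is left at all-ones,
 * then the common header (size/revision) and the boottime timestamp are set.
 */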
4296 static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
4297 {
4298 memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
4299
4300 gpu_metrics->common_header.structure_size =
4301 sizeof(struct gpu_metrics_v1_0);
4302 gpu_metrics->common_header.format_revision = 1;
4303 gpu_metrics->common_header.content_revision = 0;
4304
4305 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
4306 }
4307
4308 static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
4309 void **table)
4310 {
4311 struct vega20_hwmgr *data =
4312 (struct vega20_hwmgr *)(hwmgr->backend);
4313 struct gpu_metrics_v1_0 *gpu_metrics =
4314 &data->gpu_metrics_table;
4315 SmuMetrics_t metrics;
4316 uint32_t fan_speed_rpm;
4317 int ret;
4318
4319 ret = vega20_get_metrics_table(hwmgr, &metrics, true);
4320 if (ret)
4321 return ret;
4322
4323 vega20_init_gpu_metrics_v1_0(gpu_metrics);
4324
4325 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
4326 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
4327 gpu_metrics->temperature_mem = metrics.TemperatureHBM;
4328 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
4329 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
4330 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
4331
4332 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
4333 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
4334
4335 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
4336
4337 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
4338 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
4339 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
4340
4341 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
4342 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
4343 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
4344 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
4345 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
4346
4347 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
4348
4349 vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
4350 gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
4351
4352 gpu_metrics->pcie_link_width =
4353 vega20_get_current_pcie_link_width(hwmgr);
4354 gpu_metrics->pcie_link_speed =
4355 vega20_get_current_pcie_link_speed(hwmgr);
4356
4357 *table = (void *)gpu_metrics;
4358
4359 return sizeof(struct gpu_metrics_v1_0);
4360 }
4361
4362 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
4363 /* init/fini related */
4364 .backend_init = vega20_hwmgr_backend_init,
4365 .backend_fini = vega20_hwmgr_backend_fini,
4366 .asic_setup = vega20_setup_asic_task,
4367 .power_off_asic = vega20_power_off_asic,
4368 .dynamic_state_management_enable = vega20_enable_dpm_tasks,
4369 .dynamic_state_management_disable = vega20_disable_dpm_tasks,
4370 /* power state related */
4371 .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
4372 .pre_display_config_changed = vega20_pre_display_configuration_changed_task,
4373 .display_config_changed = vega20_display_configuration_changed_task,
4374 .check_smc_update_required_for_display_configuration =
4375 vega20_check_smc_update_required_for_display_configuration,
4376 .notify_smc_display_config_after_ps_adjustment =
4377 vega20_notify_smc_display_config_after_ps_adjustment,
4378 /* export to DAL */
4379 .get_sclk = vega20_dpm_get_sclk,
4380 .get_mclk = vega20_dpm_get_mclk,
4381 .get_dal_power_level = vega20_get_dal_power_level,
4382 .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
4383 .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
4384 .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
4385 .display_clock_voltage_request = vega20_display_clock_voltage_request,
4386 .get_performance_level = vega20_get_performance_level,
4387 /* UMD pstate, profile related */
4388 .force_dpm_level = vega20_dpm_force_dpm_level,
4389 .get_power_profile_mode = vega20_get_power_profile_mode,
4390 .set_power_profile_mode = vega20_set_power_profile_mode,
4391 /* od related */
4392 .set_power_limit = vega20_set_power_limit,
4393 .get_sclk_od = vega20_get_sclk_od,
4394 .set_sclk_od = vega20_set_sclk_od,
4395 .get_mclk_od = vega20_get_mclk_od,
4396 .set_mclk_od = vega20_set_mclk_od,
4397 .odn_edit_dpm_table = vega20_odn_edit_dpm_table,
4398 	/* for sysfs to retrieve/set gfxclk/memclk */
4399 .force_clock_level = vega20_force_clock_level,
4400 .print_clock_levels = vega20_print_clock_levels,
4401 .read_sensor = vega20_read_sensor,
4402 .get_ppfeature_status = vega20_get_ppfeature_status,
4403 .set_ppfeature_status = vega20_set_ppfeature_status,
4404 /* powergate related */
4405 .powergate_uvd = vega20_power_gate_uvd,
4406 .powergate_vce = vega20_power_gate_vce,
4407 /* thermal related */
4408 .start_thermal_controller = vega20_start_thermal_controller,
4409 .stop_thermal_controller = vega20_thermal_stop_thermal_controller,
4410 .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
4411 .register_irq_handlers = smu9_register_irq_handlers,
4412 .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
4413 /* fan control related */
4414 .get_fan_speed_pwm = vega20_fan_ctrl_get_fan_speed_pwm,
4415 .set_fan_speed_pwm = vega20_fan_ctrl_set_fan_speed_pwm,
4416 .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
4417 .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
4418 .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
4419 .get_fan_control_mode = vega20_get_fan_control_mode,
4420 .set_fan_control_mode = vega20_set_fan_control_mode,
4421 /* smu memory related */
4422 .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
4423 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
4424 /* BACO related */
4425 .get_asic_baco_capability = vega20_baco_get_capability,
4426 .get_asic_baco_state = vega20_baco_get_state,
4427 .set_asic_baco_state = vega20_baco_set_state,
4428 .set_mp1_state = vega20_set_mp1_state,
4429 .smu_i2c_bus_access = vega20_smu_i2c_bus_access,
4430 .set_df_cstate = vega20_set_df_cstate,
4431 .set_xgmi_pstate = vega20_set_xgmi_pstate,
4432 .get_gpu_metrics = vega20_get_gpu_metrics,
4433 };
4434
4435 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
4436 {
4437 hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
4438 hwmgr->pptable_func = &vega20_pptable_funcs;
4439
4440 return 0;
4441 }
4442