/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hwmgr.h"
#include "amd_powerplay.h"
#include "vega20_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega20_powertune.h"
#include "vega20_inc.h"
#include "pppcielanes.h"
#include "vega20_hwmgr.h"
#include "vega20_processpptables.h"
#include "vega20_pptable.h"
#include "vega20_thermal.h"
#include "vega20_ppsmc.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "soc15_common.h"
#include "vega20_baco.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_4_sh_mask.h"

#define smnPCIE_LC_SPEED_CNTL		0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL	0x11140288

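/*
 * Decoded PCIe link capabilities: link_width[] maps the encoded link width
 * field to a lane count, and link_speed[] maps the encoded link speed to a
 * link rate (the values appear to be in units of 0.1 GT/s, i.e.
 * 2.5/5.0/8.0/16.0 GT/s for Gen1 through Gen4).
 */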
#define LINK_WIDTH_MAX			6
#define LINK_SPEED_MAX			3
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};

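/*
 * Populate the driver-side "registry" defaults: metric averaging alphas,
 * OD8/fan/wattman knobs, and the disallowed SMU feature mask derived from
 * the SMU firmware version and the hwmgr->feature_mask module overrides.
 */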
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;

	data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;

	/*
	 * Disable the following features for now:
	 *   GFXCLK DS
	 *   SOCCLK DS
	 *   LCLK DS
	 *   DCEFCLK DS
	 *   FCLK DS
	 *   MP1CLK DS
	 *   MP0CLK DS
	 */
	data->registry_data.disallowed_features = 0xE0041C00;
	/* ECC feature should be disabled on old SMUs */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	if (hwmgr->smu_version < 0x282100)
		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;

	if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;

	if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;

	if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;

	if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;

	if (!(hwmgr->feature_mask & PP_ULV_MASK))
		data->registry_data.disallowed_features |= FEATURE_ULV_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
		data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;

	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	data->registry_data.didt_support = 0;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.od8_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.fclk_gfxclk_ratio = 0;
	data->registry_data.auto_wattman_threshold = 50;
	data->registry_data.gfxoff_controlled_by_driver = 1;
	data->gfxoff_allowed = false;
	data->counter_gfxoff = 0;
	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}

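/*
 * Translate the registry defaults and amdgpu power-gating flags into
 * PHM_PlatformCaps_* bits on the platform descriptor. The power-containment
 * and DiDt caps start cleared and are only set when the corresponding
 * registry switches are enabled.
 */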
static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_BACO);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	if (data->registry_data.od8_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD8inACSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.od8_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD8inDCSupport);
	}

	if (data->registry_data.thermal_support &&
	    data->registry_data.fuzzy_fan_control_support &&
	    hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}

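/*
 * Map each driver-side GNLD_* feature index to its SMU FEATURE_*_BIT,
 * precompute the per-feature 64-bit bitmaps, mark features disallowed by the
 * registry mask, and read the chip serial number to build adev->unique_id.
 */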
static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
	data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
			false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}

static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

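/*
 * Allocate and initialize the vega20 hwmgr backend: registry defaults,
 * voltage control modes, platform caps, DPM feature defaults, and the
 * platform descriptor limits used by the rest of powerplay.
 */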
static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega20_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;

	data->water_marks_bitmap = 0;
	data->avfs_exist = false;

	vega20_set_features_platform_caps(hwmgr);

	vega20_init_dpm_defaults(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega20_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA20_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	data->is_custom_profile_set = false;

	return 0;
}

static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}

static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	int ret = 0;
	bool use_baco = (amdgpu_in_reset(adev) &&
			 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
			(adev->in_runpm && amdgpu_asic_supports_baco(adev));

	ret = vega20_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to init sclk threshold!",
			return ret);

	if (use_baco) {
		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
		if (ret)
			pr_err("Failed to apply vega20 baco workaround!\n");
	}

	return ret;
}

/*
 * @fn vega20_init_dpm_state
 * @brief Function to initialize the Soft Min/Max and Hard Min/Max levels of a
 *        DPM state to their defaults (0 and VG20_CLOCK_MAX_DEFAULT).
 *
 * @param dpm_state - the address of the DPM Table to initialize.
 * @return None.
 */
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0x0;
	dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_state->hard_min_level = 0x0;
	dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}

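/*
 * PPSMC_MSG_GetDpmFreqByIndex takes the clock id in the upper 16 bits of the
 * parameter and the level index in the lower 16 bits; an index of 0xFF asks
 * the SMU for the number of DPM levels instead of a frequency.
 */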
static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF),
			num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetNumOfDpmLevel] failed to get dpm levels!",
			return ret);

	return ret;
}

static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | index),
			clk);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetDpmFreqByIndex] failed to get dpm freq by index!",
			return ret);

	return ret;
}

static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk levels!",
			return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupSingleDpmTable] failed to get clk of specific level!",
				return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}

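/*
 * When a clock's DPM feature is disabled, the table falls back to a single
 * level at the VBIOS boot clock; the boot values are presumably stored in
 * 10 kHz units, hence the divide by 100 to get MHz.
 */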
static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}

	return ret;
}

static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}

	return ret;
}

/*
 * This function initializes all DPM state tables for the SMU based on the
 * dependency table. The dynamic state patching function will then trim these
 * state tables to the allowed range based on the power policy or external
 * client requests, such as a UVD request, etc.
 */
static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dcefclk */
	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* pixclk */
	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dispclk */
	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* phyclk */
	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega20_dpm_table));

	return 0;
}

/**
 * vega20_init_smc_table - Initializes the SMC table and uploads it
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: 0 on success; a nonzero error code if reading the VBIOS bootup
 *         values or uploading the PPTable fails.
 */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
			(struct phm_ppt_v3_information *)hwmgr->pptable;

	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to get vbios bootup values!",
			return result);

	data->vbios_boot_state.vddc = boot_up_values.usVddc;
	data->vbios_boot_state.vddci = boot_up_values.usVddci;
	data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
	data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
	data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
	data->vbios_boot_state.fclock = boot_up_values.ulFClk;
	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
			NULL);

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = smum_smc_table_manager(hwmgr,
			(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to upload PPtable!",
			return result);

	return 0;
}

/*
 * Override PCIe link speed and link width for DPM Level 1. PPTable entries
 * reflect the ASIC capabilities and not the system capabilities, e.g. a
 * Vega20 board in a PCIe Gen3 system. In that case, when the SMU tries to
 * switch to DPM1, it fails because the system doesn't support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	int i;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
			pp_table->PcieGenSpeed[i];
		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
			pp_table->PcieLaneCount[i];

		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
		    pp_table->PcieLaneCount[i]) {
			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
					"[OverridePcieParameters] Attempt to override pcie params failed!",
					return ret);
		}

		/* update the pptable */
		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
		pp_table->PcieLaneCount[i] = pcie_width_arg;
	}

	/* override to the highest if it's disabled from ppfeaturemask */
	if (data->registry_data.pcie_dpm_key_disabled) {
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
					"[OverridePcieParameters] Attempt to override pcie params failed!",
					return ret);

			pp_table->PcieGenSpeed[i] = pcie_gen;
			pp_table->PcieLaneCount[i] = pcie_width;
		}
		ret = vega20_enable_smc_features(hwmgr,
				false,
				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to Disable DPM LINK Failed!",
				return ret);
		data->smu_features[GNLD_DPM_LINK].enabled = false;
		data->smu_features[GNLD_DPM_LINK].supported = false;
	}

	return 0;
}

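/*
 * The 64-bit allowed-feature bitmap is handed to the SMU as two 32-bit
 * halves: features with an id above 31 contribute to the "high" mask
 * message, the rest to the "low" one.
 */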
static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t allowed_features_low = 0, allowed_features_high = 0;
	int i;
	int ret = 0;

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		if (data->smu_features[i].allowed)
			data->smu_features[i].smu_feature_id > 31 ?
				(allowed_features_high |=
					((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
					& 0xFFFFFFFF)) :
				(allowed_features_low |=
					((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
					& 0xFFFFFFFF));

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
			return ret);

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
			return ret);

	return 0;
}

static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}

static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}

static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures,
			NULL)) == 0,
			"[EnableAllSMUFeatures] Failed to enable all smu features!",
			return ret);

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
		data->smu_features[i].supported = enabled;

#if 0
		if (data->smu_features[i].allowed && !enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
		else if (!data->smu_features[i].allowed && enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
#endif
	}

	return 0;
}

static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DPM_UCLK].enabled)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetUclkFastSwitch,
				1,
				NULL);

	return 0;
}

static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFclkGfxClkRatio,
			data->registry_data.fclk_gfxclk_ratio,
			NULL);
}

static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int i, ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures,
			NULL)) == 0,
			"[DisableAllSMUFeatures] Failed to disable all smu features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		data->smu_features[i].enabled = 0;

	return 0;
}

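/*
 * Build od_settings->overdrive8_capabilities from the pptable OD feature
 * flags, accepting each capability only when its min/max settings form a
 * sane range (and, where relevant, agree with limits in the SMC pptable).
 */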
static int vega20_od8_set_feature_capabilities(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	od_settings->overdrive8_capabilities = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
		    pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
		    (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
		     pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
		     pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
			data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
			od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
		od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
			od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
		    (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
		     (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
			od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
	}

	if (data->smu_features[GNLD_THERMAL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
		    pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
		od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
	    pp_table->FanZeroRpmEnable)
		od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;

	if (!od_settings->overdrive8_capabilities)
		hwmgr->od_enabled = false;

	return 0;
}

static int vega20_od8_set_feature_id(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			OD8_GFXCLK_LIMITS;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			OD8_GFXCLK_LIMITS;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			OD8_GFXCLK_CURVE;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
	else
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
	else
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			OD8_ACOUSTIC_LIMIT_SCLK;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			OD8_FAN_SPEED_MIN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			OD8_TEMPERATURE_FAN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			OD8_TEMPERATURE_SYSTEM;
	else
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			0;

	return 0;
}

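/*
 * Query the AVFS base voltage for a given gfx clock: the message parameter
 * packs the curve type, the hot-curve temperature and the frequency, and the
 * value returned by the SMU is divided by VOLTAGE_SCALE before being handed
 * back to the caller.
 */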
static int vega20_od8_get_gfx_clock_base_voltage(
		struct pp_hwmgr *hwmgr,
		uint32_t *voltage,
		uint32_t freq)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetAVFSVoltageByDpm,
			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
			voltage);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
			return ret);

	*voltage = *voltage / VOLTAGE_SCALE;

	return 0;
}

static int vega20_od8_initialize_default_settings(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od8_settings = &(data->od8_settings);
	OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
	int i, ret = 0;

	/* Set Feature Capabilities */
	vega20_od8_set_feature_capabilities(hwmgr);

	/* Map FeatureID to individual settings */
	vega20_od8_set_feature_id(hwmgr);

	/* Set default values */
	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			od_table->GfxclkFmax;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_table->GfxclkFreq1 = od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			od_table->GfxclkFreq1;

		od_table->GfxclkFreq3 = od_table->GfxclkFmax;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			od_table->GfxclkFreq3;

		od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			od_table->GfxclkFreq2;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
				od_table->GfxclkFreq1),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
		od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
				od_table->GfxclkFreq2),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
		od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
				od_table->GfxclkFreq3),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
		od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
			* VOLTAGE_SCALE;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			od_table->UclkFmax;
	else
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			od_table->OverDrivePct;
	else
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			od_table->FanMaximumRpm;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			od_table->FanTargetTemperature;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			od_table->MaxOpTemp;
	else
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			0;

	for (i = 0; i < OD8_SETTING_COUNT; i++) {
		if (od8_settings->od8_settings_array[i].feature_id) {
			od8_settings->od8_settings_array[i].min_value =
				pptable_information->od_settings_min[i];
			od8_settings->od8_settings_array[i].max_value =
				pptable_information->od_settings_max[i];
			od8_settings->od8_settings_array[i].current_value =
				od8_settings->od8_settings_array[i].default_value;
		} else {
			od8_settings->od8_settings_array[i].min_value =
				0;
			od8_settings->od8_settings_array[i].max_value =
				0;
			od8_settings->od8_settings_array[i].current_value =
				0;
		}
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}

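/*
 * Update a single OD8 setting: read the current OVERDRIVE table from the
 * SMU, patch the requested field (with range checks for the gfxclk and uclk
 * Fmax entries), and write the table back.
 */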
vega20_od8_set_settings(struct pp_hwmgr * hwmgr,uint32_t index,uint32_t value)1390 static int vega20_od8_set_settings(
1391 struct pp_hwmgr *hwmgr,
1392 uint32_t index,
1393 uint32_t value)
1394 {
1395 OverDriveTable_t od_table;
1396 int ret = 0;
1397 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1398 struct vega20_od8_single_setting *od8_settings =
1399 data->od8_settings.od8_settings_array;
1400
1401 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
1402 PP_ASSERT_WITH_CODE(!ret,
1403 "Failed to export over drive table!",
1404 return ret);
1405
1406 switch(index) {
1407 case OD8_SETTING_GFXCLK_FMIN:
1408 od_table.GfxclkFmin = (uint16_t)value;
1409 break;
1410 case OD8_SETTING_GFXCLK_FMAX:
1411 if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
1412 value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
1413 return -EINVAL;
1414
1415 od_table.GfxclkFmax = (uint16_t)value;
1416 break;
1417 case OD8_SETTING_GFXCLK_FREQ1:
1418 od_table.GfxclkFreq1 = (uint16_t)value;
1419 break;
1420 case OD8_SETTING_GFXCLK_VOLTAGE1:
1421 od_table.GfxclkVolt1 = (uint16_t)value;
1422 break;
1423 case OD8_SETTING_GFXCLK_FREQ2:
1424 od_table.GfxclkFreq2 = (uint16_t)value;
1425 break;
1426 case OD8_SETTING_GFXCLK_VOLTAGE2:
1427 od_table.GfxclkVolt2 = (uint16_t)value;
1428 break;
1429 case OD8_SETTING_GFXCLK_FREQ3:
1430 od_table.GfxclkFreq3 = (uint16_t)value;
1431 break;
1432 case OD8_SETTING_GFXCLK_VOLTAGE3:
1433 od_table.GfxclkVolt3 = (uint16_t)value;
1434 break;
1435 case OD8_SETTING_UCLK_FMAX:
1436 if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
1437 value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
1438 return -EINVAL;
1439 od_table.UclkFmax = (uint16_t)value;
1440 break;
1441 case OD8_SETTING_POWER_PERCENTAGE:
1442 od_table.OverDrivePct = (int16_t)value;
1443 break;
1444 case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
1445 od_table.FanMaximumRpm = (uint16_t)value;
1446 break;
1447 case OD8_SETTING_FAN_MIN_SPEED:
1448 od_table.FanMinimumPwm = (uint16_t)value;
1449 break;
1450 case OD8_SETTING_FAN_TARGET_TEMP:
1451 od_table.FanTargetTemperature = (uint16_t)value;
1452 break;
1453 case OD8_SETTING_OPERATING_TEMP_MAX:
1454 od_table.MaxOpTemp = (uint16_t)value;
1455 break;
1456 }
1457
1458 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
1459 PP_ASSERT_WITH_CODE(!ret,
1460 			"Failed to import overdrive table!",
1461 return ret);
1462
1463 return 0;
1464 }
1465
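/*
 * Report the sclk overdrive as the percentage by which the highest gfx
 * DPM level exceeds the highest level of the golden (default) table.
 */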
1466 static int vega20_get_sclk_od(
1467 struct pp_hwmgr *hwmgr)
1468 {
1469 struct vega20_hwmgr *data = hwmgr->backend;
1470 struct vega20_single_dpm_table *sclk_table =
1471 &(data->dpm_table.gfx_table);
1472 struct vega20_single_dpm_table *golden_sclk_table =
1473 &(data->golden_dpm_table.gfx_table);
1474 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
1475 int golden_value = golden_sclk_table->dpm_levels
1476 [golden_sclk_table->count - 1].value;
1477
1478 /* od percentage */
1479 value -= golden_value;
1480 value = DIV_ROUND_UP(value * 100, golden_value);
1481
1482 return value;
1483 }
1484
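/*
 * Translate an overdrive percentage into an absolute gfxclk Fmax (golden
 * top level scaled up by the given percentage), apply it through the OD8
 * path and rebuild the gfxclk DPM table.
 */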
1485 static int vega20_set_sclk_od(
1486 struct pp_hwmgr *hwmgr, uint32_t value)
1487 {
1488 struct vega20_hwmgr *data = hwmgr->backend;
1489 struct vega20_single_dpm_table *golden_sclk_table =
1490 &(data->golden_dpm_table.gfx_table);
1491 uint32_t od_sclk;
1492 int ret = 0;
1493
1494 od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
1495 od_sclk /= 100;
1496 od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
1497
1498 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
1499 PP_ASSERT_WITH_CODE(!ret,
1500 "[SetSclkOD] failed to set od gfxclk!",
1501 return ret);
1502
1503 /* retrieve updated gfxclk table */
1504 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
1505 PP_ASSERT_WITH_CODE(!ret,
1506 "[SetSclkOD] failed to refresh gfxclk table!",
1507 return ret);
1508
1509 return 0;
1510 }
1511
1512 static int vega20_get_mclk_od(
1513 struct pp_hwmgr *hwmgr)
1514 {
1515 struct vega20_hwmgr *data = hwmgr->backend;
1516 struct vega20_single_dpm_table *mclk_table =
1517 &(data->dpm_table.mem_table);
1518 struct vega20_single_dpm_table *golden_mclk_table =
1519 &(data->golden_dpm_table.mem_table);
1520 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
1521 int golden_value = golden_mclk_table->dpm_levels
1522 [golden_mclk_table->count - 1].value;
1523
1524 /* od percentage */
1525 value -= golden_value;
1526 value = DIV_ROUND_UP(value * 100, golden_value);
1527
1528 return value;
1529 }
1530
1531 static int vega20_set_mclk_od(
1532 struct pp_hwmgr *hwmgr, uint32_t value)
1533 {
1534 struct vega20_hwmgr *data = hwmgr->backend;
1535 struct vega20_single_dpm_table *golden_mclk_table =
1536 &(data->golden_dpm_table.mem_table);
1537 uint32_t od_mclk;
1538 int ret = 0;
1539
1540 od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
1541 od_mclk /= 100;
1542 od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
1543
1544 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
1545 PP_ASSERT_WITH_CODE(!ret,
1546 "[SetMclkOD] failed to set od memclk!",
1547 return ret);
1548
1549 /* retrieve updated memclk table */
1550 ret = vega20_setup_memclk_dpm_table(hwmgr);
1551 PP_ASSERT_WITH_CODE(!ret,
1552 "[SetMclkOD] failed to refresh memclk table!",
1553 return ret);
1554
1555 return 0;
1556 }
1557
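/*
 * Select the default UMD pstate (profiling) clocks: use the predefined
 * gfxclk/memclk DPM levels when the tables are deep enough, otherwise
 * level 0, and scale the MHz values by 100 for reporting.
 */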
1558 static int vega20_populate_umdpstate_clocks(
1559 struct pp_hwmgr *hwmgr)
1560 {
1561 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1562 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
1563 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
1564
1565 hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
1566 hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
1567
1568 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
1569 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
1570 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
1571 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
1572 }
1573
1574 hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
1575 hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
1576
1577 return 0;
1578 }
1579
1580 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
1581 PP_Clock *clock, PPCLK_e clock_select)
1582 {
1583 int ret = 0;
1584
1585 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1586 PPSMC_MSG_GetDcModeMaxDpmFreq,
1587 (clock_select << 16),
1588 clock)) == 0,
1589 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
1590 return ret);
1591
1592 /* if DC limit is zero, return AC limit */
1593 if (*clock == 0) {
1594 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1595 PPSMC_MSG_GetMaxDpmFreq,
1596 (clock_select << 16),
1597 clock)) == 0,
1598 "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
1599 return ret);
1600 }
1601
1602 return 0;
1603 }
1604
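/*
 * Seed the max sustainable clocks from the VBIOS boot values, then, for
 * each clock domain whose DPM feature is enabled, query the SMU for the
 * real limit (DC, falling back to AC).  UCLK is additionally capped to
 * the SOCCLK limit.
 */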
1605 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
1606 {
1607 struct vega20_hwmgr *data =
1608 (struct vega20_hwmgr *)(hwmgr->backend);
1609 struct vega20_max_sustainable_clocks *max_sustainable_clocks =
1610 &(data->max_sustainable_clocks);
1611 int ret = 0;
1612
1613 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
1614 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
1615 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
1616 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
1617 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
1618 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
1619
1620 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1621 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1622 &(max_sustainable_clocks->uclock),
1623 PPCLK_UCLK)) == 0,
1624 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
1625 return ret);
1626
1627 if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
1628 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1629 &(max_sustainable_clocks->soc_clock),
1630 PPCLK_SOCCLK)) == 0,
1631 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
1632 return ret);
1633
1634 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1635 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1636 &(max_sustainable_clocks->dcef_clock),
1637 PPCLK_DCEFCLK)) == 0,
1638 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
1639 return ret);
1640 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1641 &(max_sustainable_clocks->display_clock),
1642 PPCLK_DISPCLK)) == 0,
1643 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
1644 return ret);
1645 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1646 &(max_sustainable_clocks->phy_clock),
1647 PPCLK_PHYCLK)) == 0,
1648 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
1649 return ret);
1650 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1651 &(max_sustainable_clocks->pixel_clock),
1652 PPCLK_PIXCLK)) == 0,
1653 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
1654 return ret);
1655 }
1656
1657 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
1658 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
1659
1660 return 0;
1661 }
1662
1663 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
1664 {
1665 int result;
1666
1667 result = smum_send_msg_to_smc(hwmgr,
1668 PPSMC_MSG_SetMGpuFanBoostLimitRpm,
1669 NULL);
1670 PP_ASSERT_WITH_CODE(!result,
1671 "[EnableMgpuFan] Failed to enable mgpu fan boost!",
1672 return result);
1673
1674 return 0;
1675 }
1676
1677 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
1678 {
1679 struct vega20_hwmgr *data =
1680 (struct vega20_hwmgr *)(hwmgr->backend);
1681
1682 data->uvd_power_gated = true;
1683 data->vce_power_gated = true;
1684 }
1685
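/*
 * Main DPM bring-up sequence: program the allowed feature mask and the
 * SMC tables, run BTC, enable all SMU features, then derive the default
 * DPM tables, max sustainable clocks, OD8 defaults, UMD pstate clocks
 * and the default power limit.
 */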
1686 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1687 {
1688 int result = 0;
1689
1690 smum_send_msg_to_smc_with_parameter(hwmgr,
1691 PPSMC_MSG_NumOfDisplays, 0, NULL);
1692
1693 result = vega20_set_allowed_featuresmask(hwmgr);
1694 PP_ASSERT_WITH_CODE(!result,
1695 "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
1696 return result);
1697
1698 result = vega20_init_smc_table(hwmgr);
1699 PP_ASSERT_WITH_CODE(!result,
1700 "[EnableDPMTasks] Failed to initialize SMC table!",
1701 return result);
1702
1703 result = vega20_run_btc(hwmgr);
1704 PP_ASSERT_WITH_CODE(!result,
1705 "[EnableDPMTasks] Failed to run btc!",
1706 return result);
1707
1708 result = vega20_run_btc_afll(hwmgr);
1709 PP_ASSERT_WITH_CODE(!result,
1710 "[EnableDPMTasks] Failed to run btc afll!",
1711 return result);
1712
1713 result = vega20_enable_all_smu_features(hwmgr);
1714 PP_ASSERT_WITH_CODE(!result,
1715 "[EnableDPMTasks] Failed to enable all smu features!",
1716 return result);
1717
1718 result = vega20_override_pcie_parameters(hwmgr);
1719 PP_ASSERT_WITH_CODE(!result,
1720 "[EnableDPMTasks] Failed to override pcie parameters!",
1721 return result);
1722
1723 result = vega20_notify_smc_display_change(hwmgr);
1724 PP_ASSERT_WITH_CODE(!result,
1725 "[EnableDPMTasks] Failed to notify smc display change!",
1726 return result);
1727
1728 result = vega20_send_clock_ratio(hwmgr);
1729 PP_ASSERT_WITH_CODE(!result,
1730 "[EnableDPMTasks] Failed to send clock ratio!",
1731 return result);
1732
1733 /* Initialize UVD/VCE powergating state */
1734 vega20_init_powergate_state(hwmgr);
1735
1736 result = vega20_setup_default_dpm_tables(hwmgr);
1737 PP_ASSERT_WITH_CODE(!result,
1738 "[EnableDPMTasks] Failed to setup default DPM tables!",
1739 return result);
1740
1741 result = vega20_init_max_sustainable_clocks(hwmgr);
1742 PP_ASSERT_WITH_CODE(!result,
1743 "[EnableDPMTasks] Failed to get maximum sustainable clocks!",
1744 return result);
1745
1746 result = vega20_power_control_set_level(hwmgr);
1747 PP_ASSERT_WITH_CODE(!result,
1748 "[EnableDPMTasks] Failed to power control set level!",
1749 return result);
1750
1751 result = vega20_od8_initialize_default_settings(hwmgr);
1752 PP_ASSERT_WITH_CODE(!result,
1753 			"[EnableDPMTasks] Failed to initialize od8 settings!",
1754 return result);
1755
1756 result = vega20_populate_umdpstate_clocks(hwmgr);
1757 PP_ASSERT_WITH_CODE(!result,
1758 "[EnableDPMTasks] Failed to populate umdpstate clocks!",
1759 return result);
1760
1761 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
1762 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
1763 PP_ASSERT_WITH_CODE(!result,
1764 "[GetPptLimit] get default PPT limit failed!",
1765 return result);
1766 hwmgr->power_limit =
1767 hwmgr->default_power_limit;
1768
1769 return 0;
1770 }
1771
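/*
 * Return the index of the lowest enabled DPM level; if none is enabled,
 * force level 0 on and use it.
 */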
1772 static uint32_t vega20_find_lowest_dpm_level(
1773 struct vega20_single_dpm_table *table)
1774 {
1775 uint32_t i;
1776
1777 for (i = 0; i < table->count; i++) {
1778 if (table->dpm_levels[i].enabled)
1779 break;
1780 }
1781 if (i >= table->count) {
1782 i = 0;
1783 table->dpm_levels[i].enabled = true;
1784 }
1785
1786 return i;
1787 }
1788
1789 static uint32_t vega20_find_highest_dpm_level(
1790 struct vega20_single_dpm_table *table)
1791 {
1792 int i = 0;
1793
1794 PP_ASSERT_WITH_CODE(table != NULL,
1795 "[FindHighestDPMLevel] DPM Table does not exist!",
1796 return 0);
1797 PP_ASSERT_WITH_CODE(table->count > 0,
1798 "[FindHighestDPMLevel] DPM Table has no entry!",
1799 return 0);
1800 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
1801 "[FindHighestDPMLevel] DPM Table has too many entries!",
1802 return MAX_REGULAR_DPM_NUMBER - 1);
1803
1804 for (i = table->count - 1; i >= 0; i--) {
1805 if (table->dpm_levels[i].enabled)
1806 break;
1807 }
1808 if (i < 0) {
1809 i = 0;
1810 table->dpm_levels[i].enabled = true;
1811 }
1812
1813 return i;
1814 }
1815
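/*
 * Push the cached minimum levels to the SMU for every DPM domain selected
 * in feature_mask.  Each message packs the clock id in the upper 16 bits
 * and the frequency in the lower 16 bits; DCEFCLK uses a hard minimum,
 * the other domains a soft minimum.
 */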
1816 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1817 {
1818 struct vega20_hwmgr *data =
1819 (struct vega20_hwmgr *)(hwmgr->backend);
1820 uint32_t min_freq;
1821 int ret = 0;
1822
1823 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1824 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1825 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
1826 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1827 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1828 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
1829 NULL)),
1830 					"Failed to set soft min gfxclk!",
1831 return ret);
1832 }
1833
1834 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1835 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1836 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
1837 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1838 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1839 (PPCLK_UCLK << 16) | (min_freq & 0xffff),
1840 NULL)),
1841 					"Failed to set soft min memclk!",
1842 return ret);
1843 }
1844
1845 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1846 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1847 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
1848
1849 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1850 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1851 (PPCLK_VCLK << 16) | (min_freq & 0xffff),
1852 NULL)),
1853 "Failed to set soft min vclk!",
1854 return ret);
1855
1856 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
1857
1858 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1859 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1860 (PPCLK_DCLK << 16) | (min_freq & 0xffff),
1861 NULL)),
1862 "Failed to set soft min dclk!",
1863 return ret);
1864 }
1865
1866 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1867 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1868 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
1869
1870 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1871 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1872 (PPCLK_ECLK << 16) | (min_freq & 0xffff),
1873 NULL)),
1874 "Failed to set soft min eclk!",
1875 return ret);
1876 }
1877
1878 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1879 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1880 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
1881
1882 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1883 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1884 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
1885 NULL)),
1886 "Failed to set soft min socclk!",
1887 return ret);
1888 }
1889
1890 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1891 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1892 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level;
1893
1894 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1895 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1896 (PPCLK_FCLK << 16) | (min_freq & 0xffff),
1897 NULL)),
1898 "Failed to set soft min fclk!",
1899 return ret);
1900 }
1901
1902 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled &&
1903 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
1904 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1905
1906 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1907 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1908 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
1909 NULL)),
1910 "Failed to set hard min dcefclk!",
1911 return ret);
1912 }
1913
1914 return ret;
1915 }
1916
1917 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1918 {
1919 struct vega20_hwmgr *data =
1920 (struct vega20_hwmgr *)(hwmgr->backend);
1921 uint32_t max_freq;
1922 int ret = 0;
1923
1924 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1925 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1926 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
1927
1928 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1929 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1930 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
1931 NULL)),
1932 "Failed to set soft max gfxclk!",
1933 return ret);
1934 }
1935
1936 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1937 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1938 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
1939
1940 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1941 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1942 (PPCLK_UCLK << 16) | (max_freq & 0xffff),
1943 NULL)),
1944 "Failed to set soft max memclk!",
1945 return ret);
1946 }
1947
1948 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1949 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1950 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
1951
1952 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1953 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1954 (PPCLK_VCLK << 16) | (max_freq & 0xffff),
1955 NULL)),
1956 "Failed to set soft max vclk!",
1957 return ret);
1958
1959 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
1960 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1961 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1962 (PPCLK_DCLK << 16) | (max_freq & 0xffff),
1963 NULL)),
1964 "Failed to set soft max dclk!",
1965 return ret);
1966 }
1967
1968 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1969 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1970 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
1971
1972 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1973 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1974 (PPCLK_ECLK << 16) | (max_freq & 0xffff),
1975 NULL)),
1976 "Failed to set soft max eclk!",
1977 return ret);
1978 }
1979
1980 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1981 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1982 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
1983
1984 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1985 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1986 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
1987 NULL)),
1988 "Failed to set soft max socclk!",
1989 return ret);
1990 }
1991
1992 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1993 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1994 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level;
1995
1996 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1997 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1998 (PPCLK_FCLK << 16) | (max_freq & 0xffff),
1999 NULL)),
2000 "Failed to set soft max fclk!",
2001 return ret);
2002 }
2003
2004 return ret;
2005 }
2006
2007 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
2008 {
2009 struct vega20_hwmgr *data =
2010 (struct vega20_hwmgr *)(hwmgr->backend);
2011 int ret = 0;
2012
2013 if (data->smu_features[GNLD_DPM_VCE].supported) {
2014 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
2015 if (enable)
2016 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
2017 else
2018 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
2019 }
2020
2021 ret = vega20_enable_smc_features(hwmgr,
2022 enable,
2023 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
2024 PP_ASSERT_WITH_CODE(!ret,
2025 "Attempt to Enable/Disable DPM VCE Failed!",
2026 return ret);
2027 data->smu_features[GNLD_DPM_VCE].enabled = enable;
2028 }
2029
2030 return 0;
2031 }
2032
2033 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
2034 uint32_t *clock,
2035 PPCLK_e clock_select,
2036 bool max)
2037 {
2038 int ret;
2039 *clock = 0;
2040
2041 if (max) {
2042 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2043 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
2044 clock)) == 0,
2045 "[GetClockRanges] Failed to get max clock from SMC!",
2046 return ret);
2047 } else {
2048 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2049 PPSMC_MSG_GetMinDpmFreq,
2050 (clock_select << 16),
2051 clock)) == 0,
2052 "[GetClockRanges] Failed to get min clock from SMC!",
2053 return ret);
2054 }
2055
2056 return 0;
2057 }
2058
2059 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2060 {
2061 struct vega20_hwmgr *data =
2062 (struct vega20_hwmgr *)(hwmgr->backend);
2063 uint32_t gfx_clk;
2064 int ret = 0;
2065
2066 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
2067 "[GetSclks]: gfxclk dpm not enabled!\n",
2068 return -EPERM);
2069
2070 if (low) {
2071 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
2072 PP_ASSERT_WITH_CODE(!ret,
2073 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
2074 return ret);
2075 } else {
2076 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
2077 PP_ASSERT_WITH_CODE(!ret,
2078 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
2079 return ret);
2080 }
2081
2082 return (gfx_clk * 100);
2083 }
2084
2085 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2086 {
2087 struct vega20_hwmgr *data =
2088 (struct vega20_hwmgr *)(hwmgr->backend);
2089 uint32_t mem_clk;
2090 int ret = 0;
2091
2092 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
2093 			"[GetMclks]: memclk dpm not enabled!\n",
2094 return -EPERM);
2095
2096 if (low) {
2097 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
2098 PP_ASSERT_WITH_CODE(!ret,
2099 "[GetMclks]: fail to get min PPCLK_UCLK\n",
2100 return ret);
2101 } else {
2102 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
2103 PP_ASSERT_WITH_CODE(!ret,
2104 "[GetMclks]: fail to get max PPCLK_UCLK\n",
2105 return ret);
2106 }
2107
2108 return (mem_clk * 100);
2109 }
2110
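/*
 * Fetch the SMU metrics table, caching it in the backend and refreshing
 * the cached copy at most once per millisecond unless bypass_cache is
 * set.
 */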
2111 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr,
2112 SmuMetrics_t *metrics_table,
2113 bool bypass_cache)
2114 {
2115 struct vega20_hwmgr *data =
2116 (struct vega20_hwmgr *)(hwmgr->backend);
2117 int ret = 0;
2118
2119 if (bypass_cache ||
2120 !data->metrics_time ||
2121 time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
2122 ret = smum_smc_table_manager(hwmgr,
2123 (uint8_t *)(&data->metrics_table),
2124 TABLE_SMU_METRICS,
2125 true);
2126 if (ret) {
2127 pr_info("Failed to export SMU metrics table!\n");
2128 return ret;
2129 }
2130 data->metrics_time = jiffies;
2131 }
2132
2133 if (metrics_table)
2134 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
2135
2136 return ret;
2137 }
2138
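/*
 * Report socket power from the metrics table, shifted left by 8 into the
 * 8.8 fixed-point form used by the GPU power sensor.  SMU firmware 40.46
 * renamed the field, hence the version check below.
 */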
2139 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
2140 uint32_t *query)
2141 {
2142 int ret = 0;
2143 SmuMetrics_t metrics_table;
2144
2145 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2146 if (ret)
2147 return ret;
2148
2149 	/* SMU firmware 40.46 (version 0x282e00) renamed the socket power field */
2150 if (hwmgr->smu_version == 0x282e00)
2151 *query = metrics_table.AverageSocketPower << 8;
2152 else
2153 *query = metrics_table.CurrSocketPower << 8;
2154
2155 return ret;
2156 }
2157
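/*
 * Query the SMU for the current frequency of the given clock domain and
 * scale the returned value by 100 before handing it back.
 */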
2158 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
2159 PPCLK_e clk_id, uint32_t *clk_freq)
2160 {
2161 int ret = 0;
2162
2163 *clk_freq = 0;
2164
2165 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2166 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
2167 clk_freq)) == 0,
2168 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
2169 return ret);
2170
2171 *clk_freq = *clk_freq * 100;
2172
2173 return 0;
2174 }
2175
2176 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
2177 int idx,
2178 uint32_t *activity_percent)
2179 {
2180 int ret = 0;
2181 SmuMetrics_t metrics_table;
2182
2183 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2184 if (ret)
2185 return ret;
2186
2187 switch (idx) {
2188 case AMDGPU_PP_SENSOR_GPU_LOAD:
2189 *activity_percent = metrics_table.AverageGfxActivity;
2190 break;
2191 case AMDGPU_PP_SENSOR_MEM_LOAD:
2192 *activity_percent = metrics_table.AverageUclkActivity;
2193 break;
2194 default:
2195 pr_err("Invalid index for retrieving clock activity\n");
2196 return -EINVAL;
2197 }
2198
2199 return ret;
2200 }
2201
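/*
 * Generic sensor read entry point: values come from the cached SMU
 * metrics table, from direct SMU queries, or (for VDDGFX) from the SVI0
 * telemetry register; unhandled sensors return -EOPNOTSUPP.
 */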
2202 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
2203 void *value, int *size)
2204 {
2205 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2206 struct amdgpu_device *adev = hwmgr->adev;
2207 SmuMetrics_t metrics_table;
2208 uint32_t val_vid;
2209 int ret = 0;
2210
2211 switch (idx) {
2212 case AMDGPU_PP_SENSOR_GFX_SCLK:
2213 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2214 if (ret)
2215 return ret;
2216
2217 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100;
2218 *size = 4;
2219 break;
2220 case AMDGPU_PP_SENSOR_GFX_MCLK:
2221 ret = vega20_get_current_clk_freq(hwmgr,
2222 PPCLK_UCLK,
2223 (uint32_t *)value);
2224 if (!ret)
2225 *size = 4;
2226 break;
2227 case AMDGPU_PP_SENSOR_GPU_LOAD:
2228 case AMDGPU_PP_SENSOR_MEM_LOAD:
2229 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
2230 if (!ret)
2231 *size = 4;
2232 break;
2233 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
2234 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
2235 *size = 4;
2236 break;
2237 case AMDGPU_PP_SENSOR_EDGE_TEMP:
2238 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2239 if (ret)
2240 return ret;
2241
2242 *((uint32_t *)value) = metrics_table.TemperatureEdge *
2243 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2244 *size = 4;
2245 break;
2246 case AMDGPU_PP_SENSOR_MEM_TEMP:
2247 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2248 if (ret)
2249 return ret;
2250
2251 *((uint32_t *)value) = metrics_table.TemperatureHBM *
2252 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2253 *size = 4;
2254 break;
2255 case AMDGPU_PP_SENSOR_UVD_POWER:
2256 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
2257 *size = 4;
2258 break;
2259 case AMDGPU_PP_SENSOR_VCE_POWER:
2260 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
2261 *size = 4;
2262 break;
2263 case AMDGPU_PP_SENSOR_GPU_POWER:
2264 *size = 16;
2265 ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
2266 break;
2267 case AMDGPU_PP_SENSOR_VDDGFX:
2268 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
2269 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
2270 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
2271 *((uint32_t *)value) =
2272 (uint32_t)convert_to_vddc((uint8_t)val_vid);
2273 break;
2274 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2275 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
2276 if (!ret)
2277 *size = 8;
2278 break;
2279 default:
2280 ret = -EOPNOTSUPP;
2281 break;
2282 }
2283 return ret;
2284 }
2285
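/*
 * Translate a display clock request into a SetHardMinByFreq message for
 * the matching DCEF/DISP/PIX/PHY clock domain.  Only honoured when
 * DCEFCLK DPM is enabled.
 */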
2286 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
2287 struct pp_display_clock_request *clock_req)
2288 {
2289 int result = 0;
2290 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2291 enum amd_pp_clock_type clk_type = clock_req->clock_type;
2292 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
2293 PPCLK_e clk_select = 0;
2294 uint32_t clk_request = 0;
2295
2296 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
2297 switch (clk_type) {
2298 case amd_pp_dcef_clock:
2299 clk_select = PPCLK_DCEFCLK;
2300 break;
2301 case amd_pp_disp_clock:
2302 clk_select = PPCLK_DISPCLK;
2303 break;
2304 case amd_pp_pixel_clock:
2305 clk_select = PPCLK_PIXCLK;
2306 break;
2307 case amd_pp_phy_clock:
2308 clk_select = PPCLK_PHYCLK;
2309 break;
2310 default:
2311 			pr_info("[DisplayClockVoltageRequest] Invalid clock type!");
2312 result = -EINVAL;
2313 break;
2314 }
2315
2316 if (!result) {
2317 clk_request = (clk_select << 16) | clk_freq;
2318 result = smum_send_msg_to_smc_with_parameter(hwmgr,
2319 PPSMC_MSG_SetHardMinByFreq,
2320 clk_request,
2321 NULL);
2322 }
2323 }
2324
2325 return result;
2326 }
2327
2328 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
2329 PHM_PerformanceLevelDesignation designation, uint32_t index,
2330 PHM_PerformanceLevel *level)
2331 {
2332 return 0;
2333 }
2334
2335 static int vega20_notify_smc_display_config_after_ps_adjustment(
2336 struct pp_hwmgr *hwmgr)
2337 {
2338 struct vega20_hwmgr *data =
2339 (struct vega20_hwmgr *)(hwmgr->backend);
2340 struct vega20_single_dpm_table *dpm_table =
2341 &data->dpm_table.mem_table;
2342 struct PP_Clocks min_clocks = {0};
2343 struct pp_display_clock_request clock_req;
2344 int ret = 0;
2345
2346 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
2347 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
2348 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2349
2350 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
2351 clock_req.clock_type = amd_pp_dcef_clock;
2352 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
2353 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
2354 if (data->smu_features[GNLD_DS_DCEFCLK].supported)
2355 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
2356 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
2357 min_clocks.dcefClockInSR / 100,
2358 NULL)) == 0,
2359 "Attempt to set divider for DCEFCLK Failed!",
2360 return ret);
2361 } else {
2362 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
2363 }
2364 }
2365
2366 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2367 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
2368 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2369 PPSMC_MSG_SetHardMinByFreq,
2370 				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
2371 NULL)),
2372 "[SetHardMinFreq] Set hard min uclk failed!",
2373 return ret);
2374 }
2375
2376 return 0;
2377 }
2378
2379 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2380 {
2381 struct vega20_hwmgr *data =
2382 (struct vega20_hwmgr *)(hwmgr->backend);
2383 uint32_t soft_level;
2384 int ret = 0;
2385
2386 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2387
2388 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2389 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2390 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2391
2392 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2393
2394 data->dpm_table.mem_table.dpm_state.soft_min_level =
2395 data->dpm_table.mem_table.dpm_state.soft_max_level =
2396 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2397
2398 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2399
2400 data->dpm_table.soc_table.dpm_state.soft_min_level =
2401 data->dpm_table.soc_table.dpm_state.soft_max_level =
2402 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2403
2404 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2405 FEATURE_DPM_UCLK_MASK |
2406 FEATURE_DPM_SOCCLK_MASK);
2407 PP_ASSERT_WITH_CODE(!ret,
2408 "Failed to upload boot level to highest!",
2409 return ret);
2410
2411 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2412 FEATURE_DPM_UCLK_MASK |
2413 FEATURE_DPM_SOCCLK_MASK);
2414 PP_ASSERT_WITH_CODE(!ret,
2415 "Failed to upload dpm max level to highest!",
2416 return ret);
2417
2418 return 0;
2419 }
2420
2421 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2422 {
2423 struct vega20_hwmgr *data =
2424 (struct vega20_hwmgr *)(hwmgr->backend);
2425 uint32_t soft_level;
2426 int ret = 0;
2427
2428 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2429
2430 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2431 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2432 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2433
2434 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2435
2436 data->dpm_table.mem_table.dpm_state.soft_min_level =
2437 data->dpm_table.mem_table.dpm_state.soft_max_level =
2438 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2439
2440 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2441
2442 data->dpm_table.soc_table.dpm_state.soft_min_level =
2443 data->dpm_table.soc_table.dpm_state.soft_max_level =
2444 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2445
2446 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2447 FEATURE_DPM_UCLK_MASK |
2448 FEATURE_DPM_SOCCLK_MASK);
2449 PP_ASSERT_WITH_CODE(!ret,
2450 			"Failed to upload boot level to lowest!",
2451 return ret);
2452
2453 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2454 FEATURE_DPM_UCLK_MASK |
2455 FEATURE_DPM_SOCCLK_MASK);
2456 PP_ASSERT_WITH_CODE(!ret,
2457 			"Failed to upload dpm max level to lowest!",
2458 return ret);
2459
2460 return 0;
2461
2462 }
2463
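/*
 * Restore automatic DPM: widen the gfxclk/uclk/socclk soft limits back to
 * the full lowest..highest enabled range and upload them to the SMU.
 */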
2464 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2465 {
2466 struct vega20_hwmgr *data =
2467 (struct vega20_hwmgr *)(hwmgr->backend);
2468 uint32_t soft_min_level, soft_max_level;
2469 int ret = 0;
2470
2471 /* gfxclk soft min/max settings */
2472 soft_min_level =
2473 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2474 soft_max_level =
2475 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2476
2477 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2478 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2479 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2480 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2481
2482 /* uclk soft min/max settings */
2483 soft_min_level =
2484 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2485 soft_max_level =
2486 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2487
2488 data->dpm_table.mem_table.dpm_state.soft_min_level =
2489 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2490 data->dpm_table.mem_table.dpm_state.soft_max_level =
2491 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2492
2493 /* socclk soft min/max settings */
2494 soft_min_level =
2495 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2496 soft_max_level =
2497 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2498
2499 data->dpm_table.soc_table.dpm_state.soft_min_level =
2500 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2501 data->dpm_table.soc_table.dpm_state.soft_max_level =
2502 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2503
2504 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2505 FEATURE_DPM_UCLK_MASK |
2506 FEATURE_DPM_SOCCLK_MASK);
2507 PP_ASSERT_WITH_CODE(!ret,
2508 "Failed to upload DPM Bootup Levels!",
2509 return ret);
2510
2511 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2512 FEATURE_DPM_UCLK_MASK |
2513 FEATURE_DPM_SOCCLK_MASK);
2514 PP_ASSERT_WITH_CODE(!ret,
2515 "Failed to upload DPM Max Levels!",
2516 return ret);
2517
2518 return 0;
2519 }
2520
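/*
 * Map a profiling forced level to gfx/mem/soc DPM level masks: the
 * standard profile uses the predefined UMD pstate levels, MIN_SCLK/
 * MIN_MCLK pin the respective clock to level 0, and PEAK selects the
 * top level of each table.
 */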
2521 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2522 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
2523 {
2524 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2525 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
2526 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
2527 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
2528
2529 *sclk_mask = 0;
2530 *mclk_mask = 0;
2531 *soc_mask = 0;
2532
2533 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
2534 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
2535 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
2536 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
2537 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
2538 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
2539 }
2540
2541 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2542 *sclk_mask = 0;
2543 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2544 *mclk_mask = 0;
2545 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2546 *sclk_mask = gfx_dpm_table->count - 1;
2547 *mclk_mask = mem_dpm_table->count - 1;
2548 *soc_mask = soc_dpm_table->count - 1;
2549 }
2550
2551 return 0;
2552 }
2553
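/*
 * Force a clock domain to the levels selected in 'mask': the lowest set
 * bit becomes the (soft, or hard for DCEFCLK) minimum and the highest set
 * bit the soft maximum, and the new limits are uploaded to the SMU.
 * PCIE only honours the minimum link level.
 */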
2554 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2555 enum pp_clock_type type, uint32_t mask)
2556 {
2557 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2558 uint32_t soft_min_level, soft_max_level, hard_min_level;
2559 int ret = 0;
2560
2561 switch (type) {
2562 case PP_SCLK:
2563 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2564 soft_max_level = mask ? (fls(mask) - 1) : 0;
2565
2566 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2567 pr_err("Clock level specified %d is over max allowed %d\n",
2568 soft_max_level,
2569 data->dpm_table.gfx_table.count - 1);
2570 return -EINVAL;
2571 }
2572
2573 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2574 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2575 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2576 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2577
2578 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2579 PP_ASSERT_WITH_CODE(!ret,
2580 "Failed to upload boot level to lowest!",
2581 return ret);
2582
2583 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2584 PP_ASSERT_WITH_CODE(!ret,
2585 "Failed to upload dpm max level to highest!",
2586 return ret);
2587 break;
2588
2589 case PP_MCLK:
2590 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2591 soft_max_level = mask ? (fls(mask) - 1) : 0;
2592
2593 if (soft_max_level >= data->dpm_table.mem_table.count) {
2594 pr_err("Clock level specified %d is over max allowed %d\n",
2595 soft_max_level,
2596 data->dpm_table.mem_table.count - 1);
2597 return -EINVAL;
2598 }
2599
2600 data->dpm_table.mem_table.dpm_state.soft_min_level =
2601 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2602 data->dpm_table.mem_table.dpm_state.soft_max_level =
2603 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2604
2605 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2606 PP_ASSERT_WITH_CODE(!ret,
2607 "Failed to upload boot level to lowest!",
2608 return ret);
2609
2610 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2611 PP_ASSERT_WITH_CODE(!ret,
2612 "Failed to upload dpm max level to highest!",
2613 return ret);
2614
2615 break;
2616
2617 case PP_SOCCLK:
2618 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2619 soft_max_level = mask ? (fls(mask) - 1) : 0;
2620
2621 if (soft_max_level >= data->dpm_table.soc_table.count) {
2622 pr_err("Clock level specified %d is over max allowed %d\n",
2623 soft_max_level,
2624 data->dpm_table.soc_table.count - 1);
2625 return -EINVAL;
2626 }
2627
2628 data->dpm_table.soc_table.dpm_state.soft_min_level =
2629 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2630 data->dpm_table.soc_table.dpm_state.soft_max_level =
2631 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2632
2633 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2634 PP_ASSERT_WITH_CODE(!ret,
2635 "Failed to upload boot level to lowest!",
2636 return ret);
2637
2638 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2639 PP_ASSERT_WITH_CODE(!ret,
2640 "Failed to upload dpm max level to highest!",
2641 return ret);
2642
2643 break;
2644
2645 case PP_FCLK:
2646 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2647 soft_max_level = mask ? (fls(mask) - 1) : 0;
2648
2649 if (soft_max_level >= data->dpm_table.fclk_table.count) {
2650 pr_err("Clock level specified %d is over max allowed %d\n",
2651 soft_max_level,
2652 data->dpm_table.fclk_table.count - 1);
2653 return -EINVAL;
2654 }
2655
2656 data->dpm_table.fclk_table.dpm_state.soft_min_level =
2657 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value;
2658 data->dpm_table.fclk_table.dpm_state.soft_max_level =
2659 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value;
2660
2661 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2662 PP_ASSERT_WITH_CODE(!ret,
2663 "Failed to upload boot level to lowest!",
2664 return ret);
2665
2666 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2667 PP_ASSERT_WITH_CODE(!ret,
2668 "Failed to upload dpm max level to highest!",
2669 return ret);
2670
2671 break;
2672
2673 case PP_DCEFCLK:
2674 hard_min_level = mask ? (ffs(mask) - 1) : 0;
2675
2676 if (hard_min_level >= data->dpm_table.dcef_table.count) {
2677 pr_err("Clock level specified %d is over max allowed %d\n",
2678 hard_min_level,
2679 data->dpm_table.dcef_table.count - 1);
2680 return -EINVAL;
2681 }
2682
2683 data->dpm_table.dcef_table.dpm_state.hard_min_level =
2684 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
2685
2686 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK);
2687 PP_ASSERT_WITH_CODE(!ret,
2688 "Failed to upload boot level to lowest!",
2689 return ret);
2690
2691 //TODO: Setting DCEFCLK max dpm level is not supported
2692
2693 break;
2694
2695 case PP_PCIE:
2696 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2697 soft_max_level = mask ? (fls(mask) - 1) : 0;
2698 if (soft_min_level >= NUM_LINK_LEVELS ||
2699 soft_max_level >= NUM_LINK_LEVELS)
2700 return -EINVAL;
2701
2702 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2703 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
2704 NULL);
2705 PP_ASSERT_WITH_CODE(!ret,
2706 "Failed to set min link dpm level!",
2707 return ret);
2708
2709 break;
2710
2711 default:
2712 break;
2713 }
2714
2715 return 0;
2716 }
2717
2718 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
2719 enum amd_dpm_forced_level level)
2720 {
2721 int ret = 0;
2722 uint32_t sclk_mask, mclk_mask, soc_mask;
2723
2724 switch (level) {
2725 case AMD_DPM_FORCED_LEVEL_HIGH:
2726 ret = vega20_force_dpm_highest(hwmgr);
2727 break;
2728
2729 case AMD_DPM_FORCED_LEVEL_LOW:
2730 ret = vega20_force_dpm_lowest(hwmgr);
2731 break;
2732
2733 case AMD_DPM_FORCED_LEVEL_AUTO:
2734 ret = vega20_unforce_dpm_levels(hwmgr);
2735 break;
2736
2737 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2738 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2739 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2740 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2741 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
2742 if (ret)
2743 return ret;
2744 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
2745 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
2746 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask);
2747 break;
2748
2749 case AMD_DPM_FORCED_LEVEL_MANUAL:
2750 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2751 default:
2752 break;
2753 }
2754
2755 return ret;
2756 }
2757
2758 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
2759 {
2760 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2761
2762 	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
2763 return AMD_FAN_CTRL_MANUAL;
2764 else
2765 return AMD_FAN_CTRL_AUTO;
2766 }
2767
2768 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
2769 {
2770 switch (mode) {
2771 case AMD_FAN_CTRL_NONE:
2772 vega20_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
2773 break;
2774 case AMD_FAN_CTRL_MANUAL:
2775 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2776 vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
2777 break;
2778 case AMD_FAN_CTRL_AUTO:
2779 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2780 vega20_fan_ctrl_start_smc_fan_control(hwmgr);
2781 break;
2782 default:
2783 break;
2784 }
2785 }
2786
2787 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
2788 struct amd_pp_simple_clock_info *info)
2789 {
2790 #if 0
2791 struct phm_ppt_v2_information *table_info =
2792 (struct phm_ppt_v2_information *)hwmgr->pptable;
2793 struct phm_clock_and_voltage_limits *max_limits =
2794 &table_info->max_clock_voltage_on_ac;
2795
2796 info->engine_max_clock = max_limits->sclk;
2797 info->memory_max_clock = max_limits->mclk;
2798 #endif
2799 return 0;
2800 }
2801
2802
2803 static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
2804 struct pp_clock_levels_with_latency *clocks)
2805 {
2806 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2807 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2808 int i, count;
2809
2810 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
2811 return -1;
2812
2813 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2814 clocks->num_levels = count;
2815
2816 for (i = 0; i < count; i++) {
2817 clocks->data[i].clocks_in_khz =
2818 dpm_table->dpm_levels[i].value * 1000;
2819 clocks->data[i].latency_in_us = 0;
2820 }
2821
2822 return 0;
2823 }
2824
2825 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
2826 uint32_t clock)
2827 {
2828 return 25;
2829 }
2830
2831 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
2832 struct pp_clock_levels_with_latency *clocks)
2833 {
2834 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2835 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
2836 int i, count;
2837
2838 if (!data->smu_features[GNLD_DPM_UCLK].enabled)
2839 return -1;
2840
2841 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2842 clocks->num_levels = data->mclk_latency_table.count = count;
2843
2844 for (i = 0; i < count; i++) {
2845 clocks->data[i].clocks_in_khz =
2846 data->mclk_latency_table.entries[i].frequency =
2847 dpm_table->dpm_levels[i].value * 1000;
2848 clocks->data[i].latency_in_us =
2849 data->mclk_latency_table.entries[i].latency =
2850 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
2851 }
2852
2853 return 0;
2854 }
2855
2856 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
2857 struct pp_clock_levels_with_latency *clocks)
2858 {
2859 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2860 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
2861 int i, count;
2862
2863 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
2864 return -1;
2865
2866 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2867 clocks->num_levels = count;
2868
2869 for (i = 0; i < count; i++) {
2870 clocks->data[i].clocks_in_khz =
2871 dpm_table->dpm_levels[i].value * 1000;
2872 clocks->data[i].latency_in_us = 0;
2873 }
2874
2875 return 0;
2876 }
2877
2878 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
2879 struct pp_clock_levels_with_latency *clocks)
2880 {
2881 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2882 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
2883 int i, count;
2884
2885 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
2886 return -1;
2887
2888 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2889 clocks->num_levels = count;
2890
2891 for (i = 0; i < count; i++) {
2892 clocks->data[i].clocks_in_khz =
2893 dpm_table->dpm_levels[i].value * 1000;
2894 clocks->data[i].latency_in_us = 0;
2895 }
2896
2897 return 0;
2898
2899 }
2900
2901 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
2902 enum amd_pp_clock_type type,
2903 struct pp_clock_levels_with_latency *clocks)
2904 {
2905 int ret;
2906
2907 switch (type) {
2908 case amd_pp_sys_clock:
2909 ret = vega20_get_sclks(hwmgr, clocks);
2910 break;
2911 case amd_pp_mem_clock:
2912 ret = vega20_get_memclocks(hwmgr, clocks);
2913 break;
2914 case amd_pp_dcef_clock:
2915 ret = vega20_get_dcefclocks(hwmgr, clocks);
2916 break;
2917 case amd_pp_soc_clock:
2918 ret = vega20_get_socclocks(hwmgr, clocks);
2919 break;
2920 default:
2921 return -EINVAL;
2922 }
2923
2924 return ret;
2925 }
2926
2927 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
2928 enum amd_pp_clock_type type,
2929 struct pp_clock_levels_with_voltage *clocks)
2930 {
2931 clocks->num_levels = 0;
2932
2933 return 0;
2934 }
2935
2936 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
2937 void *clock_ranges)
2938 {
2939 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2940 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
2941 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
2942
2943 if (!data->registry_data.disable_water_mark &&
2944 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2945 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2946 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
2947 data->water_marks_bitmap |= WaterMarksExist;
2948 data->water_marks_bitmap &= ~WaterMarksLoaded;
2949 }
2950
2951 return 0;
2952 }
2953
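/*
 * Back end of the overdrive (pp_od_clk_voltage) edit interface: stage
 * edits to the sclk/mclk limits and the three-point gfxclk voltage curve
 * in the local overdrive table, re-fetch the table from the SMU to drop
 * staged edits, or commit the staged table and rebuild the affected DPM
 * tables.
 */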
2954 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2955 enum PP_OD_DPM_TABLE_COMMAND type,
2956 long *input, uint32_t size)
2957 {
2958 struct vega20_hwmgr *data =
2959 (struct vega20_hwmgr *)(hwmgr->backend);
2960 struct vega20_od8_single_setting *od8_settings =
2961 data->od8_settings.od8_settings_array;
2962 OverDriveTable_t *od_table =
2963 &(data->smc_state_table.overdrive_table);
2964 int32_t input_index, input_clk, input_vol, i;
2965 int od8_id;
2966 int ret;
2967
2968 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
2969 return -EINVAL);
2970
2971 switch (type) {
2972 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2973 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
2974 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
2975 pr_info("Sclk min/max frequency overdrive not supported\n");
2976 return -EOPNOTSUPP;
2977 }
2978
2979 for (i = 0; i < size; i += 2) {
2980 if (i + 2 > size) {
2981 pr_info("invalid number of input parameters %d\n",
2982 size);
2983 return -EINVAL;
2984 }
2985
2986 input_index = input[i];
2987 input_clk = input[i + 1];
2988
2989 if (input_index != 0 && input_index != 1) {
2990 pr_info("Invalid index %d\n", input_index);
2991 				pr_info("Only min/max sclk frequency settings, indexed by 0/1, are supported\n");
2992 return -EINVAL;
2993 }
2994
2995 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
2996 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
2997 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
2998 input_clk,
2999 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3000 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3001 return -EINVAL;
3002 }
3003
3004 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) ||
3005 (input_index == 1 && od_table->GfxclkFmax != input_clk))
3006 data->gfxclk_overdrive = true;
3007
3008 if (input_index == 0)
3009 od_table->GfxclkFmin = input_clk;
3010 else
3011 od_table->GfxclkFmax = input_clk;
3012 }
3013
3014 break;
3015
3016 case PP_OD_EDIT_MCLK_VDDC_TABLE:
3017 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3018 pr_info("Mclk max frequency overdrive not supported\n");
3019 return -EOPNOTSUPP;
3020 }
3021
3022 for (i = 0; i < size; i += 2) {
3023 if (i + 2 > size) {
3024 pr_info("invalid number of input parameters %d\n",
3025 size);
3026 return -EINVAL;
3027 }
3028
3029 input_index = input[i];
3030 input_clk = input[i + 1];
3031
3032 if (input_index != 1) {
3033 pr_info("Invalid index %d\n", input_index);
3034 				pr_info("Only the max mclk frequency setting, indexed by 1, is supported\n");
3035 return -EINVAL;
3036 }
3037
3038 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
3039 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
3040 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3041 input_clk,
3042 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3043 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3044 return -EINVAL;
3045 }
3046
3047 if (input_index == 1 && od_table->UclkFmax != input_clk)
3048 data->memclk_overdrive = true;
3049
3050 od_table->UclkFmax = input_clk;
3051 }
3052
3053 break;
3054
3055 case PP_OD_EDIT_VDDC_CURVE:
3056 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3057 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3058 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3059 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3060 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3061 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
3062 pr_info("Voltage curve calibrate not supported\n");
3063 return -EOPNOTSUPP;
3064 }
3065
3066 for (i = 0; i < size; i += 3) {
3067 if (i + 3 > size) {
3068 pr_info("invalid number of input parameters %d\n",
3069 size);
3070 return -EINVAL;
3071 }
3072
3073 input_index = input[i];
3074 input_clk = input[i + 1];
3075 input_vol = input[i + 2];
3076
3077 if (input_index > 2) {
3078 pr_info("Setting for point %d is not supported\n",
3079 input_index + 1);
3080 				pr_info("Three points are supported, indexed by 0, 1, 2\n");
3081 return -EINVAL;
3082 }
3083
3084 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
3085 if (input_clk < od8_settings[od8_id].min_value ||
3086 input_clk > od8_settings[od8_id].max_value) {
3087 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3088 input_clk,
3089 od8_settings[od8_id].min_value,
3090 od8_settings[od8_id].max_value);
3091 return -EINVAL;
3092 }
3093
3094 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
3095 if (input_vol < od8_settings[od8_id].min_value ||
3096 input_vol > od8_settings[od8_id].max_value) {
3097 pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
3098 input_vol,
3099 od8_settings[od8_id].min_value,
3100 od8_settings[od8_id].max_value);
3101 return -EINVAL;
3102 }
3103
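			/*
			 * User-supplied voltages are in mV; the SMU overdrive
			 * table stores them scaled by VOLTAGE_SCALE (the OD_VDDC_CURVE
			 * print path divides by the same factor).
			 */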
3104 switch (input_index) {
3105 case 0:
3106 od_table->GfxclkFreq1 = input_clk;
3107 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
3108 break;
3109 case 1:
3110 od_table->GfxclkFreq2 = input_clk;
3111 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
3112 break;
3113 case 2:
3114 od_table->GfxclkFreq3 = input_clk;
3115 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
3116 break;
3117 }
3118 }
3119 break;
3120
3121 case PP_OD_RESTORE_DEFAULT_TABLE:
3122 data->gfxclk_overdrive = false;
3123 data->memclk_overdrive = false;
3124
3125 ret = smum_smc_table_manager(hwmgr,
3126 (uint8_t *)od_table,
3127 TABLE_OVERDRIVE, true);
3128 PP_ASSERT_WITH_CODE(!ret,
3129 "Failed to export overdrive table!",
3130 return ret);
3131 break;
3132
3133 case PP_OD_COMMIT_DPM_TABLE:
3134 ret = smum_smc_table_manager(hwmgr,
3135 (uint8_t *)od_table,
3136 TABLE_OVERDRIVE, false);
3137 PP_ASSERT_WITH_CODE(!ret,
3138 "Failed to import overdrive table!",
3139 return ret);
3140
3141 /* retrieve updated gfxclk table */
3142 if (data->gfxclk_overdrive) {
3143 data->gfxclk_overdrive = false;
3144
3145 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
3146 if (ret)
3147 return ret;
3148 }
3149
3150 /* retrieve updated memclk table */
3151 if (data->memclk_overdrive) {
3152 data->memclk_overdrive = false;
3153
3154 ret = vega20_setup_memclk_dpm_table(hwmgr);
3155 if (ret)
3156 return ret;
3157 }
3158 break;
3159
3160 default:
3161 return -EINVAL;
3162 }
3163
3164 return 0;
3165 }
3166
3167 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
3168 enum pp_mp1_state mp1_state)
3169 {
3170 uint16_t msg;
3171 int ret;
3172
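	/* Map the requested MP1 state onto the matching SMU "prepare" message;
	 * PP_MP1_STATE_NONE needs no notification.
	 */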
3173 switch (mp1_state) {
3174 case PP_MP1_STATE_SHUTDOWN:
3175 msg = PPSMC_MSG_PrepareMp1ForShutdown;
3176 break;
3177 case PP_MP1_STATE_UNLOAD:
3178 msg = PPSMC_MSG_PrepareMp1ForUnload;
3179 break;
3180 case PP_MP1_STATE_RESET:
3181 msg = PPSMC_MSG_PrepareMp1ForReset;
3182 break;
3183 case PP_MP1_STATE_NONE:
3184 default:
3185 return 0;
3186 }
3187
3188 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
3189 "[PrepareMp1] Failed!",
3190 return ret);
3191
3192 return 0;
3193 }
3194
3195 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3196 {
3197 static const char *ppfeature_name[] = {
3198 "DPM_PREFETCHER",
3199 "GFXCLK_DPM",
3200 "UCLK_DPM",
3201 "SOCCLK_DPM",
3202 "UVD_DPM",
3203 "VCE_DPM",
3204 "ULV",
3205 "MP0CLK_DPM",
3206 "LINK_DPM",
3207 "DCEFCLK_DPM",
3208 "GFXCLK_DS",
3209 "SOCCLK_DS",
3210 "LCLK_DS",
3211 "PPT",
3212 "TDC",
3213 "THERMAL",
3214 "GFX_PER_CU_CG",
3215 "RM",
3216 "DCEFCLK_DS",
3217 "ACDC",
3218 "VR0HOT",
3219 "VR1HOT",
3220 "FW_CTF",
3221 "LED_DISPLAY",
3222 "FAN_CONTROL",
3223 "GFX_EDC",
3224 "GFXOFF",
3225 "CG",
3226 "FCLK_DPM",
3227 "FCLK_DS",
3228 "MP1CLK_DS",
3229 "MP0CLK_DS",
3230 "XGMI",
3231 "ECC"};
3232 static const char *output_title[] = {
3233 "FEATURES",
3234 "BITMASK",
3235 "ENABLEMENT"};
3236 uint64_t features_enabled;
3237 int i;
3238 int ret = 0;
3239 int size = 0;
3240
3241 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3242 PP_ASSERT_WITH_CODE(!ret,
3243 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
3244 return ret);
3245
3246 size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
3247 size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
3248 output_title[0],
3249 output_title[1],
3250 output_title[2]);
3251 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3252 size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
3253 ppfeature_name[i],
3254 1ULL << i,
3255 (features_enabled & (1ULL << i)) ? "Y" : "N");
3256 }
3257
3258 return size;
3259 }
3260
3261 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
3262 {
3263 struct vega20_hwmgr *data =
3264 (struct vega20_hwmgr *)(hwmgr->backend);
3265 uint64_t features_enabled, features_to_enable, features_to_disable;
3266 int i, ret = 0;
3267 bool enabled;
3268
3269 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
3270 return -EINVAL;
3271
3272 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3273 if (ret)
3274 return ret;
3275
3276 features_to_disable =
3277 features_enabled & ~new_ppfeature_masks;
3278 features_to_enable =
3279 ~features_enabled & new_ppfeature_masks;
3280
3281 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
3282 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
3283
3284 if (features_to_disable) {
3285 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
3286 if (ret)
3287 return ret;
3288 }
3289
3290 if (features_to_enable) {
3291 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
3292 if (ret)
3293 return ret;
3294 }
3295
3296 /* Update the cached feature enablement state */
3297 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3298 if (ret)
3299 return ret;
3300
3301 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3302 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
3303 true : false;
3304 data->smu_features[i].enabled = enabled;
3305 }
3306
3307 return 0;
3308 }
3309
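/*
 * The PCIe LC control registers report an encoded level; the link_width[] and
 * link_speed[] tables at the top of this file translate that level into lanes
 * and link speed (apparently in 0.1 GT/s units, matching the 2.5/5.0/8.0/16.0
 * GT/s generations printed in the PP_PCIE case below).
 */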
3310 static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
3311 {
3312 struct amdgpu_device *adev = hwmgr->adev;
3313
3314 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
3315 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
3316 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
3317 }
3318
3319 static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
3320 {
3321 uint32_t width_level;
3322
3323 width_level = vega20_get_current_pcie_link_width_level(hwmgr);
3324 if (width_level > LINK_WIDTH_MAX)
3325 width_level = 0;
3326
3327 return link_width[width_level];
3328 }
3329
3330 static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
3331 {
3332 struct amdgpu_device *adev = hwmgr->adev;
3333
3334 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
3335 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
3336 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
3337 }
3338
3339 static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
3340 {
3341 uint32_t speed_level;
3342
3343 speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
3344 if (speed_level > LINK_SPEED_MAX)
3345 speed_level = 0;
3346
3347 return link_speed[speed_level];
3348 }
3349
3350 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3351 enum pp_clock_type type, char *buf)
3352 {
3353 struct vega20_hwmgr *data =
3354 (struct vega20_hwmgr *)(hwmgr->backend);
3355 struct vega20_od8_single_setting *od8_settings =
3356 data->od8_settings.od8_settings_array;
3357 OverDriveTable_t *od_table =
3358 &(data->smc_state_table.overdrive_table);
3359 PPTable_t *pptable = &(data->smc_state_table.pp_table);
3360 struct pp_clock_levels_with_latency clocks;
3361 struct vega20_single_dpm_table *fclk_dpm_table =
3362 &(data->dpm_table.fclk_table);
3363 int i, now, size = 0;
3364 int ret = 0;
3365 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
3366
3367 switch (type) {
3368 case PP_SCLK:
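		/* The current clock appears to be reported in 10 kHz units:
		 * now / 100 gives MHz and now * 10 gives kHz, as used below.
		 */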
3369 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
3370 PP_ASSERT_WITH_CODE(!ret,
3371 "Attempt to get current gfx clk Failed!",
3372 return ret);
3373
3374 if (vega20_get_sclks(hwmgr, &clocks)) {
3375 size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
3376 now / 100);
3377 break;
3378 }
3379
3380 for (i = 0; i < clocks.num_levels; i++)
3381 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
3382 i, clocks.data[i].clocks_in_khz / 1000,
3383 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3384 break;
3385
3386 case PP_MCLK:
3387 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
3388 PP_ASSERT_WITH_CODE(!ret,
3389 "Attempt to get current mclk freq Failed!",
3390 return ret);
3391
3392 if (vega20_get_memclocks(hwmgr, &clocks)) {
3393 size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
3394 now / 100);
3395 break;
3396 }
3397
3398 for (i = 0; i < clocks.num_levels; i++)
3399 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
3400 i, clocks.data[i].clocks_in_khz / 1000,
3401 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3402 break;
3403
3404 case PP_SOCCLK:
3405 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
3406 PP_ASSERT_WITH_CODE(!ret,
3407 "Attempt to get current socclk freq Failed!",
3408 return ret);
3409
3410 if (vega20_get_socclocks(hwmgr, &clocks)) {
3411 size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
3412 now / 100);
3413 break;
3414 }
3415
3416 for (i = 0; i < clocks.num_levels; i++)
3417 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
3418 i, clocks.data[i].clocks_in_khz / 1000,
3419 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3420 break;
3421
3422 case PP_FCLK:
3423 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
3424 PP_ASSERT_WITH_CODE(!ret,
3425 "Attempt to get current fclk freq Failed!",
3426 return ret);
3427
3428 for (i = 0; i < fclk_dpm_table->count; i++)
3429 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
3430 i, fclk_dpm_table->dpm_levels[i].value,
3431 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
3432 break;
3433
3434 case PP_DCEFCLK:
3435 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
3436 PP_ASSERT_WITH_CODE(!ret,
3437 "Attempt to get current dcefclk freq Failed!",
3438 return ret);
3439
3440 if (vega20_get_dcefclocks(hwmgr, &clocks)) {
3441 size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
3442 now / 100);
3443 break;
3444 }
3445
3446 for (i = 0; i < clocks.num_levels; i++)
3447 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
3448 i, clocks.data[i].clocks_in_khz / 1000,
3449 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3450 break;
3451
3452 case PP_PCIE:
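		/* Mark the pptable link level whose gen speed and lane width
		 * match what the PCIe registers currently report.
		 */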
3453 current_gen_speed =
3454 vega20_get_current_pcie_link_speed_level(hwmgr);
3455 current_lane_width =
3456 vega20_get_current_pcie_link_width_level(hwmgr);
3457 for (i = 0; i < NUM_LINK_LEVELS; i++) {
3458 gen_speed = pptable->PcieGenSpeed[i];
3459 lane_width = pptable->PcieLaneCount[i];
3460
3461 size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
3462 (gen_speed == 0) ? "2.5GT/s," :
3463 (gen_speed == 1) ? "5.0GT/s," :
3464 (gen_speed == 2) ? "8.0GT/s," :
3465 (gen_speed == 3) ? "16.0GT/s," : "",
3466 (lane_width == 1) ? "x1" :
3467 (lane_width == 2) ? "x2" :
3468 (lane_width == 3) ? "x4" :
3469 (lane_width == 4) ? "x8" :
3470 (lane_width == 5) ? "x12" :
3471 (lane_width == 6) ? "x16" : "",
3472 pptable->LclkFreq[i],
3473 (current_gen_speed == gen_speed) &&
3474 (current_lane_width == lane_width) ?
3475 "*" : "");
3476 }
3477 break;
3478
3479 case OD_SCLK:
3480 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3481 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3482 size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
3483 size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
3484 od_table->GfxclkFmin);
3485 size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
3486 od_table->GfxclkFmax);
3487 }
3488 break;
3489
3490 case OD_MCLK:
3491 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3492 size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
3493 size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
3494 od_table->UclkFmax);
3495 }
3496
3497 break;
3498
3499 case OD_VDDC_CURVE:
3500 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3501 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3502 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3503 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3504 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3505 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3506 size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE");
3507 size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
3508 od_table->GfxclkFreq1,
3509 od_table->GfxclkVolt1 / VOLTAGE_SCALE);
3510 size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
3511 od_table->GfxclkFreq2,
3512 od_table->GfxclkVolt2 / VOLTAGE_SCALE);
3513 size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
3514 od_table->GfxclkFreq3,
3515 od_table->GfxclkVolt3 / VOLTAGE_SCALE);
3516 }
3517
3518 break;
3519
3520 case OD_RANGE:
3521 size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
3522
3523 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3524 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3525 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
3526 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3527 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3528 }
3529
3530 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3531 size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
3532 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3533 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3534 }
3535
3536 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3537 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3538 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3539 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3540 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3541 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3542 size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
3543 od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
3544 od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
3545 size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
3546 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
3547 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
3548 size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
3549 od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
3550 od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
3551 size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
3552 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
3553 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
3554 size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
3555 od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
3556 od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
3557 size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
3558 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
3559 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
3560 }
3561
3562 break;
3563 default:
3564 break;
3565 }
3566 return size;
3567 }
3568
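/*
 * Pin UCLK at its highest DPM level via a hard-min request. Called from the
 * pre-display-configuration path below, presumably so memory bandwidth cannot
 * drop while the display configuration is being changed.
 */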
3569 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
3570 struct vega20_single_dpm_table *dpm_table)
3571 {
3572 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3573 int ret = 0;
3574
3575 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
3576 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3577 				"[SetUclkToHighestDpmLevel] Dpm table has no entry!",
3578 return -EINVAL);
3579 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
3580 				"[SetUclkToHighestDpmLevel] Dpm table has too many entries!",
3581 return -EINVAL);
3582
3583 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3584 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3585 PPSMC_MSG_SetHardMinByFreq,
3586 				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
3587 NULL)),
3588 				"[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
3589 return ret);
3590 }
3591
3592 return ret;
3593 }
3594
3595 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
3596 {
3597 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3598 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
3599 int ret = 0;
3600
3601 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
3602 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3603 				"[SetFclkToHighestDpmLevel] Dpm table has no entry!",
3604 return -EINVAL);
3605 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
3606 				"[SetFclkToHighestDpmLevel] Dpm table has too many entries!",
3607 return -EINVAL);
3608
3609 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3610 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3611 PPSMC_MSG_SetSoftMinByFreq,
3612 				(PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level,
3613 NULL)),
3614 				"[SetFclkToHighestDpmLevel] Set soft min fclk failed!",
3615 return ret);
3616 }
3617
3618 return ret;
3619 }
3620
3621 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3622 {
3623 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3624 int ret = 0;
3625
3626 smum_send_msg_to_smc_with_parameter(hwmgr,
3627 PPSMC_MSG_NumOfDisplays, 0, NULL);
3628
3629 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
3630 &data->dpm_table.mem_table);
3631 if (ret)
3632 return ret;
3633
3634 return vega20_set_fclk_to_highest_dpm_level(hwmgr);
3635 }
3636
3637 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3638 {
3639 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3640 int result = 0;
3641 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
3642
3643 if ((data->water_marks_bitmap & WaterMarksExist) &&
3644 !(data->water_marks_bitmap & WaterMarksLoaded)) {
3645 result = smum_smc_table_manager(hwmgr,
3646 (uint8_t *)wm_table, TABLE_WATERMARKS, false);
3647 PP_ASSERT_WITH_CODE(!result,
3648 "Failed to update WMTABLE!",
3649 return result);
3650 data->water_marks_bitmap |= WaterMarksLoaded;
3651 }
3652
3653 if ((data->water_marks_bitmap & WaterMarksExist) &&
3654 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
3655 data->smu_features[GNLD_DPM_SOCCLK].supported) {
3656 result = smum_send_msg_to_smc_with_parameter(hwmgr,
3657 PPSMC_MSG_NumOfDisplays,
3658 hwmgr->display_config->num_display,
3659 NULL);
3660 }
3661
3662 return result;
3663 }
3664
3665 static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
3666 {
3667 struct vega20_hwmgr *data =
3668 (struct vega20_hwmgr *)(hwmgr->backend);
3669 int ret = 0;
3670
3671 if (data->smu_features[GNLD_DPM_UVD].supported) {
3672 if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
3673 if (enable)
3674 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
3675 else
3676 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
3677 }
3678
3679 ret = vega20_enable_smc_features(hwmgr,
3680 enable,
3681 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
3682 PP_ASSERT_WITH_CODE(!ret,
3683 "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
3684 return ret);
3685 data->smu_features[GNLD_DPM_UVD].enabled = enable;
3686 }
3687
3688 return 0;
3689 }
3690
3691 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
3692 {
3693 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3694
3695 if (data->vce_power_gated == bgate)
3696 		return;
3697
3698 data->vce_power_gated = bgate;
3699 if (bgate) {
3700 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3701 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3702 AMD_IP_BLOCK_TYPE_VCE,
3703 AMD_PG_STATE_GATE);
3704 } else {
3705 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3706 AMD_IP_BLOCK_TYPE_VCE,
3707 AMD_PG_STATE_UNGATE);
3708 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3709 }
3710
3711 }
3712
3713 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
3714 {
3715 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3716
3717 if (data->uvd_power_gated == bgate)
3718 		return;
3719
3720 data->uvd_power_gated = bgate;
3721 vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
3722 }
3723
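/*
 * Re-derive the soft/hard min and max levels of every DPM table from the
 * current display configuration, UMD pstate caps and forced DPM level, ahead
 * of the limits being committed to the SMU.
 */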
3724 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3725 {
3726 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3727 struct vega20_single_dpm_table *dpm_table;
3728 bool vblank_too_short = false;
3729 bool disable_mclk_switching;
3730 bool disable_fclk_switching;
3731 uint32_t i, latency;
3732
3733 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
3734 !hwmgr->display_config->multi_monitor_in_sync) ||
3735 vblank_too_short;
3736 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3737
3738 /* gfxclk */
3739 dpm_table = &(data->dpm_table.gfx_table);
3740 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3741 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3742 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3743 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3744
3745 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3746 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
3747 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3748 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3749 }
3750
3751 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3752 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3753 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3754 }
3755
3756 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3757 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3758 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3759 }
3760 }
3761
3762 /* memclk */
3763 dpm_table = &(data->dpm_table.mem_table);
3764 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3765 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3766 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3767 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3768
3769 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3770 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
3771 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3772 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3773 }
3774
3775 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3776 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3777 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3778 }
3779
3780 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3781 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3782 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3783 }
3784 }
3785
3786 /* honour DAL's UCLK Hardmin */
3787 if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
3788 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
3789
3790 /* Hardmin is dependent on displayconfig */
3791 if (disable_mclk_switching) {
3792 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3793 for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
3794 if (data->mclk_latency_table.entries[i].latency <= latency) {
3795 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
3796 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
3797 break;
3798 }
3799 }
3800 }
3801 }
3802
3803 if (hwmgr->display_config->nb_pstate_switch_disable)
3804 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3805
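	/* FCLK switching is only disabled when UCLK is already pinned at its
	 * top level, either by the mclk-switching rule above or by DAL's
	 * minimum memory clock request.
	 */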
3806 if ((disable_mclk_switching &&
3807 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
3808 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
3809 disable_fclk_switching = true;
3810 else
3811 disable_fclk_switching = false;
3812
3813 /* fclk */
3814 dpm_table = &(data->dpm_table.fclk_table);
3815 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3816 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3817 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3818 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3819 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
3820 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3821
3822 /* vclk */
3823 dpm_table = &(data->dpm_table.vclk_table);
3824 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3825 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3826 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3827 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3828
3829 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3830 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3831 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3832 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3833 }
3834
3835 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3836 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3837 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3838 }
3839 }
3840
3841 /* dclk */
3842 dpm_table = &(data->dpm_table.dclk_table);
3843 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3844 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3845 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3846 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3847
3848 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3849 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3850 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3851 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3852 }
3853
3854 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3855 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3856 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3857 }
3858 }
3859
3860 /* socclk */
3861 dpm_table = &(data->dpm_table.soc_table);
3862 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3863 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3864 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3865 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3866
3867 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3868 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
3869 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3870 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3871 }
3872
3873 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3874 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3875 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3876 }
3877 }
3878
3879 /* eclk */
3880 dpm_table = &(data->dpm_table.eclk_table);
3881 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3882 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3883 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3884 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3885
3886 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3887 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
3888 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3889 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3890 }
3891
3892 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3893 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3894 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3895 }
3896 }
3897
3898 return 0;
3899 }
3900
3901 static bool
3902 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
3903 {
3904 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3905 bool is_update_required = false;
3906
3907 if (data->display_timing.num_existing_displays !=
3908 hwmgr->display_config->num_display)
3909 is_update_required = true;
3910
3911 if (data->registry_data.gfx_clk_deep_sleep_support &&
3912 (data->display_timing.min_clock_in_sr !=
3913 hwmgr->display_config->min_core_set_clock_in_sr))
3914 is_update_required = true;
3915
3916 return is_update_required;
3917 }
3918
3919 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3920 {
3921 int ret = 0;
3922
3923 ret = vega20_disable_all_smu_features(hwmgr);
3924 PP_ASSERT_WITH_CODE(!ret,
3925 "[DisableDpmTasks] Failed to disable all smu features!",
3926 return ret);
3927
3928 return 0;
3929 }
3930
3931 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
3932 {
3933 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3934 int result;
3935
3936 result = vega20_disable_dpm_tasks(hwmgr);
3937 PP_ASSERT_WITH_CODE((0 == result),
3938 "[PowerOffAsic] Failed to disable DPM!",
3939 );
3940 data->water_marks_bitmap &= ~(WaterMarksLoaded);
3941
3942 return result;
3943 }
3944
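/*
 * Translate a PP_SMC_POWER_PROFILE_* index into the SMU's WORKLOAD_PPLIB_*_BIT
 * position; unrecognized profiles fall back to 0.
 */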
3945 static int conv_power_profile_to_pplib_workload(int power_profile)
3946 {
3947 int pplib_workload = 0;
3948
3949 switch (power_profile) {
3950 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3951 pplib_workload = WORKLOAD_DEFAULT_BIT;
3952 break;
3953 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3954 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3955 break;
3956 case PP_SMC_POWER_PROFILE_POWERSAVING:
3957 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
3958 break;
3959 case PP_SMC_POWER_PROFILE_VIDEO:
3960 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
3961 break;
3962 case PP_SMC_POWER_PROFILE_VR:
3963 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
3964 break;
3965 case PP_SMC_POWER_PROFILE_COMPUTE:
3966 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
3967 break;
3968 case PP_SMC_POWER_PROFILE_CUSTOM:
3969 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
3970 break;
3971 }
3972
3973 return pplib_workload;
3974 }
3975
3976 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3977 {
3978 DpmActivityMonitorCoeffInt_t activity_monitor;
3979 uint32_t i, size = 0;
3980 uint16_t workload_type = 0;
3981 static const char *profile_name[] = {
3982 "BOOTUP_DEFAULT",
3983 "3D_FULL_SCREEN",
3984 "POWER_SAVING",
3985 "VIDEO",
3986 "VR",
3987 "COMPUTE",
3988 "CUSTOM"};
3989 static const char *title[] = {
3990 "PROFILE_INDEX(NAME)",
3991 "CLOCK_TYPE(NAME)",
3992 "FPS",
3993 "UseRlcBusy",
3994 "MinActiveFreqType",
3995 "MinActiveFreq",
3996 "BoosterFreqType",
3997 "BoosterFreq",
3998 "PD_Data_limit_c",
3999 "PD_Data_error_coeff",
4000 "PD_Data_error_rate_coeff"};
4001 int result = 0;
4002
4003 if (!buf)
4004 return -EINVAL;
4005
4006 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
4007 title[0], title[1], title[2], title[3], title[4], title[5],
4008 title[6], title[7], title[8], title[9], title[10]);
4009
4010 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
4011 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4012 workload_type = conv_power_profile_to_pplib_workload(i);
4013 result = vega20_get_activity_monitor_coeff(hwmgr,
4014 (uint8_t *)(&activity_monitor), workload_type);
4015 PP_ASSERT_WITH_CODE(!result,
4016 "[GetPowerProfile] Failed to get activity monitor!",
4017 return result);
4018
4019 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
4020 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
4021
4022 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4023 " ",
4024 0,
4025 "GFXCLK",
4026 activity_monitor.Gfx_FPS,
4027 activity_monitor.Gfx_UseRlcBusy,
4028 activity_monitor.Gfx_MinActiveFreqType,
4029 activity_monitor.Gfx_MinActiveFreq,
4030 activity_monitor.Gfx_BoosterFreqType,
4031 activity_monitor.Gfx_BoosterFreq,
4032 activity_monitor.Gfx_PD_Data_limit_c,
4033 activity_monitor.Gfx_PD_Data_error_coeff,
4034 activity_monitor.Gfx_PD_Data_error_rate_coeff);
4035
4036 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4037 " ",
4038 1,
4039 "SOCCLK",
4040 activity_monitor.Soc_FPS,
4041 activity_monitor.Soc_UseRlcBusy,
4042 activity_monitor.Soc_MinActiveFreqType,
4043 activity_monitor.Soc_MinActiveFreq,
4044 activity_monitor.Soc_BoosterFreqType,
4045 activity_monitor.Soc_BoosterFreq,
4046 activity_monitor.Soc_PD_Data_limit_c,
4047 activity_monitor.Soc_PD_Data_error_coeff,
4048 activity_monitor.Soc_PD_Data_error_rate_coeff);
4049
4050 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4051 " ",
4052 2,
4053 "UCLK",
4054 activity_monitor.Mem_FPS,
4055 activity_monitor.Mem_UseRlcBusy,
4056 activity_monitor.Mem_MinActiveFreqType,
4057 activity_monitor.Mem_MinActiveFreq,
4058 activity_monitor.Mem_BoosterFreqType,
4059 activity_monitor.Mem_BoosterFreq,
4060 activity_monitor.Mem_PD_Data_limit_c,
4061 activity_monitor.Mem_PD_Data_error_coeff,
4062 activity_monitor.Mem_PD_Data_error_rate_coeff);
4063
4064 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4065 " ",
4066 3,
4067 "FCLK",
4068 activity_monitor.Fclk_FPS,
4069 activity_monitor.Fclk_UseRlcBusy,
4070 activity_monitor.Fclk_MinActiveFreqType,
4071 activity_monitor.Fclk_MinActiveFreq,
4072 activity_monitor.Fclk_BoosterFreqType,
4073 activity_monitor.Fclk_BoosterFreq,
4074 activity_monitor.Fclk_PD_Data_limit_c,
4075 activity_monitor.Fclk_PD_Data_error_coeff,
4076 activity_monitor.Fclk_PD_Data_error_rate_coeff);
4077 }
4078
4079 return size;
4080 }
4081
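/*
 * input[size] selects the profile mode. For PP_SMC_POWER_PROFILE_CUSTOM,
 * input[0] picks the clock domain (0 gfxclk, 1 socclk, 2 uclk, 3 fclk) and
 * input[1..9] carry the activity monitor coefficients; size == 0 re-applies a
 * previously configured custom profile.
 */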
4082 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4083 {
4084 DpmActivityMonitorCoeffInt_t activity_monitor;
4085 int workload_type, result = 0;
4086 uint32_t power_profile_mode = input[size];
4087
4088 if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
4089 pr_err("Invalid power profile mode %d\n", power_profile_mode);
4090 return -EINVAL;
4091 }
4092
4093 if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4094 struct vega20_hwmgr *data =
4095 (struct vega20_hwmgr *)(hwmgr->backend);
4096 if (size == 0 && !data->is_custom_profile_set)
4097 return -EINVAL;
4098 if (size < 10 && size != 0)
4099 return -EINVAL;
4100
4101 result = vega20_get_activity_monitor_coeff(hwmgr,
4102 (uint8_t *)(&activity_monitor),
4103 WORKLOAD_PPLIB_CUSTOM_BIT);
4104 PP_ASSERT_WITH_CODE(!result,
4105 "[SetPowerProfile] Failed to get activity monitor!",
4106 return result);
4107
4108 /* If size==0, then we want to apply the already-configured
4109 * CUSTOM profile again. Just apply it, since we checked its
4110 * validity above
4111 */
4112 if (size == 0)
4113 goto out;
4114
4115 switch (input[0]) {
4116 case 0: /* Gfxclk */
4117 activity_monitor.Gfx_FPS = input[1];
4118 activity_monitor.Gfx_UseRlcBusy = input[2];
4119 activity_monitor.Gfx_MinActiveFreqType = input[3];
4120 activity_monitor.Gfx_MinActiveFreq = input[4];
4121 activity_monitor.Gfx_BoosterFreqType = input[5];
4122 activity_monitor.Gfx_BoosterFreq = input[6];
4123 activity_monitor.Gfx_PD_Data_limit_c = input[7];
4124 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
4125 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
4126 break;
4127 case 1: /* Socclk */
4128 activity_monitor.Soc_FPS = input[1];
4129 activity_monitor.Soc_UseRlcBusy = input[2];
4130 activity_monitor.Soc_MinActiveFreqType = input[3];
4131 activity_monitor.Soc_MinActiveFreq = input[4];
4132 activity_monitor.Soc_BoosterFreqType = input[5];
4133 activity_monitor.Soc_BoosterFreq = input[6];
4134 activity_monitor.Soc_PD_Data_limit_c = input[7];
4135 activity_monitor.Soc_PD_Data_error_coeff = input[8];
4136 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
4137 break;
4138 case 2: /* Uclk */
4139 activity_monitor.Mem_FPS = input[1];
4140 activity_monitor.Mem_UseRlcBusy = input[2];
4141 activity_monitor.Mem_MinActiveFreqType = input[3];
4142 activity_monitor.Mem_MinActiveFreq = input[4];
4143 activity_monitor.Mem_BoosterFreqType = input[5];
4144 activity_monitor.Mem_BoosterFreq = input[6];
4145 activity_monitor.Mem_PD_Data_limit_c = input[7];
4146 activity_monitor.Mem_PD_Data_error_coeff = input[8];
4147 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
4148 break;
4149 case 3: /* Fclk */
4150 activity_monitor.Fclk_FPS = input[1];
4151 activity_monitor.Fclk_UseRlcBusy = input[2];
4152 activity_monitor.Fclk_MinActiveFreqType = input[3];
4153 activity_monitor.Fclk_MinActiveFreq = input[4];
4154 activity_monitor.Fclk_BoosterFreqType = input[5];
4155 activity_monitor.Fclk_BoosterFreq = input[6];
4156 activity_monitor.Fclk_PD_Data_limit_c = input[7];
4157 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
4158 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
4159 break;
4160 }
4161
4162 result = vega20_set_activity_monitor_coeff(hwmgr,
4163 (uint8_t *)(&activity_monitor),
4164 WORKLOAD_PPLIB_CUSTOM_BIT);
4165 data->is_custom_profile_set = true;
4166 PP_ASSERT_WITH_CODE(!result,
4167 "[SetPowerProfile] Failed to set activity monitor!",
4168 return result);
4169 }
4170
4171 out:
4172 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4173 workload_type =
4174 conv_power_profile_to_pplib_workload(power_profile_mode);
4175 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4176 1 << workload_type,
4177 NULL);
4178
4179 hwmgr->power_profile_mode = power_profile_mode;
4180
4181 return 0;
4182 }
4183
4184 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4185 uint32_t virtual_addr_low,
4186 uint32_t virtual_addr_hi,
4187 uint32_t mc_addr_low,
4188 uint32_t mc_addr_hi,
4189 uint32_t size)
4190 {
4191 smum_send_msg_to_smc_with_parameter(hwmgr,
4192 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4193 virtual_addr_hi,
4194 NULL);
4195 smum_send_msg_to_smc_with_parameter(hwmgr,
4196 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4197 virtual_addr_low,
4198 NULL);
4199 smum_send_msg_to_smc_with_parameter(hwmgr,
4200 PPSMC_MSG_DramLogSetDramAddrHigh,
4201 mc_addr_hi,
4202 NULL);
4203
4204 smum_send_msg_to_smc_with_parameter(hwmgr,
4205 PPSMC_MSG_DramLogSetDramAddrLow,
4206 mc_addr_low,
4207 NULL);
4208
4209 smum_send_msg_to_smc_with_parameter(hwmgr,
4210 PPSMC_MSG_DramLogSetDramSize,
4211 size,
4212 NULL);
4213 return 0;
4214 }
4215
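/*
 * Thermal trip points come from the pptable (degrees C) and are scaled by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; the emergency limits add the
 * per-sensor CTF offsets.
 */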
4216 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4217 struct PP_TemperatureRange *thermal_data)
4218 {
4219 struct vega20_hwmgr *data =
4220 (struct vega20_hwmgr *)(hwmgr->backend);
4221 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
4222
4223 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4224
4225 thermal_data->max = pp_table->TedgeLimit *
4226 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4227 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
4228 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4229 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
4230 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4231 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
4232 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4233 thermal_data->mem_crit_max = pp_table->ThbmLimit *
4234 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4235 thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
4236 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4237
4238 return 0;
4239 }
4240
4241 static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
4242 {
4243 int res;
4244
4245 	/* I2C bus access can happen very early, when the SMU is not loaded yet */
4246 if (!vega20_is_smc_ram_running(hwmgr))
4247 return 0;
4248
4249 res = smum_send_msg_to_smc_with_parameter(hwmgr,
4250 (acquire ?
4251 PPSMC_MSG_RequestI2CBus :
4252 PPSMC_MSG_ReleaseI2CBus),
4253 0,
4254 NULL);
4255
4256 PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
4257 return res;
4258 }
4259
4260 static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
4261 enum pp_df_cstate state)
4262 {
4263 int ret;
4264
4265 	/* PPSMC_MSG_DFCstateControl is supported by SMU firmware 40.50 and later */
4266 if (hwmgr->smu_version < 0x283200) {
4267 pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
4268 return -EINVAL;
4269 }
4270
4271 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
4272 NULL);
4273 if (ret)
4274 pr_err("SetDfCstate failed!\n");
4275
4276 return ret;
4277 }
4278
4279 static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
4280 uint32_t pstate)
4281 {
4282 int ret;
4283
4284 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
4285 PPSMC_MSG_SetXgmiMode,
4286 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
4287 NULL);
4288 if (ret)
4289 pr_err("SetXgmiPstate failed!\n");
4290
4291 return ret;
4292 }
4293
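/*
 * Prefill the metrics table with 0xFF so any field the driver never writes
 * reads back as all-ones (the convention for "not available"), then fill in
 * the header and timestamp.
 */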
4294 static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
4295 {
4296 memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
4297
4298 gpu_metrics->common_header.structure_size =
4299 sizeof(struct gpu_metrics_v1_0);
4300 gpu_metrics->common_header.format_revision = 1;
4301 gpu_metrics->common_header.content_revision = 0;
4302
4303 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
4304 }
4305
4306 static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
4307 void **table)
4308 {
4309 struct vega20_hwmgr *data =
4310 (struct vega20_hwmgr *)(hwmgr->backend);
4311 struct gpu_metrics_v1_0 *gpu_metrics =
4312 &data->gpu_metrics_table;
4313 SmuMetrics_t metrics;
4314 uint32_t fan_speed_rpm;
4315 int ret;
4316
4317 ret = vega20_get_metrics_table(hwmgr, &metrics, true);
4318 if (ret)
4319 return ret;
4320
4321 vega20_init_gpu_metrics_v1_0(gpu_metrics);
4322
4323 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
4324 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
4325 gpu_metrics->temperature_mem = metrics.TemperatureHBM;
4326 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
4327 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
4328 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
4329
4330 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
4331 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
4332
4333 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
4334
4335 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
4336 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
4337 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
4338
4339 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
4340 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
4341 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
4342 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
4343 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
4344
4345 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
4346
4347 vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
4348 gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
4349
4350 gpu_metrics->pcie_link_width =
4351 vega20_get_current_pcie_link_width(hwmgr);
4352 gpu_metrics->pcie_link_speed =
4353 vega20_get_current_pcie_link_speed(hwmgr);
4354
4355 *table = (void *)gpu_metrics;
4356
4357 return sizeof(struct gpu_metrics_v1_0);
4358 }
4359
4360 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
4361 /* init/fini related */
4362 .backend_init = vega20_hwmgr_backend_init,
4363 .backend_fini = vega20_hwmgr_backend_fini,
4364 .asic_setup = vega20_setup_asic_task,
4365 .power_off_asic = vega20_power_off_asic,
4366 .dynamic_state_management_enable = vega20_enable_dpm_tasks,
4367 .dynamic_state_management_disable = vega20_disable_dpm_tasks,
4368 /* power state related */
4369 .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
4370 .pre_display_config_changed = vega20_pre_display_configuration_changed_task,
4371 .display_config_changed = vega20_display_configuration_changed_task,
4372 .check_smc_update_required_for_display_configuration =
4373 vega20_check_smc_update_required_for_display_configuration,
4374 .notify_smc_display_config_after_ps_adjustment =
4375 vega20_notify_smc_display_config_after_ps_adjustment,
4376 /* export to DAL */
4377 .get_sclk = vega20_dpm_get_sclk,
4378 .get_mclk = vega20_dpm_get_mclk,
4379 .get_dal_power_level = vega20_get_dal_power_level,
4380 .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
4381 .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
4382 .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
4383 .display_clock_voltage_request = vega20_display_clock_voltage_request,
4384 .get_performance_level = vega20_get_performance_level,
4385 /* UMD pstate, profile related */
4386 .force_dpm_level = vega20_dpm_force_dpm_level,
4387 .get_power_profile_mode = vega20_get_power_profile_mode,
4388 .set_power_profile_mode = vega20_set_power_profile_mode,
4389 /* od related */
4390 .set_power_limit = vega20_set_power_limit,
4391 .get_sclk_od = vega20_get_sclk_od,
4392 .set_sclk_od = vega20_set_sclk_od,
4393 .get_mclk_od = vega20_get_mclk_od,
4394 .set_mclk_od = vega20_set_mclk_od,
4395 .odn_edit_dpm_table = vega20_odn_edit_dpm_table,
4396 	/* for sysfs to retrieve/set gfxclk/memclk */
4397 .force_clock_level = vega20_force_clock_level,
4398 .print_clock_levels = vega20_print_clock_levels,
4399 .read_sensor = vega20_read_sensor,
4400 .get_ppfeature_status = vega20_get_ppfeature_status,
4401 .set_ppfeature_status = vega20_set_ppfeature_status,
4402 /* powergate related */
4403 .powergate_uvd = vega20_power_gate_uvd,
4404 .powergate_vce = vega20_power_gate_vce,
4405 /* thermal related */
4406 .start_thermal_controller = vega20_start_thermal_controller,
4407 .stop_thermal_controller = vega20_thermal_stop_thermal_controller,
4408 .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
4409 .register_irq_handlers = smu9_register_irq_handlers,
4410 .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
4411 /* fan control related */
4412 .get_fan_speed_pwm = vega20_fan_ctrl_get_fan_speed_pwm,
4413 .set_fan_speed_pwm = vega20_fan_ctrl_set_fan_speed_pwm,
4414 .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
4415 .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
4416 .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
4417 .get_fan_control_mode = vega20_get_fan_control_mode,
4418 .set_fan_control_mode = vega20_set_fan_control_mode,
4419 /* smu memory related */
4420 .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
4421 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
4422 /* BACO related */
4423 .get_asic_baco_capability = vega20_baco_get_capability,
4424 .get_asic_baco_state = vega20_baco_get_state,
4425 .set_asic_baco_state = vega20_baco_set_state,
4426 .set_mp1_state = vega20_set_mp1_state,
4427 .smu_i2c_bus_access = vega20_smu_i2c_bus_access,
4428 .set_df_cstate = vega20_set_df_cstate,
4429 .set_xgmi_pstate = vega20_set_xgmi_pstate,
4430 .get_gpu_metrics = vega20_get_gpu_metrics,
4431 };
4432
4433 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
4434 {
4435 hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
4436 hwmgr->pptable_func = &vega20_pptable_funcs;
4437
4438 return 0;
4439 }
4440