1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 
29 #include "hwmgr.h"
30 #include "amd_powerplay.h"
31 #include "hardwaremanager.h"
32 #include "ppatomfwctrl.h"
33 #include "atomfirmware.h"
34 #include "cgs_common.h"
35 #include "vega10_powertune.h"
36 #include "smu9.h"
37 #include "smu9_driver_if.h"
38 #include "vega10_inc.h"
39 #include "soc15_common.h"
40 #include "pppcielanes.h"
41 #include "vega10_hwmgr.h"
42 #include "vega10_processpptables.h"
43 #include "vega10_pptable.h"
44 #include "vega10_thermal.h"
45 #include "pp_debug.h"
46 #include "amd_pcie_helpers.h"
47 #include "ppinterrupt.h"
48 #include "pp_overdriver.h"
49 #include "pp_thermal.h"
50 
51 #include "smuio/smuio_9_0_offset.h"
52 #include "smuio/smuio_9_0_sh_mask.h"
53 
54 #define HBM_MEMORY_CHANNEL_WIDTH    128
55 
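/* Number of memory channels for each DF IntLvNumChan encoding read from DramBaseAddress0 */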
56 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
57 
58 #define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
59 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
60 
61 //DF_CS_AON0_DramBaseAddress0
62 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
63 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
64 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
65 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
66 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
67 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
68 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
69 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
70 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
71 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
72 
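/* Magic value used to validate that a pp_hw_power_state is a vega10 power state */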
73 static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
74 
75 struct vega10_power_state *cast_phw_vega10_power_state(
76 				  struct pp_hw_power_state *hw_ps)
77 {
78 	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
79 				"Invalid Powerstate Type!",
80 				 return NULL;);
81 
82 	return (struct vega10_power_state *)hw_ps;
83 }
84 
85 const struct vega10_power_state *cast_const_phw_vega10_power_state(
86 				 const struct pp_hw_power_state *hw_ps)
87 {
88 	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
89 				"Invalid Powerstate Type!",
90 				 return NULL;);
91 
92 	return (const struct vega10_power_state *)hw_ps;
93 }
94 
95 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
96 {
97 	struct vega10_hwmgr *data = hwmgr->backend;
98 
99 	data->registry_data.sclk_dpm_key_disabled =
100 			hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
101 	data->registry_data.socclk_dpm_key_disabled =
102 			hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
103 	data->registry_data.mclk_dpm_key_disabled =
104 			hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
105 	data->registry_data.pcie_dpm_key_disabled =
106 			hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
107 
108 	data->registry_data.dcefclk_dpm_key_disabled =
109 			hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
110 
111 	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
112 		data->registry_data.power_containment_support = 1;
113 		data->registry_data.enable_pkg_pwr_tracking_feature = 1;
114 		data->registry_data.enable_tdc_limit_feature = 1;
115 	}
116 
117 	data->registry_data.clock_stretcher_support =
118 			hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
119 
120 	data->registry_data.ulv_support =
121 			hwmgr->feature_mask & PP_ULV_MASK ? true : false;
122 
123 	data->registry_data.sclk_deep_sleep_support =
124 			hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
125 
126 	data->registry_data.disable_water_mark = 0;
127 
128 	data->registry_data.fan_control_support = 1;
129 	data->registry_data.thermal_support = 1;
130 	data->registry_data.fw_ctf_enabled = 1;
131 
132 	data->registry_data.avfs_support = 1;
133 	data->registry_data.led_dpm_enabled = 1;
134 
135 	data->registry_data.vr0hot_enabled = 1;
136 	data->registry_data.vr1hot_enabled = 1;
137 	data->registry_data.regulator_hot_gpio_support = 1;
138 
139 	data->registry_data.didt_support = 1;
140 	if (data->registry_data.didt_support) {
141 		data->registry_data.didt_mode = 6;
142 		data->registry_data.sq_ramping_support = 1;
143 		data->registry_data.db_ramping_support = 0;
144 		data->registry_data.td_ramping_support = 0;
145 		data->registry_data.tcp_ramping_support = 0;
146 		data->registry_data.dbr_ramping_support = 0;
147 		data->registry_data.edc_didt_support = 1;
148 		data->registry_data.gc_didt_support = 0;
149 		data->registry_data.psm_didt_support = 0;
150 	}
151 
152 	data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
153 	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
154 	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
155 	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
156 	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
157 	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158 	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
159 	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
160 	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
161 	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
162 	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
163 	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
164 	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
165 
166 	data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
167 	data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
168 	data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
169 	data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
170 }
171 
172 static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
173 {
174 	struct vega10_hwmgr *data = hwmgr->backend;
175 	struct phm_ppt_v2_information *table_info =
176 			(struct phm_ppt_v2_information *)hwmgr->pptable;
177 	struct amdgpu_device *adev = hwmgr->adev;
178 
179 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
180 			PHM_PlatformCaps_SclkDeepSleep);
181 
182 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
183 			PHM_PlatformCaps_DynamicPatchPowerState);
184 
185 	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
186 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
187 				PHM_PlatformCaps_ControlVDDCI);
188 
189 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
190 			PHM_PlatformCaps_EnableSMU7ThermalManagement);
191 
192 	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
193 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
194 				PHM_PlatformCaps_UVDPowerGating);
195 
196 	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
197 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
198 				PHM_PlatformCaps_VCEPowerGating);
199 
200 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
201 			PHM_PlatformCaps_UnTabledHardwareInterface);
202 
203 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 			PHM_PlatformCaps_FanSpeedInTableIsRPM);
205 
206 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
207 			PHM_PlatformCaps_ODFuzzyFanControlSupport);
208 
209 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
210 				PHM_PlatformCaps_DynamicPowerManagement);
211 
212 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
213 			PHM_PlatformCaps_SMC);
214 
215 	/* power tune caps */
216 	/* assume disabled */
217 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
218 			PHM_PlatformCaps_PowerContainment);
219 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
220 			PHM_PlatformCaps_DiDtSupport);
221 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
222 			PHM_PlatformCaps_SQRamping);
223 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224 			PHM_PlatformCaps_DBRamping);
225 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 			PHM_PlatformCaps_TDRamping);
227 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 			PHM_PlatformCaps_TCPRamping);
229 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230 			PHM_PlatformCaps_DBRRamping);
231 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232 			PHM_PlatformCaps_DiDtEDCEnable);
233 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234 			PHM_PlatformCaps_GCEDC);
235 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236 			PHM_PlatformCaps_PSM);
237 
238 	if (data->registry_data.didt_support) {
239 		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
240 		if (data->registry_data.sq_ramping_support)
241 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
242 		if (data->registry_data.db_ramping_support)
243 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
244 		if (data->registry_data.td_ramping_support)
245 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
246 		if (data->registry_data.tcp_ramping_support)
247 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
248 		if (data->registry_data.dbr_ramping_support)
249 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
250 		if (data->registry_data.edc_didt_support)
251 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
252 		if (data->registry_data.gc_didt_support)
253 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
254 		if (data->registry_data.psm_didt_support)
255 			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
256 	}
257 
258 	if (data->registry_data.power_containment_support)
259 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
260 				PHM_PlatformCaps_PowerContainment);
261 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
262 			PHM_PlatformCaps_CAC);
263 
264 	if (table_info->tdp_table->usClockStretchAmount &&
265 			data->registry_data.clock_stretcher_support)
266 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
267 				PHM_PlatformCaps_ClockStretcher);
268 
269 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
270 			PHM_PlatformCaps_RegulatorHot);
271 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
272 			PHM_PlatformCaps_AutomaticDCTransition);
273 
274 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
275 			PHM_PlatformCaps_UVDDPM);
276 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
277 			PHM_PlatformCaps_VCEDPM);
278 
279 	return 0;
280 }
281 
282 static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
283 {
284 	struct vega10_hwmgr *data = hwmgr->backend;
285 	struct phm_ppt_v2_information *table_info =
286 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
287 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
288 	struct vega10_odn_vddc_lookup_table *od_lookup_table;
289 	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
290 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
291 	struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
292 	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
293 	uint32_t i;
294 	int result;
295 
296 	result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
297 	if (!result) {
298 		data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
299 		data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
300 	}
301 
302 	od_lookup_table = &odn_table->vddc_lookup_table;
303 	vddc_lookup_table = table_info->vddc_lookup_table;
304 
305 	for (i = 0; i < vddc_lookup_table->count; i++)
306 		od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
307 
308 	od_lookup_table->count = vddc_lookup_table->count;
309 
310 	dep_table[0] = table_info->vdd_dep_on_sclk;
311 	dep_table[1] = table_info->vdd_dep_on_mclk;
312 	dep_table[2] = table_info->vdd_dep_on_socclk;
313 	od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
314 	od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
315 	od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
316 
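	/* Seed the overdrive (ODN) tables from the stock sclk/mclk/socclk dependency tables */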
317 	for (i = 0; i < 3; i++)
318 		smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
319 
320 	if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
321 		odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
322 	if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
323 		odn_table->min_vddc = dep_table[0]->entries[0].vddc;
324 
325 	i = od_table[2]->count - 1;
326 	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
327 					hwmgr->platform_descriptor.overdriveLimit.memoryClock :
328 					od_table[2]->entries[i].clk;
329 	od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
330 					odn_table->max_vddc :
331 					od_table[2]->entries[i].vddc;
332 
333 	return 0;
334 }
335 
336 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
337 {
338 	struct vega10_hwmgr *data = hwmgr->backend;
339 	int i;
340 	uint32_t sub_vendor_id, hw_revision;
341 	struct amdgpu_device *adev = hwmgr->adev;
342 
343 	vega10_initialize_power_tune_defaults(hwmgr);
344 
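	/* Start with every SMU feature unknown and disabled; each feature's bitmap bit matches its table index */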
345 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
346 		data->smu_features[i].smu_feature_id = 0xffff;
347 		data->smu_features[i].smu_feature_bitmap = 1 << i;
348 		data->smu_features[i].enabled = false;
349 		data->smu_features[i].supported = false;
350 	}
351 
352 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
353 			FEATURE_DPM_PREFETCHER_BIT;
354 	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
355 			FEATURE_DPM_GFXCLK_BIT;
356 	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
357 			FEATURE_DPM_UCLK_BIT;
358 	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
359 			FEATURE_DPM_SOCCLK_BIT;
360 	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
361 			FEATURE_DPM_UVD_BIT;
362 	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
363 			FEATURE_DPM_VCE_BIT;
364 	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
365 			FEATURE_DPM_MP0CLK_BIT;
366 	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
367 			FEATURE_DPM_LINK_BIT;
368 	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
369 			FEATURE_DPM_DCEFCLK_BIT;
370 	data->smu_features[GNLD_ULV].smu_feature_id =
371 			FEATURE_ULV_BIT;
372 	data->smu_features[GNLD_AVFS].smu_feature_id =
373 			FEATURE_AVFS_BIT;
374 	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
375 			FEATURE_DS_GFXCLK_BIT;
376 	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
377 			FEATURE_DS_SOCCLK_BIT;
378 	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
379 			FEATURE_DS_LCLK_BIT;
380 	data->smu_features[GNLD_PPT].smu_feature_id =
381 			FEATURE_PPT_BIT;
382 	data->smu_features[GNLD_TDC].smu_feature_id =
383 			FEATURE_TDC_BIT;
384 	data->smu_features[GNLD_THERMAL].smu_feature_id =
385 			FEATURE_THERMAL_BIT;
386 	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
387 			FEATURE_GFX_PER_CU_CG_BIT;
388 	data->smu_features[GNLD_RM].smu_feature_id =
389 			FEATURE_RM_BIT;
390 	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
391 			FEATURE_DS_DCEFCLK_BIT;
392 	data->smu_features[GNLD_ACDC].smu_feature_id =
393 			FEATURE_ACDC_BIT;
394 	data->smu_features[GNLD_VR0HOT].smu_feature_id =
395 			FEATURE_VR0HOT_BIT;
396 	data->smu_features[GNLD_VR1HOT].smu_feature_id =
397 			FEATURE_VR1HOT_BIT;
398 	data->smu_features[GNLD_FW_CTF].smu_feature_id =
399 			FEATURE_FW_CTF_BIT;
400 	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
401 			FEATURE_LED_DISPLAY_BIT;
402 	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
403 			FEATURE_FAN_CONTROL_BIT;
404 	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
405 	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
406 	data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
407 
408 	if (!data->registry_data.prefetcher_dpm_key_disabled)
409 		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
410 
411 	if (!data->registry_data.sclk_dpm_key_disabled)
412 		data->smu_features[GNLD_DPM_GFXCLK].supported = true;
413 
414 	if (!data->registry_data.mclk_dpm_key_disabled)
415 		data->smu_features[GNLD_DPM_UCLK].supported = true;
416 
417 	if (!data->registry_data.socclk_dpm_key_disabled)
418 		data->smu_features[GNLD_DPM_SOCCLK].supported = true;
419 
420 	if (PP_CAP(PHM_PlatformCaps_UVDDPM))
421 		data->smu_features[GNLD_DPM_UVD].supported = true;
422 
423 	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
424 		data->smu_features[GNLD_DPM_VCE].supported = true;
425 
426 	if (!data->registry_data.pcie_dpm_key_disabled)
427 		data->smu_features[GNLD_DPM_LINK].supported = true;
428 
429 	if (!data->registry_data.dcefclk_dpm_key_disabled)
430 		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
431 
432 	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
433 	    data->registry_data.sclk_deep_sleep_support) {
434 		data->smu_features[GNLD_DS_GFXCLK].supported = true;
435 		data->smu_features[GNLD_DS_SOCCLK].supported = true;
436 		data->smu_features[GNLD_DS_LCLK].supported = true;
437 		data->smu_features[GNLD_DS_DCEFCLK].supported = true;
438 	}
439 
440 	if (data->registry_data.enable_pkg_pwr_tracking_feature)
441 		data->smu_features[GNLD_PPT].supported = true;
442 
443 	if (data->registry_data.enable_tdc_limit_feature)
444 		data->smu_features[GNLD_TDC].supported = true;
445 
446 	if (data->registry_data.thermal_support)
447 		data->smu_features[GNLD_THERMAL].supported = true;
448 
449 	if (data->registry_data.fan_control_support)
450 		data->smu_features[GNLD_FAN_CONTROL].supported = true;
451 
452 	if (data->registry_data.fw_ctf_enabled)
453 		data->smu_features[GNLD_FW_CTF].supported = true;
454 
455 	if (data->registry_data.avfs_support)
456 		data->smu_features[GNLD_AVFS].supported = true;
457 
458 	if (data->registry_data.led_dpm_enabled)
459 		data->smu_features[GNLD_LED_DISPLAY].supported = true;
460 
461 	if (data->registry_data.vr1hot_enabled)
462 		data->smu_features[GNLD_VR1HOT].supported = true;
463 
464 	if (data->registry_data.vr0hot_enabled)
465 		data->smu_features[GNLD_VR0HOT].supported = true;
466 
467 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
468 	hwmgr->smu_version = smum_get_argument(hwmgr);
469 	/* ACG firmware has major version 5 */
470 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
471 		data->smu_features[GNLD_ACG].supported = true;
472 	if (data->registry_data.didt_support)
473 		data->smu_features[GNLD_DIDT].supported = true;
474 
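	/* PCC limit control is only exposed on revision 0 boards of these device IDs that are not AMD reference designs (subsystem vendor 0x1002) */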
475 	hw_revision = adev->pdev->revision;
476 	sub_vendor_id = adev->pdev->subsystem_vendor;
477 
478 	if ((hwmgr->chip_id == 0x6862 ||
479 		hwmgr->chip_id == 0x6861 ||
480 		hwmgr->chip_id == 0x6868) &&
481 		(hw_revision == 0) &&
482 		(sub_vendor_id != 0x1002))
483 		data->smu_features[GNLD_PCC_LIMIT].supported = true;
484 }
485 
486 #ifdef PPLIB_VEGA10_EVV_SUPPORT
487 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
488 	phm_ppt_v1_voltage_lookup_table *lookup_table,
489 	uint16_t virtual_voltage_id, int32_t *socclk)
490 {
491 	uint8_t entry_id;
492 	uint8_t voltage_id;
493 	struct phm_ppt_v2_information *table_info =
494 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
495 
496 	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
497 			"Lookup table is empty",
498 			return -EINVAL);
499 
500 	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
501 	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
502 		voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
503 		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
504 			break;
505 	}
506 
507 	PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
508 			"Can't find requested voltage id in vdd_dep_on_socclk table!",
509 			return -EINVAL);
510 
511 	*socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
512 
513 	return 0;
514 }
515 
516 #define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
517 /**
518 * Get Leakage VDDC based on leakage ID.
519 *
520 * @param    hwmgr  the address of the powerplay hardware manager.
521 * @return   always 0.
522 */
523 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
524 {
525 	struct vega10_hwmgr *data = hwmgr->backend;
526 	uint16_t vv_id;
527 	uint32_t vddc = 0;
528 	uint16_t i, j;
529 	uint32_t sclk = 0;
530 	struct phm_ppt_v2_information *table_info =
531 			(struct phm_ppt_v2_information *)hwmgr->pptable;
532 	struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
533 			table_info->vdd_dep_on_socclk;
534 	int result;
535 
536 	for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
537 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
538 
539 		if (!vega10_get_socclk_for_voltage_evv(hwmgr,
540 				table_info->vddc_lookup_table, vv_id, &sclk)) {
541 			if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
542 				for (j = 1; j < socclk_table->count; j++) {
543 					if (socclk_table->entries[j].clk == sclk &&
544 							socclk_table->entries[j].cks_enable == 0) {
545 						sclk += 5000;
546 						break;
547 					}
548 				}
549 			}
550 
551 			PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
552 					VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
553 					"Error retrieving EVV voltage value!",
554 					continue);
555 
556 
557 			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
558 			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
559 					"Invalid VDDC value", result = -EINVAL;);
560 
561 			/* the voltage should not be zero nor equal to leakage ID */
562 			if (vddc != 0 && vddc != vv_id) {
563 				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
564 				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
565 				data->vddc_leakage.count++;
566 			}
567 		}
568 	}
569 
570 	return 0;
571 }
572 
573 /**
574  * Change virtual leakage voltage to actual value.
575  *
576  * @param     hwmgr  the address of the powerplay hardware manager.
577  * @param     voltage  pointer to the voltage value to patch
578  * @param     leakage_table  pointer to the leakage voltage table
579  */
580 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
581 		uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
582 {
583 	uint32_t index;
584 
585 	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
586 	for (index = 0; index < leakage_table->count; index++) {
587 		/* if this voltage matches a leakage voltage ID */
588 		/* patch with actual leakage voltage */
589 		if (leakage_table->leakage_id[index] == *voltage) {
590 			*voltage = leakage_table->actual_voltage[index];
591 			break;
592 		}
593 	}
594 
595 	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
596 		pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
597 }
598 
599 /**
600 * Patch voltage lookup table by EVV leakages.
601 *
602 * @param     hwmgr  the address of the powerplay hardware manager.
603 * @param     lookup_table  pointer to the voltage lookup table to patch
604 * @param     leakage_table  pointer to the leakage voltage table
605 * @return     always 0
606 */
607 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
608 		phm_ppt_v1_voltage_lookup_table *lookup_table,
609 		struct vega10_leakage_voltage *leakage_table)
610 {
611 	uint32_t i;
612 
613 	for (i = 0; i < lookup_table->count; i++)
614 		vega10_patch_with_vdd_leakage(hwmgr,
615 				&lookup_table->entries[i].us_vdd, leakage_table);
616 
617 	return 0;
618 }
619 
620 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
621 		struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
622 		uint16_t *vddc)
623 {
624 	vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
625 
626 	return 0;
627 }
628 #endif
629 
630 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
631 		struct pp_hwmgr *hwmgr)
632 {
633 	uint8_t entry_id, voltage_id;
634 	unsigned i;
635 	struct phm_ppt_v2_information *table_info =
636 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
637 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
638 			table_info->mm_dep_table;
639 	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
640 			table_info->vdd_dep_on_mclk;
641 
642 	for (i = 0; i < 6; i++) {
643 		struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
644 		switch (i) {
645 			case 0: vdt = table_info->vdd_dep_on_socclk; break;
646 			case 1: vdt = table_info->vdd_dep_on_sclk; break;
647 			case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
648 			case 3: vdt = table_info->vdd_dep_on_pixclk; break;
649 			case 4: vdt = table_info->vdd_dep_on_dispclk; break;
650 			case 5: vdt = table_info->vdd_dep_on_phyclk; break;
651 		}
652 
653 		for (entry_id = 0; entry_id < vdt->count; entry_id++) {
654 			voltage_id = vdt->entries[entry_id].vddInd;
655 			vdt->entries[entry_id].vddc =
656 					table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
657 		}
658 	}
659 
660 	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
661 		voltage_id = mm_table->entries[entry_id].vddcInd;
662 		mm_table->entries[entry_id].vddc =
663 			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
664 	}
665 
666 	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
667 		voltage_id = mclk_table->entries[entry_id].vddInd;
668 		mclk_table->entries[entry_id].vddc =
669 				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
670 		voltage_id = mclk_table->entries[entry_id].vddciInd;
671 		mclk_table->entries[entry_id].vddci =
672 				table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
673 		voltage_id = mclk_table->entries[entry_id].mvddInd;
674 		mclk_table->entries[entry_id].mvdd =
675 				table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
676 	}
677 
678 
679 	return 0;
680 
681 }
682 
683 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
684 		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
685 {
686 	uint32_t table_size, i, j;
687 	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
688 
689 	PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
690 		"Lookup table is empty", return -EINVAL);
691 
692 	table_size = lookup_table->count;
693 
694 	/* Sort entries in ascending order of voltage (simple insertion sort) */
695 	for (i = 0; i < table_size - 1; i++) {
696 		for (j = i + 1; j > 0; j--) {
697 			if (lookup_table->entries[j].us_vdd <
698 					lookup_table->entries[j - 1].us_vdd) {
699 				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
700 				lookup_table->entries[j - 1] = lookup_table->entries[j];
701 				lookup_table->entries[j] = tmp_voltage_lookup_record;
702 			}
703 		}
704 	}
705 
706 	return 0;
707 }
708 
709 static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
710 {
711 	int result = 0;
712 	int tmp_result;
713 	struct phm_ppt_v2_information *table_info =
714 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
715 #ifdef PPLIB_VEGA10_EVV_SUPPORT
716 	struct vega10_hwmgr *data = hwmgr->backend;
717 
718 	tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
719 			table_info->vddc_lookup_table, &(data->vddc_leakage));
720 	if (tmp_result)
721 		result = tmp_result;
722 
723 	tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
724 			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
725 	if (tmp_result)
726 		result = tmp_result;
727 #endif
728 
729 	tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
730 	if (tmp_result)
731 		result = tmp_result;
732 
733 	tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
734 	if (tmp_result)
735 		result = tmp_result;
736 
737 	return result;
738 }
739 
740 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
741 {
742 	struct phm_ppt_v2_information *table_info =
743 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
744 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
745 			table_info->vdd_dep_on_socclk;
746 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
747 			table_info->vdd_dep_on_mclk;
748 
749 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
750 		"VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
751 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
752 		"VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
753 
754 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
755 		"VDD dependency on MCLK table is missing.  This table is mandatory", return -EINVAL);
756 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
757 		"VDD dependency on MCLK table is empty.  This table is mandatory", return -EINVAL);
758 
759 	table_info->max_clock_voltage_on_ac.sclk =
760 		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
761 	table_info->max_clock_voltage_on_ac.mclk =
762 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
763 	table_info->max_clock_voltage_on_ac.vddc =
764 		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
765 	table_info->max_clock_voltage_on_ac.vddci =
766 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
767 
768 	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
769 		table_info->max_clock_voltage_on_ac.sclk;
770 	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
771 		table_info->max_clock_voltage_on_ac.mclk;
772 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
773 		table_info->max_clock_voltage_on_ac.vddc;
774 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
775 		table_info->max_clock_voltage_on_ac.vddci;
776 
777 	return 0;
778 }
779 
780 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
781 {
782 	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
783 	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
784 
785 	kfree(hwmgr->backend);
786 	hwmgr->backend = NULL;
787 
788 	return 0;
789 }
790 
791 static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
792 {
793 	int result = 0;
794 	struct vega10_hwmgr *data;
795 	uint32_t config_telemetry = 0;
796 	struct pp_atomfwctrl_voltage_table vol_table;
797 	struct amdgpu_device *adev = hwmgr->adev;
798 
799 	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
800 	if (data == NULL)
801 		return -ENOMEM;
802 
803 	hwmgr->backend = data;
804 
805 	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
806 	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
807 	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
808 
809 	vega10_set_default_registry_data(hwmgr);
810 	data->disable_dpm_mask = 0xff;
811 
812 	/* need to set voltage control types before EVV patching */
813 	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
814 	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
815 	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
816 
817 	/* VDDCR_SOC */
818 	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
819 			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
820 		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
821 				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
822 				&vol_table)) {
823 			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
824 					(vol_table.telemetry_offset & 0xff);
825 			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
826 		}
827 	} else {
828 		kfree(hwmgr->backend);
829 		hwmgr->backend = NULL;
830 		PP_ASSERT_WITH_CODE(false,
831 				"VDDCR_SOC is not SVID2!",
832 				return -1);
833 	}
834 
835 	/* MVDDC */
836 	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
837 			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
838 		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
839 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
840 				&vol_table)) {
841 			config_telemetry |=
842 					((vol_table.telemetry_slope << 24) & 0xff000000) |
843 					((vol_table.telemetry_offset << 16) & 0xff0000);
844 			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
845 		}
846 	}
847 
848 	 /* VDDCI_MEM */
849 	if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
850 		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
851 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
852 			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
853 	}
854 
855 	data->config_telemetry = config_telemetry;
856 
857 	vega10_set_features_platform_caps(hwmgr);
858 
859 	vega10_init_dpm_defaults(hwmgr);
860 
861 #ifdef PPLIB_VEGA10_EVV_SUPPORT
862 	/* Get leakage voltage based on leakage ID. */
863 	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
864 			"Get EVV Voltage Failed.  Abort Driver loading!",
865 			return -1);
866 #endif
867 
868 	/* Patch our voltage dependency table with actual leakage voltage
869 	 * We need to perform leakage translation before it's used by other functions
870 	 */
871 	vega10_complete_dependency_tables(hwmgr);
872 
873 	/* Parse pptable data read from VBIOS */
874 	vega10_set_private_data_based_on_pptable(hwmgr);
875 
876 	data->is_tlu_enabled = false;
877 
878 	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
879 			VEGA10_MAX_HARDWARE_POWERLEVELS;
880 	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
881 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
882 
883 	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
884 	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz. */
885 	hwmgr->platform_descriptor.clockStep.engineClock = 500;
886 	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
887 
888 	data->total_active_cus = adev->gfx.cu_info.number;
889 	/* Setup default Overdrive Fan control settings */
890 	data->odn_fan_table.target_fan_speed =
891 			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
892 	data->odn_fan_table.target_temperature =
893 			hwmgr->thermal_controller.
894 			advanceFanControlParameters.ucTargetTemperature;
895 	data->odn_fan_table.min_performance_clock =
896 			hwmgr->thermal_controller.advanceFanControlParameters.
897 			ulMinFanSCLKAcousticLimit;
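	/* Convert the minimum fan PWM limit (percent of max) into an RPM limit */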
898 	data->odn_fan_table.min_fan_limit =
899 			hwmgr->thermal_controller.
900 			advanceFanControlParameters.usFanPWMMinLimit *
901 			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
902 
903 	data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
904 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
905 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
906 	PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
907 			"Mem Channel Index Exceeded maximum!",
908 			return -EINVAL);
909 
910 	return result;
911 }
912 
913 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
914 {
915 	struct vega10_hwmgr *data = hwmgr->backend;
916 
917 	data->low_sclk_interrupt_threshold = 0;
918 
919 	return 0;
920 }
921 
922 static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
923 {
924 	struct vega10_hwmgr *data = hwmgr->backend;
925 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
926 
927 	struct pp_atomfwctrl_voltage_table table;
928 	uint8_t i, j;
929 	uint32_t mask = 0;
930 	uint32_t tmp;
931 	int32_t ret = 0;
932 
933 	ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
934 						VOLTAGE_OBJ_GPIO_LUT, &table);
935 
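	/* Pack the indices of the first three GPIO pins set in mask_low into one byte each */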
936 	if (!ret) {
937 		tmp = table.mask_low;
938 		for (i = 0, j = 0; i < 32; i++) {
939 			if (tmp & 1) {
940 				mask |= (uint32_t)(i << (8 * j));
941 				if (++j >= 3)
942 					break;
943 			}
944 			tmp >>= 1;
945 		}
946 	}
947 
948 	pp_table->LedPin0 = (uint8_t)(mask & 0xff);
949 	pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
950 	pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
951 	return 0;
952 }
953 
954 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
955 {
956 	PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
957 			"Failed to init sclk threshold!",
958 			return -EINVAL);
959 
960 	PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
961 			"Failed to set up led dpm config!",
962 			return -EINVAL);
963 
964 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
965 
966 	return 0;
967 }
968 
969 /**
970 * Remove repeated voltage values and create table with unique values.
971 *
972 * @param    hwmgr  the address of the powerplay hardware manager.
973 * @param    vol_table  pointer to the voltage table to trim in place
974 * @return    0 on success
975 */
976 
977 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
978 		struct pp_atomfwctrl_voltage_table *vol_table)
979 {
980 	uint32_t i, j;
981 	uint16_t vvalue;
982 	bool found = false;
983 	struct pp_atomfwctrl_voltage_table *table;
984 
985 	PP_ASSERT_WITH_CODE(vol_table,
986 			"Voltage Table empty.", return -EINVAL);
987 	table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
988 			GFP_KERNEL);
989 
990 	if (!table)
991 		return -ENOMEM;
992 
993 	table->mask_low = vol_table->mask_low;
994 	table->phase_delay = vol_table->phase_delay;
995 
996 	for (i = 0; i < vol_table->count; i++) {
997 		vvalue = vol_table->entries[i].value;
998 		found = false;
999 
1000 		for (j = 0; j < table->count; j++) {
1001 			if (vvalue == table->entries[j].value) {
1002 				found = true;
1003 				break;
1004 			}
1005 		}
1006 
1007 		if (!found) {
1008 			table->entries[table->count].value = vvalue;
1009 			table->entries[table->count].smio_low =
1010 					vol_table->entries[i].smio_low;
1011 			table->count++;
1012 		}
1013 	}
1014 
1015 	memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
1016 	kfree(table);
1017 
1018 	return 0;
1019 }
1020 
1021 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
1022 		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1023 		struct pp_atomfwctrl_voltage_table *vol_table)
1024 {
1025 	int i;
1026 
1027 	PP_ASSERT_WITH_CODE(dep_table->count,
1028 			"Voltage Dependency Table empty.",
1029 			return -EINVAL);
1030 
1031 	vol_table->mask_low = 0;
1032 	vol_table->phase_delay = 0;
1033 	vol_table->count = dep_table->count;
1034 
1035 	for (i = 0; i < vol_table->count; i++) {
1036 		vol_table->entries[i].value = dep_table->entries[i].mvdd;
1037 		vol_table->entries[i].smio_low = 0;
1038 	}
1039 
1040 	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1041 			vol_table),
1042 			"Failed to trim MVDD Table!",
1043 			return -1);
1044 
1045 	return 0;
1046 }
1047 
1048 static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1049 		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1050 		struct pp_atomfwctrl_voltage_table *vol_table)
1051 {
1052 	uint32_t i;
1053 
1054 	PP_ASSERT_WITH_CODE(dep_table->count,
1055 			"Voltage Dependency Table empty.",
1056 			return -EINVAL);
1057 
1058 	vol_table->mask_low = 0;
1059 	vol_table->phase_delay = 0;
1060 	vol_table->count = dep_table->count;
1061 
1062 	for (i = 0; i < dep_table->count; i++) {
1063 		vol_table->entries[i].value = dep_table->entries[i].vddci;
1064 		vol_table->entries[i].smio_low = 0;
1065 	}
1066 
1067 	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1068 			"Failed to trim VDDCI table.",
1069 			return -1);
1070 
1071 	return 0;
1072 }
1073 
1074 static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1075 		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1076 		struct pp_atomfwctrl_voltage_table *vol_table)
1077 {
1078 	int i;
1079 
1080 	PP_ASSERT_WITH_CODE(dep_table->count,
1081 			"Voltage Dependency Table empty.",
1082 			return -EINVAL);
1083 
1084 	vol_table->mask_low = 0;
1085 	vol_table->phase_delay = 0;
1086 	vol_table->count = dep_table->count;
1087 
1088 	for (i = 0; i < vol_table->count; i++) {
1089 		vol_table->entries[i].value = dep_table->entries[i].vddc;
1090 		vol_table->entries[i].smio_low = 0;
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 /* ---- Voltage Tables ----
1097  * If the voltage table is bigger than what
1098  * will fit into the state table on the SMC,
1099  * keep only the highest entries.
1100  */
1101 static void vega10_trim_voltage_table_to_fit_state_table(
1102 		struct pp_hwmgr *hwmgr,
1103 		uint32_t max_vol_steps,
1104 		struct pp_atomfwctrl_voltage_table *vol_table)
1105 {
1106 	unsigned int i, diff;
1107 
1108 	if (vol_table->count <= max_vol_steps)
1109 		return;
1110 
1111 	diff = vol_table->count - max_vol_steps;
1112 
1113 	for (i = 0; i < max_vol_steps; i++)
1114 		vol_table->entries[i] = vol_table->entries[i + diff];
1115 
1116 	vol_table->count = max_vol_steps;
1117 }
1118 
1119 /**
1120 * Create Voltage Tables.
1121 *
1122 * @param    hwmgr  the address of the powerplay hardware manager.
1123 * @return   always 0
1124 */
1125 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1126 {
1127 	struct vega10_hwmgr *data = hwmgr->backend;
1128 	struct phm_ppt_v2_information *table_info =
1129 			(struct phm_ppt_v2_information *)hwmgr->pptable;
1130 	int result;
1131 
1132 	if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1133 			data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1134 		result = vega10_get_mvdd_voltage_table(hwmgr,
1135 				table_info->vdd_dep_on_mclk,
1136 				&(data->mvdd_voltage_table));
1137 		PP_ASSERT_WITH_CODE(!result,
1138 				"Failed to retrieve MVDDC table!",
1139 				return result);
1140 	}
1141 
1142 	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1143 		result = vega10_get_vddci_voltage_table(hwmgr,
1144 				table_info->vdd_dep_on_mclk,
1145 				&(data->vddci_voltage_table));
1146 		PP_ASSERT_WITH_CODE(!result,
1147 				"Failed to retrieve VDDCI_MEM table!",
1148 				return result);
1149 	}
1150 
1151 	if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1152 			data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1153 		result = vega10_get_vdd_voltage_table(hwmgr,
1154 				table_info->vdd_dep_on_sclk,
1155 				&(data->vddc_voltage_table));
1156 		PP_ASSERT_WITH_CODE(!result,
1157 				"Failed to retrieve VDDCR_SOC table!",
1158 				return result);
1159 	}
1160 
1161 	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1162 			"Too many voltage values for VDDC. Trimming to fit state table.",
1163 			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1164 					16, &(data->vddc_voltage_table)));
1165 
1166 	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1167 			"Too many voltage values for VDDCI. Trimming to fit state table.",
1168 			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1169 					16, &(data->vddci_voltage_table)));
1170 
1171 	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1172 			"Too many voltage values for MVDD. Trimming to fit state table.",
1173 			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1174 					16, &(data->mvdd_voltage_table)));
1175 
1176 
1177 	return 0;
1178 }
1179 
1180 /*
1181  * @fn vega10_init_dpm_state
1182  * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1183  *
1184  * @param    dpm_state - the address of the DPM Table to initialize.
1185  * @return   None.
1186  */
1187 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1188 {
1189 	dpm_state->soft_min_level = 0xff;
1190 	dpm_state->soft_max_level = 0xff;
1191 	dpm_state->hard_min_level = 0xff;
1192 	dpm_state->hard_max_level = 0xff;
1193 }
1194 
1195 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1196 		struct vega10_single_dpm_table *dpm_table,
1197 		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1198 {
1199 	int i;
1200 
1201 	dpm_table->count = 0;
1202 
1203 	for (i = 0; i < dep_table->count; i++) {
1204 		if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1205 				dep_table->entries[i].clk) {
1206 			dpm_table->dpm_levels[dpm_table->count].value =
1207 					dep_table->entries[i].clk;
1208 			dpm_table->dpm_levels[dpm_table->count].enabled = true;
1209 			dpm_table->count++;
1210 		}
1211 	}
1212 }
1213 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1214 {
1215 	struct vega10_hwmgr *data = hwmgr->backend;
1216 	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1217 	struct phm_ppt_v2_information *table_info =
1218 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1219 	struct phm_ppt_v1_pcie_table *bios_pcie_table =
1220 			table_info->pcie_table;
1221 	uint32_t i;
1222 
1223 	PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1224 			"Incorrect number of PCIE States from VBIOS!",
1225 			return -1);
1226 
1227 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
1228 		if (data->registry_data.pcieSpeedOverride)
1229 			pcie_table->pcie_gen[i] =
1230 					data->registry_data.pcieSpeedOverride;
1231 		else
1232 			pcie_table->pcie_gen[i] =
1233 					bios_pcie_table->entries[i].gen_speed;
1234 
1235 		if (data->registry_data.pcieLaneOverride)
1236 			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1237 					data->registry_data.pcieLaneOverride);
1238 		else
1239 			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1240 							bios_pcie_table->entries[i].lane_width);
1241 		if (data->registry_data.pcieClockOverride)
1242 			pcie_table->lclk[i] =
1243 					data->registry_data.pcieClockOverride;
1244 		else
1245 			pcie_table->lclk[i] =
1246 					bios_pcie_table->entries[i].pcie_sclk;
1247 	}
1248 
1249 	pcie_table->count = NUM_LINK_LEVELS;
1250 
1251 	return 0;
1252 }
1253 
1254 /*
1255  * This function is to initialize all DPM state tables
1256  * for SMU based on the dependency table.
1257  * The dynamic state patching function will then trim these
1258  * state tables to the allowed range based
1259  * on the power policy or external client requests,
1260  * such as UVD request, etc.
1261  */
1262 static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1263 {
1264 	struct vega10_hwmgr *data = hwmgr->backend;
1265 	struct phm_ppt_v2_information *table_info =
1266 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1267 	struct vega10_single_dpm_table *dpm_table;
1268 	uint32_t i;
1269 
1270 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1271 			table_info->vdd_dep_on_socclk;
1272 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1273 			table_info->vdd_dep_on_sclk;
1274 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1275 			table_info->vdd_dep_on_mclk;
1276 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1277 			table_info->mm_dep_table;
1278 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1279 			table_info->vdd_dep_on_dcefclk;
1280 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1281 			table_info->vdd_dep_on_pixclk;
1282 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1283 			table_info->vdd_dep_on_dispclk;
1284 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1285 			table_info->vdd_dep_on_phyclk;
1286 
1287 	PP_ASSERT_WITH_CODE(dep_soc_table,
1288 			"SOCCLK dependency table is missing. This table is mandatory",
1289 			return -EINVAL);
1290 	PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1291 			"SOCCLK dependency table is empty. This table is mandatory",
1292 			return -EINVAL);
1293 
1294 	PP_ASSERT_WITH_CODE(dep_gfx_table,
1295 			"GFXCLK dependency table is missing. This table is mandatory",
1296 			return -EINVAL);
1297 	PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1298 			"GFXCLK dependency table is empty. This table is mandatory",
1299 			return -EINVAL);
1300 
1301 	PP_ASSERT_WITH_CODE(dep_mclk_table,
1302 			"MCLK dependency table is missing. This table is mandatory",
1303 			return -EINVAL);
1304 	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1305 			"MCLK dependency table is empty. This table is mandatory",
1306 			return -EINVAL);
1307 
1308 	/* Initialize Socclk DPM table based on allowed Socclk values */
1309 	dpm_table = &(data->dpm_table.soc_table);
1310 	vega10_setup_default_single_dpm_table(hwmgr,
1311 			dpm_table,
1312 			dep_soc_table);
1313 
1314 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1315 
1316 	dpm_table = &(data->dpm_table.gfx_table);
1317 	vega10_setup_default_single_dpm_table(hwmgr,
1318 			dpm_table,
1319 			dep_gfx_table);
1320 	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
1321 		hwmgr->platform_descriptor.overdriveLimit.engineClock =
1322 					dpm_table->dpm_levels[dpm_table->count-1].value;
1323 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1324 
1325 	/* Initialize Mclk DPM table based on allowed Mclk values */
1326 	data->dpm_table.mem_table.count = 0;
1327 	dpm_table = &(data->dpm_table.mem_table);
1328 	vega10_setup_default_single_dpm_table(hwmgr,
1329 			dpm_table,
1330 			dep_mclk_table);
1331 	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
1332 		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
1333 					dpm_table->dpm_levels[dpm_table->count-1].value;
1334 
1335 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1336 
1337 	data->dpm_table.eclk_table.count = 0;
1338 	dpm_table = &(data->dpm_table.eclk_table);
1339 	for (i = 0; i < dep_mm_table->count; i++) {
1340 		if (i == 0 || dpm_table->dpm_levels
1341 				[dpm_table->count - 1].value <=
1342 						dep_mm_table->entries[i].eclk) {
1343 			dpm_table->dpm_levels[dpm_table->count].value =
1344 					dep_mm_table->entries[i].eclk;
1345 			dpm_table->dpm_levels[dpm_table->count].enabled =
1346 					(i == 0) ? true : false;
1347 			dpm_table->count++;
1348 		}
1349 	}
1350 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1351 
1352 	data->dpm_table.vclk_table.count = 0;
1353 	data->dpm_table.dclk_table.count = 0;
1354 	dpm_table = &(data->dpm_table.vclk_table);
1355 	for (i = 0; i < dep_mm_table->count; i++) {
1356 		if (i == 0 || dpm_table->dpm_levels
1357 				[dpm_table->count - 1].value <=
1358 						dep_mm_table->entries[i].vclk) {
1359 			dpm_table->dpm_levels[dpm_table->count].value =
1360 					dep_mm_table->entries[i].vclk;
1361 			dpm_table->dpm_levels[dpm_table->count].enabled =
1362 					(i == 0) ? true : false;
1363 			dpm_table->count++;
1364 		}
1365 	}
1366 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1367 
1368 	dpm_table = &(data->dpm_table.dclk_table);
1369 	for (i = 0; i < dep_mm_table->count; i++) {
1370 		if (i == 0 || dpm_table->dpm_levels
1371 				[dpm_table->count - 1].value <=
1372 						dep_mm_table->entries[i].dclk) {
1373 			dpm_table->dpm_levels[dpm_table->count].value =
1374 					dep_mm_table->entries[i].dclk;
1375 			dpm_table->dpm_levels[dpm_table->count].enabled =
1376 					(i == 0) ? true : false;
1377 			dpm_table->count++;
1378 		}
1379 	}
1380 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1381 
1382 	/* Assume there is no headless Vega10 for now */
1383 	dpm_table = &(data->dpm_table.dcef_table);
1384 	vega10_setup_default_single_dpm_table(hwmgr,
1385 			dpm_table,
1386 			dep_dcef_table);
1387 
1388 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1389 
1390 	dpm_table = &(data->dpm_table.pixel_table);
1391 	vega10_setup_default_single_dpm_table(hwmgr,
1392 			dpm_table,
1393 			dep_pix_table);
1394 
1395 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1396 
1397 	dpm_table = &(data->dpm_table.display_table);
1398 	vega10_setup_default_single_dpm_table(hwmgr,
1399 			dpm_table,
1400 			dep_disp_table);
1401 
1402 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1403 
1404 	dpm_table = &(data->dpm_table.phy_table);
1405 	vega10_setup_default_single_dpm_table(hwmgr,
1406 			dpm_table,
1407 			dep_phy_table);
1408 
1409 	vega10_init_dpm_state(&(dpm_table->dpm_state));
1410 
1411 	vega10_setup_default_pcie_table(hwmgr);
1412 
1413 	/* save a copy of the default DPM table */
1414 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1415 			sizeof(struct vega10_dpm_table));
1416 
1417 	return 0;
1418 }
1419 
1420 /*
1421  * @fn vega10_populate_ulv_state
1422  * @brief Function to provide parameters for the Ultra Low Voltage (ULV) state to SMC.
1423  *
1424  * @param    hwmgr - the address of the hardware manager.
1425  * @return   Always 0.
1426  */
1427 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1428 {
1429 	struct vega10_hwmgr *data = hwmgr->backend;
1430 	struct phm_ppt_v2_information *table_info =
1431 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1432 
1433 	data->smc_state_table.pp_table.UlvOffsetVid =
1434 			(uint8_t)table_info->us_ulv_voltage_offset;
1435 
1436 	data->smc_state_table.pp_table.UlvSmnclkDid =
1437 			(uint8_t)(table_info->us_ulv_smnclk_did);
1438 	data->smc_state_table.pp_table.UlvMp1clkDid =
1439 			(uint8_t)(table_info->us_ulv_mp1clk_did);
1440 	data->smc_state_table.pp_table.UlvGfxclkBypass =
1441 			(uint8_t)(table_info->us_ulv_gfxclk_bypass);
1442 	data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1443 			(uint8_t)(data->vddc_voltage_table.psi0_enable);
1444 	data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1445 			(uint8_t)(data->vddc_voltage_table.psi1_enable);
1446 
1447 	return 0;
1448 }
1449 
1450 static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1451 		uint32_t lclock, uint8_t *curr_lclk_did)
1452 {
1453 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1454 
1455 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1456 			hwmgr,
1457 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1458 			lclock, &dividers),
1459 			"Failed to get LCLK clock settings from VBIOS!",
1460 			return -1);
1461 
1462 	*curr_lclk_did = dividers.ulDid;
1463 
1464 	return 0;
1465 }
1466 
1467 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1468 {
1469 	int result = -1;
1470 	struct vega10_hwmgr *data = hwmgr->backend;
1471 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1472 	struct vega10_pcie_table *pcie_table =
1473 			&(data->dpm_table.pcie_table);
1474 	uint32_t i, j;
1475 
1476 	for (i = 0; i < pcie_table->count; i++) {
1477 		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1478 		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1479 
1480 		result = vega10_populate_single_lclk_level(hwmgr,
1481 				pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1482 		if (result) {
1483 			pr_info("Populate LClock Level %d Failed!\n", i);
1484 			return result;
1485 		}
1486 	}
1487 
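	/*
	 * Fewer PCIe entries than SMC link levels: pad the remaining
	 * slots by repeating the highest populated level.
	 */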
1488 	j = i - 1;
1489 	while (i < NUM_LINK_LEVELS) {
1490 		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1491 		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1492 
1493 		result = vega10_populate_single_lclk_level(hwmgr,
1494 				pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1495 		if (result) {
1496 			pr_info("Populate LClock Level %d Failed!\n", i);
1497 			return result;
1498 		}
1499 		i++;
1500 	}
1501 
1502 	return result;
1503 }
1504 
1505 /**
1506 * Populates single SMC GFXCLK structure using the provided engine clock
1507 *
1508 * @param    hwmgr      the address of the hardware manager
1509 * @param    gfx_clock  the GFX clock to use to populate the structure.
1510 * @param    current_gfxclk_level  location in PPTable for the SMC GFXCLK structure.
1511 */
1512 
1513 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1514 		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
1515 		uint32_t *acg_freq)
1516 {
1517 	struct phm_ppt_v2_information *table_info =
1518 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1519 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
1520 	struct vega10_hwmgr *data = hwmgr->backend;
1521 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1522 	uint32_t gfx_max_clock =
1523 			hwmgr->platform_descriptor.overdriveLimit.engineClock;
1524 	uint32_t i = 0;
1525 
1526 	if (hwmgr->od_enabled)
1527 		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1528 						&(data->odn_dpm_table.vdd_dep_on_sclk);
1529 	else
1530 		dep_on_sclk = table_info->vdd_dep_on_sclk;
1531 
1532 	PP_ASSERT_WITH_CODE(dep_on_sclk,
1533 			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
1534 			return -EINVAL);
1535 
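	/*
	 * With a pending overdrive SCLK update, clamp the requested clock
	 * to the overdrive limit; otherwise it must match an entry in the
	 * SOC_VDD-GFX_CLK dependency table exactly.
	 */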
1536 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1537 		gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1538 	else {
1539 		for (i = 0; i < dep_on_sclk->count; i++) {
1540 			if (dep_on_sclk->entries[i].clk == gfx_clock)
1541 				break;
1542 		}
1543 		PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1544 				"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1545 				return -EINVAL);
1546 	}
1547 
1548 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1549 			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1550 			gfx_clock, &dividers),
1551 			"Failed to get GFX Clock settings from VBIOS!",
1552 			return -EINVAL);
1553 
1554 	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1555 	current_gfxclk_level->FbMult =
1556 			cpu_to_le32(dividers.ulPll_fb_mult);
1557 	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1558 	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1559 	current_gfxclk_level->SsFbMult =
1560 			cpu_to_le32(dividers.ulPll_ss_fbsmult);
1561 	current_gfxclk_level->SsSlewFrac =
1562 			cpu_to_le16(dividers.usPll_ss_slew_frac);
1563 	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1564 
1565 	*acg_freq = gfx_clock / 100; /* 100 kHz to MHz conversion */
1566 
1567 	return 0;
1568 }
1569 
1570 /**
1571  * @brief Populates single SMC SOCCLK structure using the provided clock.
1572  *
1573  * @param    hwmgr - the address of the hardware manager.
1574  * @param    soc_clock - the SOC clock to use to populate the structure.
1575  * @param    current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1576  * @return   0 on success.
1577  */
1578 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1579 		uint32_t soc_clock, uint8_t *current_soc_did,
1580 		uint8_t *current_vol_index)
1581 {
1582 	struct vega10_hwmgr *data = hwmgr->backend;
1583 	struct phm_ppt_v2_information *table_info =
1584 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1585 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
1586 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1587 	uint32_t i;
1588 
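	/*
	 * With overdrive enabled, take the first dependency entry at or
	 * above the requested SOCCLK; otherwise an exact match is required.
	 */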
1589 	if (hwmgr->od_enabled) {
1590 		dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1591 						&data->odn_dpm_table.vdd_dep_on_socclk;
1592 		for (i = 0; i < dep_on_soc->count; i++) {
1593 			if (dep_on_soc->entries[i].clk >= soc_clock)
1594 				break;
1595 		}
1596 	} else {
1597 		dep_on_soc = table_info->vdd_dep_on_socclk;
1598 		for (i = 0; i < dep_on_soc->count; i++) {
1599 			if (dep_on_soc->entries[i].clk == soc_clock)
1600 				break;
1601 		}
1602 	}
1603 
1604 	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1605 			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1606 			return -EINVAL);
1607 
1608 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1609 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1610 			soc_clock, &dividers),
1611 			"Failed to get SOC Clock settings from VBIOS!",
1612 			return -EINVAL);
1613 
1614 	*current_soc_did = (uint8_t)dividers.ulDid;
1615 	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1616 	return 0;
1617 }
1618 
1619 /**
1620 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1621 *
1622 * @param    hwmgr      the address of the hardware manager
1623 */
1624 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1625 {
1626 	struct vega10_hwmgr *data = hwmgr->backend;
1627 	struct phm_ppt_v2_information *table_info =
1628 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1629 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1630 	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1631 	int result = 0;
1632 	uint32_t i, j;
1633 
1634 	for (i = 0; i < dpm_table->count; i++) {
1635 		result = vega10_populate_single_gfx_level(hwmgr,
1636 				dpm_table->dpm_levels[i].value,
1637 				&(pp_table->GfxclkLevel[i]),
1638 				&(pp_table->AcgFreqTable[i]));
1639 		if (result)
1640 			return result;
1641 	}
1642 
1643 	j = i - 1;
1644 	while (i < NUM_GFXCLK_DPM_LEVELS) {
1645 		result = vega10_populate_single_gfx_level(hwmgr,
1646 				dpm_table->dpm_levels[j].value,
1647 				&(pp_table->GfxclkLevel[i]),
1648 				&(pp_table->AcgFreqTable[i]));
1649 		if (result)
1650 			return result;
1651 		i++;
1652 	}
1653 
1654 	pp_table->GfxclkSlewRate =
1655 			cpu_to_le16(table_info->us_gfxclk_slew_rate);
1656 
1657 	dpm_table = &(data->dpm_table.soc_table);
1658 	for (i = 0; i < dpm_table->count; i++) {
1659 		result = vega10_populate_single_soc_level(hwmgr,
1660 				dpm_table->dpm_levels[i].value,
1661 				&(pp_table->SocclkDid[i]),
1662 				&(pp_table->SocDpmVoltageIndex[i]));
1663 		if (result)
1664 			return result;
1665 	}
1666 
1667 	j = i - 1;
1668 	while (i < NUM_SOCCLK_DPM_LEVELS) {
1669 		result = vega10_populate_single_soc_level(hwmgr,
1670 				dpm_table->dpm_levels[j].value,
1671 				&(pp_table->SocclkDid[i]),
1672 				&(pp_table->SocDpmVoltageIndex[i]));
1673 		if (result)
1674 			return result;
1675 		i++;
1676 	}
1677 
1678 	return result;
1679 }
1680 
1681 static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
1682 {
1683 	struct vega10_hwmgr *data = hwmgr->backend;
1684 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1685 	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
1686 	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
1687 
1688 	uint8_t soc_vid = 0;
1689 	uint32_t i, max_vddc_level;
1690 
1691 	if (hwmgr->od_enabled)
1692 		vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
1693 	else
1694 		vddc_lookup_table = table_info->vddc_lookup_table;
1695 
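	/*
	 * Convert every VDDC lookup entry to a VID and replicate the last
	 * one to fill the remaining SocVid slots.
	 */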
1696 	max_vddc_level = vddc_lookup_table->count;
1697 	for (i = 0; i < max_vddc_level; i++) {
1698 		soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
1699 		pp_table->SocVid[i] = soc_vid;
1700 	}
1701 	while (i < MAX_REGULAR_DPM_NUMBER) {
1702 		pp_table->SocVid[i] = soc_vid;
1703 		i++;
1704 	}
1705 }
1706 
1707 /**
1708  * @brief Populates a single SMC memory level (UCLK) using the provided memory clock.
1709  *
1710  * @param    hwmgr - the address of the hardware manager.
1711  * @param    mem_clock - the memory clock to use to populate the structure.
1712  * @return   0 on success.
1713  */
1714 static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1715 		uint32_t mem_clock, uint8_t *current_mem_vid,
1716 		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1717 {
1718 	struct vega10_hwmgr *data = hwmgr->backend;
1719 	struct phm_ppt_v2_information *table_info =
1720 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1721 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
1722 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1723 	uint32_t mem_max_clock =
1724 			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1725 	uint32_t i = 0;
1726 
1727 	if (hwmgr->od_enabled)
1728 		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1729 					&data->odn_dpm_table.vdd_dep_on_mclk;
1730 	else
1731 		dep_on_mclk = table_info->vdd_dep_on_mclk;
1732 
1733 	PP_ASSERT_WITH_CODE(dep_on_mclk,
1734 			"Invalid SOC_VDD-UCLK Dependency Table!",
1735 			return -EINVAL);
1736 
1737 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
1738 		mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1739 	} else {
1740 		for (i = 0; i < dep_on_mclk->count; i++) {
1741 			if (dep_on_mclk->entries[i].clk == mem_clock)
1742 				break;
1743 		}
1744 		PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1745 				"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1746 				return -EINVAL);
1747 	}
1748 
1749 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1750 			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1751 			"Failed to get UCLK settings from VBIOS!",
1752 			return -1);
1753 
1754 	*current_mem_vid =
1755 			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1756 	*current_mem_soc_vind =
1757 			(uint8_t)(dep_on_mclk->entries[i].vddInd);
1758 	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1759 	current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1760 
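	/* A divider ID of 0 from the VBIOS is treated as invalid for UCLK. */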
1761 	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1762 			"Invalid Divider ID!",
1763 			return -EINVAL);
1764 
1765 	return 0;
1766 }
1767 
1768 /**
1769  * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1770  *
1771  * @param    hwmgr - the address of the hardware manager.
1772  * @return   0 on success.
1773  */
1774 static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1775 {
1776 	struct vega10_hwmgr *data = hwmgr->backend;
1777 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1778 	struct vega10_single_dpm_table *dpm_table =
1779 			&(data->dpm_table.mem_table);
1780 	int result = 0;
1781 	uint32_t i, j;
1782 
1783 	for (i = 0; i < dpm_table->count; i++) {
1784 		result = vega10_populate_single_memory_level(hwmgr,
1785 				dpm_table->dpm_levels[i].value,
1786 				&(pp_table->MemVid[i]),
1787 				&(pp_table->UclkLevel[i]),
1788 				&(pp_table->MemSocVoltageIndex[i]));
1789 		if (result)
1790 			return result;
1791 	}
1792 
1793 	j = i - 1;
1794 	while (i < NUM_UCLK_DPM_LEVELS) {
1795 		result = vega10_populate_single_memory_level(hwmgr,
1796 				dpm_table->dpm_levels[j].value,
1797 				&(pp_table->MemVid[i]),
1798 				&(pp_table->UclkLevel[i]),
1799 				&(pp_table->MemSocVoltageIndex[i]));
1800 		if (result)
1801 			return result;
1802 		i++;
1803 	}
1804 
1805 	pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
1806 	pp_table->MemoryChannelWidth =
1807 			(uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1808 					channel_number[data->mem_channels]);
1809 
1810 	pp_table->LowestUclkReservedForUlv =
1811 			(uint8_t)(data->lowest_uclk_reserved_for_ulv);
1812 
1813 	return result;
1814 }
1815 
1816 static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1817 		DSPCLK_e disp_clock)
1818 {
1819 	struct vega10_hwmgr *data = hwmgr->backend;
1820 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1821 	struct phm_ppt_v2_information *table_info =
1822 			(struct phm_ppt_v2_information *)
1823 			(hwmgr->pptable);
1824 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1825 	uint32_t i;
1826 	uint16_t clk = 0, vddc = 0;
1827 	uint8_t vid = 0;
1828 
1829 	switch (disp_clock) {
1830 	case DSPCLK_DCEFCLK:
1831 		dep_table = table_info->vdd_dep_on_dcefclk;
1832 		break;
1833 	case DSPCLK_DISPCLK:
1834 		dep_table = table_info->vdd_dep_on_dispclk;
1835 		break;
1836 	case DSPCLK_PIXCLK:
1837 		dep_table = table_info->vdd_dep_on_pixclk;
1838 		break;
1839 	case DSPCLK_PHYCLK:
1840 		dep_table = table_info->vdd_dep_on_phyclk;
1841 		break;
1842 	default:
1843 		return -1;
1844 	}
1845 
1846 	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1847 			"Number Of Entries Exceeded maximum!",
1848 			return -1);
1849 
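	/*
	 * Scale each dependency clock down by 100 for the SMC display clock
	 * table and convert its voltage to a VID; the remaining levels are
	 * padded with the last converted values.
	 */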
1850 	for (i = 0; i < dep_table->count; i++) {
1851 		clk = (uint16_t)(dep_table->entries[i].clk / 100);
1852 		vddc = table_info->vddc_lookup_table->
1853 				entries[dep_table->entries[i].vddInd].us_vdd;
1854 		vid = (uint8_t)convert_to_vid(vddc);
1855 		pp_table->DisplayClockTable[disp_clock][i].Freq =
1856 				cpu_to_le16(clk);
1857 		pp_table->DisplayClockTable[disp_clock][i].Vid =
1858 				cpu_to_le16(vid);
1859 	}
1860 
1861 	while (i < NUM_DSPCLK_LEVELS) {
1862 		pp_table->DisplayClockTable[disp_clock][i].Freq =
1863 				cpu_to_le16(clk);
1864 		pp_table->DisplayClockTable[disp_clock][i].Vid =
1865 				cpu_to_le16(vid);
1866 		i++;
1867 	}
1868 
1869 	return 0;
1870 }
1871 
1872 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1873 {
1874 	uint32_t i;
1875 
1876 	for (i = 0; i < DSPCLK_COUNT; i++) {
1877 		PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1878 				"Failed to populate Clock in DisplayClockTable!",
1879 				return -1);
1880 	}
1881 
1882 	return 0;
1883 }
1884 
1885 static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1886 		uint32_t eclock, uint8_t *current_eclk_did,
1887 		uint8_t *current_soc_vol)
1888 {
1889 	struct phm_ppt_v2_information *table_info =
1890 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1891 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1892 			table_info->mm_dep_table;
1893 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1894 	uint32_t i;
1895 
1896 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1897 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1898 			eclock, &dividers),
1899 			"Failed to get ECLK clock settings from VBIOS!",
1900 			return -1);
1901 
1902 	*current_eclk_did = (uint8_t)dividers.ulDid;
1903 
1904 	for (i = 0; i < dep_table->count; i++) {
1905 		if (dep_table->entries[i].eclk == eclock)
1906 			*current_soc_vol = dep_table->entries[i].vddcInd;
1907 	}
1908 
1909 	return 0;
1910 }
1911 
1912 static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1913 {
1914 	struct vega10_hwmgr *data = hwmgr->backend;
1915 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1916 	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1917 	int result = -EINVAL;
1918 	uint32_t i, j;
1919 
1920 	for (i = 0; i < dpm_table->count; i++) {
1921 		result = vega10_populate_single_eclock_level(hwmgr,
1922 				dpm_table->dpm_levels[i].value,
1923 				&(pp_table->EclkDid[i]),
1924 				&(pp_table->VceDpmVoltageIndex[i]));
1925 		if (result)
1926 			return result;
1927 	}
1928 
1929 	j = i - 1;
1930 	while (i < NUM_VCE_DPM_LEVELS) {
1931 		result = vega10_populate_single_eclock_level(hwmgr,
1932 				dpm_table->dpm_levels[j].value,
1933 				&(pp_table->EclkDid[i]),
1934 				&(pp_table->VceDpmVoltageIndex[i]));
1935 		if (result)
1936 			return result;
1937 		i++;
1938 	}
1939 
1940 	return result;
1941 }
1942 
1943 static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1944 		uint32_t vclock, uint8_t *current_vclk_did)
1945 {
1946 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1947 
1948 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1949 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1950 			vclock, &dividers),
1951 			"Failed to get VCLK clock settings from VBIOS!",
1952 			return -EINVAL);
1953 
1954 	*current_vclk_did = (uint8_t)dividers.ulDid;
1955 
1956 	return 0;
1957 }
1958 
1959 static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1960 		uint32_t dclock, uint8_t *current_dclk_did)
1961 {
1962 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1963 
1964 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1965 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1966 			dclock, &dividers),
1967 			"Failed to get DCLK clock settings from VBIOS!",
1968 			return -EINVAL);
1969 
1970 	*current_dclk_did = (uint8_t)dividers.ulDid;
1971 
1972 	return 0;
1973 }
1974 
1975 static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1976 {
1977 	struct vega10_hwmgr *data = hwmgr->backend;
1978 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1979 	struct vega10_single_dpm_table *vclk_dpm_table =
1980 			&(data->dpm_table.vclk_table);
1981 	struct vega10_single_dpm_table *dclk_dpm_table =
1982 			&(data->dpm_table.dclk_table);
1983 	struct phm_ppt_v2_information *table_info =
1984 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
1985 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1986 			table_info->mm_dep_table;
1987 	int result = -EINVAL;
1988 	uint32_t i, j;
1989 
1990 	for (i = 0; i < vclk_dpm_table->count; i++) {
1991 		result = vega10_populate_single_vclock_level(hwmgr,
1992 				vclk_dpm_table->dpm_levels[i].value,
1993 				&(pp_table->VclkDid[i]));
1994 		if (result)
1995 			return result;
1996 	}
1997 
1998 	j = i - 1;
1999 	while (i < NUM_UVD_DPM_LEVELS) {
2000 		result = vega10_populate_single_vclock_level(hwmgr,
2001 				vclk_dpm_table->dpm_levels[j].value,
2002 				&(pp_table->VclkDid[i]));
2003 		if (result)
2004 			return result;
2005 		i++;
2006 	}
2007 
2008 	for (i = 0; i < dclk_dpm_table->count; i++) {
2009 		result = vega10_populate_single_dclock_level(hwmgr,
2010 				dclk_dpm_table->dpm_levels[i].value,
2011 				&(pp_table->DclkDid[i]));
2012 		if (result)
2013 			return result;
2014 	}
2015 
2016 	j = i - 1;
2017 	while (i < NUM_UVD_DPM_LEVELS) {
2018 		result = vega10_populate_single_dclock_level(hwmgr,
2019 				dclk_dpm_table->dpm_levels[j].value,
2020 				&(pp_table->DclkDid[i]));
2021 		if (result)
2022 			return result;
2023 		i++;
2024 	}
2025 
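	/*
	 * The UVD voltage index is taken from the multimedia dependency
	 * table only when the VCLK/DCLK pair at each DPM level matches the
	 * dependency entry exactly; any mismatch is treated as an error.
	 */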
2026 	for (i = 0; i < dep_table->count; i++) {
2027 		if (dep_table->entries[i].vclk ==
2028 				vclk_dpm_table->dpm_levels[i].value &&
2029 			dep_table->entries[i].dclk ==
2030 				dclk_dpm_table->dpm_levels[i].value)
2031 			pp_table->UvdDpmVoltageIndex[i] =
2032 					dep_table->entries[i].vddcInd;
2033 		else
2034 			return -1;
2035 	}
2036 
2037 	j = i - 1;
2038 	while (i < NUM_UVD_DPM_LEVELS) {
2039 		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2040 		i++;
2041 	}
2042 
2043 	return 0;
2044 }
2045 
2046 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2047 {
2048 	struct vega10_hwmgr *data = hwmgr->backend;
2049 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2050 	struct phm_ppt_v2_information *table_info =
2051 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
2052 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2053 			table_info->vdd_dep_on_sclk;
2054 	uint32_t i;
2055 
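	/*
	 * Copy the clock-stretcher enable flags and rescale the voltage
	 * offsets from pptable units to the scale expected by the SMC.
	 */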
2056 	for (i = 0; i < dep_table->count; i++) {
2057 		pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2058 		pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2059 				* VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2060 	}
2061 
2062 	return 0;
2063 }
2064 
2065 static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2066 {
2067 	struct vega10_hwmgr *data = hwmgr->backend;
2068 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2069 	struct phm_ppt_v2_information *table_info =
2070 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
2071 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2072 			table_info->vdd_dep_on_sclk;
2073 	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2074 	int result = 0;
2075 	uint32_t i;
2076 
2077 	pp_table->MinVoltageVid = (uint8_t)0xff;
2078 	pp_table->MaxVoltageVid = (uint8_t)0;
2079 
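	/*
	 * AVFS coefficients come from the VBIOS; if the query fails, the
	 * feature is simply marked unsupported instead of failing the
	 * whole SMC table setup.
	 */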
2080 	if (data->smu_features[GNLD_AVFS].supported) {
2081 		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2082 		if (!result) {
2083 			pp_table->MinVoltageVid = (uint8_t)
2084 					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2085 			pp_table->MaxVoltageVid = (uint8_t)
2086 					convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2087 
2088 			pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2089 			pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2090 			pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2091 			pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2092 			pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2093 			pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2094 			pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2095 
2096 			pp_table->BtcGbVdroopTableCksOff.a0 =
2097 					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2098 			pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2099 			pp_table->BtcGbVdroopTableCksOff.a1 =
2100 					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2101 			pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2102 			pp_table->BtcGbVdroopTableCksOff.a2 =
2103 					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2104 			pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2105 
2106 			pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2107 			pp_table->BtcGbVdroopTableCksOn.a0 =
2108 					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2109 			pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2110 			pp_table->BtcGbVdroopTableCksOn.a1 =
2111 					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2112 			pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2113 			pp_table->BtcGbVdroopTableCksOn.a2 =
2114 					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2115 			pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2116 
2117 			pp_table->AvfsGbCksOn.m1 =
2118 					cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2119 			pp_table->AvfsGbCksOn.m2 =
2120 					cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2121 			pp_table->AvfsGbCksOn.b =
2122 					cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2123 			pp_table->AvfsGbCksOn.m1_shift = 24;
2124 			pp_table->AvfsGbCksOn.m2_shift = 12;
2125 			pp_table->AvfsGbCksOn.b_shift = 0;
2126 
2127 			pp_table->OverrideAvfsGbCksOn =
2128 					avfs_params.ucEnableGbFuseTableCkson;
2129 			pp_table->AvfsGbCksOff.m1 =
2130 					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2131 			pp_table->AvfsGbCksOff.m2 =
2132 					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2133 			pp_table->AvfsGbCksOff.b =
2134 					cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2135 			pp_table->AvfsGbCksOff.m1_shift = 24;
2136 			pp_table->AvfsGbCksOff.m2_shift = 12;
2137 			pp_table->AvfsGbCksOff.b_shift = 0;
2138 
2139 			for (i = 0; i < dep_table->count; i++)
2140 				pp_table->StaticVoltageOffsetVid[i] =
2141 						convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2142 
2143 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2144 					data->disp_clk_quad_eqn_a) &&
2145 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2146 					data->disp_clk_quad_eqn_b)) {
2147 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2148 						(int32_t)data->disp_clk_quad_eqn_a;
2149 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2150 						(int32_t)data->disp_clk_quad_eqn_b;
2151 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2152 						(int32_t)data->disp_clk_quad_eqn_c;
2153 			} else {
2154 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2155 						(int32_t)avfs_params.ulDispclk2GfxclkM1;
2156 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2157 						(int32_t)avfs_params.ulDispclk2GfxclkM2;
2158 				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2159 						(int32_t)avfs_params.ulDispclk2GfxclkB;
2160 			}
2161 
2162 			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2163 			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2164 			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2165 
2166 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2167 					data->dcef_clk_quad_eqn_a) &&
2168 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2169 					data->dcef_clk_quad_eqn_b)) {
2170 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2171 						(int32_t)data->dcef_clk_quad_eqn_a;
2172 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2173 						(int32_t)data->dcef_clk_quad_eqn_b;
2174 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2175 						(int32_t)data->dcef_clk_quad_eqn_c;
2176 			} else {
2177 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2178 						(int32_t)avfs_params.ulDcefclk2GfxclkM1;
2179 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2180 						(int32_t)avfs_params.ulDcefclk2GfxclkM2;
2181 				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2182 						(int32_t)avfs_params.ulDcefclk2GfxclkB;
2183 			}
2184 
2185 			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2186 			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2187 			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2188 
2189 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2190 					data->pixel_clk_quad_eqn_a) &&
2191 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2192 					data->pixel_clk_quad_eqn_b)) {
2193 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2194 						(int32_t)data->pixel_clk_quad_eqn_a;
2195 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2196 						(int32_t)data->pixel_clk_quad_eqn_b;
2197 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2198 						(int32_t)data->pixel_clk_quad_eqn_c;
2199 			} else {
2200 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2201 						(int32_t)avfs_params.ulPixelclk2GfxclkM1;
2202 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2203 						(int32_t)avfs_params.ulPixelclk2GfxclkM2;
2204 				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2205 						(int32_t)avfs_params.ulPixelclk2GfxclkB;
2206 			}
2207 
2208 			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2209 			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2210 			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2211 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2212 					data->phy_clk_quad_eqn_a) &&
2213 				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2214 					data->phy_clk_quad_eqn_b)) {
2215 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2216 						(int32_t)data->phy_clk_quad_eqn_a;
2217 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2218 						(int32_t)data->phy_clk_quad_eqn_b;
2219 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2220 						(int32_t)data->phy_clk_quad_eqn_c;
2221 			} else {
2222 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2223 						(int32_t)avfs_params.ulPhyclk2GfxclkM1;
2224 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2225 						(int32_t)avfs_params.ulPhyclk2GfxclkM2;
2226 				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2227 						(int32_t)avfs_params.ulPhyclk2GfxclkB;
2228 			}
2229 
2230 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2231 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2232 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2233 
2234 			pp_table->AcgBtcGbVdroopTable.a0       = avfs_params.ulAcgGbVdroopTableA0;
2235 			pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2236 			pp_table->AcgBtcGbVdroopTable.a1       = avfs_params.ulAcgGbVdroopTableA1;
2237 			pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2238 			pp_table->AcgBtcGbVdroopTable.a2       = avfs_params.ulAcgGbVdroopTableA2;
2239 			pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2240 
2241 			pp_table->AcgAvfsGb.m1                   = avfs_params.ulAcgGbFuseTableM1;
2242 			pp_table->AcgAvfsGb.m2                   = avfs_params.ulAcgGbFuseTableM2;
2243 			pp_table->AcgAvfsGb.b                    = avfs_params.ulAcgGbFuseTableB;
2244 			pp_table->AcgAvfsGb.m1_shift             = 0;
2245 			pp_table->AcgAvfsGb.m2_shift             = 0;
2246 			pp_table->AcgAvfsGb.b_shift              = 0;
2247 
2248 		} else {
2249 			data->smu_features[GNLD_AVFS].supported = false;
2250 		}
2251 	}
2252 
2253 	return 0;
2254 }
2255 
2256 static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2257 {
2258 	struct vega10_hwmgr *data = hwmgr->backend;
2259 	uint32_t agc_btc_response;
2260 
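	/*
	 * ACG needs the DPM prefetcher enabled first; run the ACG BTC and,
	 * based on acg_loop_state, put ACG into closed- or open-loop mode
	 * before enabling the feature itself.
	 */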
2261 	if (data->smu_features[GNLD_ACG].supported) {
2262 		if (0 == vega10_enable_smc_features(hwmgr, true,
2263 					data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2264 			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2265 
2266 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
2267 
2268 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
2269 		agc_btc_response = smum_get_argument(hwmgr);
2270 
2271 		if (1 == agc_btc_response) {
2272 			if (1 == data->acg_loop_state)
2273 				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
2274 			else if (2 == data->acg_loop_state)
2275 				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2276 			if (0 == vega10_enable_smc_features(hwmgr, true,
2277 				data->smu_features[GNLD_ACG].smu_feature_bitmap))
2278 					data->smu_features[GNLD_ACG].enabled = true;
2279 		} else {
2280 			pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2281 			data->smu_features[GNLD_ACG].enabled = false;
2282 		}
2283 	}
2284 
2285 	return 0;
2286 }
2287 
2288 static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2289 {
2290 	struct vega10_hwmgr *data = hwmgr->backend;
2291 
2292 	if (data->smu_features[GNLD_ACG].supported &&
2293 	    data->smu_features[GNLD_ACG].enabled)
2294 		if (!vega10_enable_smc_features(hwmgr, false,
2295 			data->smu_features[GNLD_ACG].smu_feature_bitmap))
2296 			data->smu_features[GNLD_ACG].enabled = false;
2297 
2298 	return 0;
2299 }
2300 
2301 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2302 {
2303 	struct vega10_hwmgr *data = hwmgr->backend;
2304 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2305 	struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2306 	int result;
2307 
2308 	result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2309 	if (!result) {
2310 		if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2311 		    data->registry_data.regulator_hot_gpio_support) {
2312 			pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2313 			pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2314 			pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2315 			pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2316 		} else {
2317 			pp_table->VR0HotGpio = 0;
2318 			pp_table->VR0HotPolarity = 0;
2319 			pp_table->VR1HotGpio = 0;
2320 			pp_table->VR1HotPolarity = 0;
2321 		}
2322 
2323 		if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2324 		    data->registry_data.ac_dc_switch_gpio_support) {
2325 			pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2326 			pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2327 		} else {
2328 			pp_table->AcDcGpio = 0;
2329 			pp_table->AcDcPolarity = 0;
2330 		}
2331 	}
2332 
2333 	return result;
2334 }
2335 
2336 static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2337 {
2338 	struct vega10_hwmgr *data = hwmgr->backend;
2339 
2340 	if (data->smu_features[GNLD_AVFS].supported) {
2341 		if (enable) {
2342 			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2343 					true,
2344 					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2345 					"[avfs_control] Attempt to Enable AVFS feature Failed!",
2346 					return -1);
2347 			data->smu_features[GNLD_AVFS].enabled = true;
2348 		} else {
2349 			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2350 					false,
2351 					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2352 					"[avfs_control] Attempt to Disable AVFS feature Failed!",
2353 					return -1);
2354 			data->smu_features[GNLD_AVFS].enabled = false;
2355 		}
2356 	}
2357 
2358 	return 0;
2359 }
2360 
2361 static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
2362 {
2363 	struct vega10_hwmgr *data = hwmgr->backend;
2364 
2365 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2366 		vega10_avfs_enable(hwmgr, false);
2367 	} else if (data->need_update_dpm_table) {
2368 		vega10_avfs_enable(hwmgr, false);
2369 		vega10_avfs_enable(hwmgr, true);
2370 	} else {
2371 		vega10_avfs_enable(hwmgr, true);
2372 	}
2373 
2374 	return 0;
2375 }
2376 
2377 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2378 {
2379 	int result = 0;
2380 
2381 	uint64_t serial_number = 0;
2382 	uint32_t top32, bottom32;
2383 	struct phm_fuses_default fuse;
2384 
2385 	struct vega10_hwmgr *data = hwmgr->backend;
2386 	AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2387 
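	/*
	 * Read the two 32-bit halves of the chip serial number from the SMC
	 * and use it to look up a per-part AVFS fuse override table.
	 */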
2388 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
2389 	top32 = smum_get_argument(hwmgr);
2390 
2391 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
2392 	bottom32 = smum_get_argument(hwmgr);
2393 
2394 	serial_number = ((uint64_t)bottom32 << 32) | top32;
2395 
2396 	if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
2397 		avfs_fuse_table->VFT0_b  = fuse.VFT0_b;
2398 		avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2399 		avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2400 		avfs_fuse_table->VFT1_b  = fuse.VFT1_b;
2401 		avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2402 		avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2403 		avfs_fuse_table->VFT2_b  = fuse.VFT2_b;
2404 		avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2405 		avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2406 		result = smum_smc_table_manager(hwmgr,  (uint8_t *)avfs_fuse_table,
2407 						AVFSFUSETABLE, false);
2408 		PP_ASSERT_WITH_CODE(!result,
2409 			"Failed to upload FuseOverride!",
2410 			);
2411 	}
2412 
2413 	return result;
2414 }
2415 
2416 static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
2417 {
2418 	struct vega10_hwmgr *data = hwmgr->backend;
2419 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2420 	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
2421 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
2422 	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
2423 	uint32_t i;
2424 
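	/*
	 * Compare the stock and overdrive dependency tables; a VDDC change
	 * in either one forces the matching clock domain (and the voltage)
	 * to be re-uploaded to the SMC.
	 */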
2425 	dep_table = table_info->vdd_dep_on_mclk;
2426 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
2427 
2428 	for (i = 0; i < dep_table->count; i++) {
2429 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2430 			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
2431 			return;
2432 		}
2433 	}
2434 
2435 	dep_table = table_info->vdd_dep_on_sclk;
2436 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
2437 	for (i = 0; i < dep_table->count; i++) {
2438 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2439 			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
2440 			return;
2441 		}
2442 	}
2443 
2444 	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2445 		data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
2446 		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2447 	}
2448 }
2449 
2450 /**
2451 * Initializes the SMC table and uploads it
2452 *
2453 * @param    hwmgr  the address of the powerplay hardware manager.
2454 * @return   0 on success, otherwise an error code.
2456 */
2457 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2458 {
2459 	int result;
2460 	struct vega10_hwmgr *data = hwmgr->backend;
2461 	struct phm_ppt_v2_information *table_info =
2462 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
2463 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2464 	struct pp_atomfwctrl_voltage_table voltage_table;
2465 	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2466 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2467 
2468 	result = vega10_setup_default_dpm_tables(hwmgr);
2469 	PP_ASSERT_WITH_CODE(!result,
2470 			"Failed to setup default DPM tables!",
2471 			return result);
2472 
2473 	/* initialize ODN table */
2474 	if (hwmgr->od_enabled) {
2475 		if (odn_table->max_vddc) {
2476 			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2477 			vega10_check_dpm_table_updated(hwmgr);
2478 		} else {
2479 			vega10_odn_initial_default_setting(hwmgr);
2480 		}
2481 	}
2482 
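	/*
	 * Query the SVI2 VDDC voltage table from the VBIOS for the maximum
	 * VID step and the PSI0/PSI1 phase-shedding capabilities.
	 */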
2483 	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2484 			VOLTAGE_OBJ_SVID2,  &voltage_table);
2485 	pp_table->MaxVidStep = voltage_table.max_vid_step;
2486 
2487 	pp_table->GfxDpmVoltageMode =
2488 			(uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2489 	pp_table->SocDpmVoltageMode =
2490 			(uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2491 	pp_table->UclkDpmVoltageMode =
2492 			(uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2493 	pp_table->UvdDpmVoltageMode =
2494 			(uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2495 	pp_table->VceDpmVoltageMode =
2496 			(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2497 	pp_table->Mp0DpmVoltageMode =
2498 			(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2499 
2500 	pp_table->DisplayDpmVoltageMode =
2501 			(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2502 
2503 	data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2504 	data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2505 
2506 	if (data->registry_data.ulv_support &&
2507 			table_info->us_ulv_voltage_offset) {
2508 		result = vega10_populate_ulv_state(hwmgr);
2509 		PP_ASSERT_WITH_CODE(!result,
2510 				"Failed to initialize ULV state!",
2511 				return result);
2512 	}
2513 
2514 	result = vega10_populate_smc_link_levels(hwmgr);
2515 	PP_ASSERT_WITH_CODE(!result,
2516 			"Failed to initialize Link Level!",
2517 			return result);
2518 
2519 	result = vega10_populate_all_graphic_levels(hwmgr);
2520 	PP_ASSERT_WITH_CODE(!result,
2521 			"Failed to initialize Graphics Level!",
2522 			return result);
2523 
2524 	result = vega10_populate_all_memory_levels(hwmgr);
2525 	PP_ASSERT_WITH_CODE(!result,
2526 			"Failed to initialize Memory Level!",
2527 			return result);
2528 
2529 	vega10_populate_vddc_soc_levels(hwmgr);
2530 
2531 	result = vega10_populate_all_display_clock_levels(hwmgr);
2532 	PP_ASSERT_WITH_CODE(!result,
2533 			"Failed to initialize Display Level!",
2534 			return result);
2535 
2536 	result = vega10_populate_smc_vce_levels(hwmgr);
2537 	PP_ASSERT_WITH_CODE(!result,
2538 			"Failed to initialize VCE Level!",
2539 			return result);
2540 
2541 	result = vega10_populate_smc_uvd_levels(hwmgr);
2542 	PP_ASSERT_WITH_CODE(!result,
2543 			"Failed to initialize UVD Level!",
2544 			return result);
2545 
2546 	if (data->registry_data.clock_stretcher_support) {
2547 		result = vega10_populate_clock_stretcher_table(hwmgr);
2548 		PP_ASSERT_WITH_CODE(!result,
2549 				"Failed to populate Clock Stretcher Table!",
2550 				return result);
2551 	}
2552 
2553 	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2554 	if (!result) {
2555 		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
2556 		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
2557 		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
2558 		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2559 		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2560 		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2561 				SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
2562 
2563 		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2564 				SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
2565 
2566 		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2567 		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2568 		if (0 != boot_up_values.usVddc) {
2569 			smum_send_msg_to_smc_with_parameter(hwmgr,
2570 						PPSMC_MSG_SetFloorSocVoltage,
2571 						(boot_up_values.usVddc * 4));
2572 			data->vbios_boot_state.bsoc_vddc_lock = true;
2573 		} else {
2574 			data->vbios_boot_state.bsoc_vddc_lock = false;
2575 		}
2576 		smum_send_msg_to_smc_with_parameter(hwmgr,
2577 				PPSMC_MSG_SetMinDeepSleepDcefclk,
2578 			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2579 	}
2580 
2581 	result = vega10_populate_avfs_parameters(hwmgr);
2582 	PP_ASSERT_WITH_CODE(!result,
2583 			"Failed to initialize AVFS Parameters!",
2584 			return result);
2585 
2586 	result = vega10_populate_gpio_parameters(hwmgr);
2587 	PP_ASSERT_WITH_CODE(!result,
2588 			"Failed to initialize GPIO Parameters!",
2589 			return result);
2590 
2591 	pp_table->GfxclkAverageAlpha = (uint8_t)
2592 			(data->gfxclk_average_alpha);
2593 	pp_table->SocclkAverageAlpha = (uint8_t)
2594 			(data->socclk_average_alpha);
2595 	pp_table->UclkAverageAlpha = (uint8_t)
2596 			(data->uclk_average_alpha);
2597 	pp_table->GfxActivityAverageAlpha = (uint8_t)
2598 			(data->gfx_activity_average_alpha);
2599 
2600 	vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2601 
2602 	result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
2603 
2604 	PP_ASSERT_WITH_CODE(!result,
2605 			"Failed to upload PPtable!", return result);
2606 
2607 	result = vega10_avfs_enable(hwmgr, true);
2608 	PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2609 					return result);
2610 	vega10_acg_enable(hwmgr);
2611 
2612 	return 0;
2613 }
2614 
2615 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2616 {
2617 	struct vega10_hwmgr *data = hwmgr->backend;
2618 
2619 	if (data->smu_features[GNLD_THERMAL].supported) {
2620 		if (data->smu_features[GNLD_THERMAL].enabled)
2621 			pr_info("THERMAL Feature Already enabled!\n");
2622 
2623 		PP_ASSERT_WITH_CODE(
2624 				!vega10_enable_smc_features(hwmgr,
2625 				true,
2626 				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2627 				"Enable THERMAL Feature Failed!",
2628 				return -1);
2629 		data->smu_features[GNLD_THERMAL].enabled = true;
2630 	}
2631 
2632 	return 0;
2633 }
2634 
2635 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2636 {
2637 	struct vega10_hwmgr *data = hwmgr->backend;
2638 
2639 	if (data->smu_features[GNLD_THERMAL].supported) {
2640 		if (!data->smu_features[GNLD_THERMAL].enabled)
2641 			pr_info("THERMAL Feature Already disabled!\n");
2642 
2643 		PP_ASSERT_WITH_CODE(
2644 				!vega10_enable_smc_features(hwmgr,
2645 				false,
2646 				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2647 				"disable THERMAL Feature Failed!",
2648 				return -1);
2649 		data->smu_features[GNLD_THERMAL].enabled = false;
2650 	}
2651 
2652 	return 0;
2653 }
2654 
2655 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2656 {
2657 	struct vega10_hwmgr *data = hwmgr->backend;
2658 
2659 	if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2660 		if (data->smu_features[GNLD_VR0HOT].supported) {
2661 			PP_ASSERT_WITH_CODE(
2662 					!vega10_enable_smc_features(hwmgr,
2663 					true,
2664 					data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2665 					"Attempt to Enable VR0 Hot feature Failed!",
2666 					return -1);
2667 			data->smu_features[GNLD_VR0HOT].enabled = true;
2668 		} else {
2669 			if (data->smu_features[GNLD_VR1HOT].supported) {
2670 				PP_ASSERT_WITH_CODE(
2671 						!vega10_enable_smc_features(hwmgr,
2672 						true,
2673 						data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2674 						"Attempt to Enable VR1 Hot feature Failed!",
2675 						return -1);
2676 				data->smu_features[GNLD_VR1HOT].enabled = true;
2677 			}
2678 		}
2679 	}
2680 	return 0;
2681 }
2682 
2683 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2684 {
2685 	struct vega10_hwmgr *data = hwmgr->backend;
2686 
2687 	if (data->registry_data.ulv_support) {
2688 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2689 				true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2690 				"Enable ULV Feature Failed!",
2691 				return -1);
2692 		data->smu_features[GNLD_ULV].enabled = true;
2693 	}
2694 
2695 	return 0;
2696 }
2697 
2698 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2699 {
2700 	struct vega10_hwmgr *data = hwmgr->backend;
2701 
2702 	if (data->registry_data.ulv_support) {
2703 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2704 				false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2705 				"disable ULV Feature Failed!",
2706 				return -EINVAL);
2707 		data->smu_features[GNLD_ULV].enabled = false;
2708 	}
2709 
2710 	return 0;
2711 }
2712 
2713 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2714 {
2715 	struct vega10_hwmgr *data = hwmgr->backend;
2716 
2717 	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2718 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2719 				true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2720 				"Attempt to Enable DS_GFXCLK Feature Failed!",
2721 				return -EINVAL);
2722 		data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2723 	}
2724 
2725 	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2726 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2727 				true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2728 				"Attempt to Enable DS_SOCCLK Feature Failed!",
2729 				return -EINVAL);
2730 		data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2731 	}
2732 
2733 	if (data->smu_features[GNLD_DS_LCLK].supported) {
2734 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2735 				true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2736 				"Attempt to Enable DS_LCLK Feature Failed!",
2737 				return -EINVAL);
2738 		data->smu_features[GNLD_DS_LCLK].enabled = true;
2739 	}
2740 
2741 	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2742 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2743 				true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2744 				"Attempt to Enable DS_DCEFCLK Feature Failed!",
2745 				return -EINVAL);
2746 		data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2747 	}
2748 
2749 	return 0;
2750 }
2751 
2752 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2753 {
2754 	struct vega10_hwmgr *data = hwmgr->backend;
2755 
2756 	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2757 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2758 				false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2759 				"Attempt to disable DS_GFXCLK Feature Failed!",
2760 				return -EINVAL);
2761 		data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2762 	}
2763 
2764 	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2765 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2766 				false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2767 				"Attempt to disable DS_SOCCLK Feature Failed!",
2768 				return -EINVAL);
2769 		data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2770 	}
2771 
2772 	if (data->smu_features[GNLD_DS_LCLK].supported) {
2773 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2774 				false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2775 				"Attempt to disable DS_LCLK Feature Failed!",
2776 				return -EINVAL);
2777 		data->smu_features[GNLD_DS_LCLK].enabled = false;
2778 	}
2779 
2780 	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2781 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2782 				false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2783 				"Attempt to disable DS_DCEFCLK Feature Failed!",
2784 				return -EINVAL);
2785 		data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2786 	}
2787 
2788 	return 0;
2789 }
2790 
2791 static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2792 {
2793 	struct vega10_hwmgr *data = hwmgr->backend;
2794 	uint32_t i, feature_mask = 0;
2795 
2796 
2797 	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
2798 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2799 				false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2800 		"Attempt to disable LED DPM feature failed!", return -EINVAL);
2801 		data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2802 	}
2803 
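	/*
	 * Collect every currently enabled DPM feature selected by the
	 * bitmap and disable them with a single SMC features request.
	 */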
2804 	for (i = 0; i < GNLD_DPM_MAX; i++) {
2805 		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2806 			if (data->smu_features[i].supported) {
2807 				if (data->smu_features[i].enabled) {
2808 					feature_mask |= data->smu_features[i].
2809 							smu_feature_bitmap;
2810 					data->smu_features[i].enabled = false;
2811 				}
2812 			}
2813 		}
2814 	}
2815 
2816 	vega10_enable_smc_features(hwmgr, false, feature_mask);
2817 
2818 	return 0;
2819 }
2820 
2821 /**
2822  * @brief Tell SMC to enable the supported DPMs.
2823  *
2824  * @param    hwmgr - the address of the powerplay hardware manager.
2825  * @param    bitmap - bitmap of the features to be enabled.
2826  * @return   0 if at least one DPM is successfully enabled.
2827  */
2828 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2829 {
2830 	struct vega10_hwmgr *data = hwmgr->backend;
2831 	uint32_t i, feature_mask = 0;
2832 
2833 	for (i = 0; i < GNLD_DPM_MAX; i++) {
2834 		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2835 			if (data->smu_features[i].supported) {
2836 				if (!data->smu_features[i].enabled) {
2837 					feature_mask |= data->smu_features[i].
2838 							smu_feature_bitmap;
2839 					data->smu_features[i].enabled = true;
2840 				}
2841 			}
2842 		}
2843 	}
2844 
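	/*
	 * Enable the collected feature mask; if the SMC rejects it, roll
	 * the enabled flags back so driver state stays consistent with
	 * the SMC.
	 */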
2845 	if (vega10_enable_smc_features(hwmgr,
2846 			true, feature_mask)) {
2847 		for (i = 0; i < GNLD_DPM_MAX; i++) {
2848 			if (data->smu_features[i].smu_feature_bitmap &
2849 					feature_mask)
2850 				data->smu_features[i].enabled = false;
2851 		}
2852 	}
2853 
2854 	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
2855 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2856 				true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2857 		"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2858 		data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2859 	}
2860 
2861 	if (data->vbios_boot_state.bsoc_vddc_lock) {
2862 		smum_send_msg_to_smc_with_parameter(hwmgr,
2863 						PPSMC_MSG_SetFloorSocVoltage, 0);
2864 		data->vbios_boot_state.bsoc_vddc_lock = false;
2865 	}
2866 
2867 	if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
2868 		if (data->smu_features[GNLD_ACDC].supported) {
2869 			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2870 					true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2871 					"Attempt to Enable ACDC Feature Failed!",
2872 					return -1);
2873 			data->smu_features[GNLD_ACDC].enabled = true;
2874 		}
2875 	}
2876 
2877 	return 0;
2878 }
2879 
2880 static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
2881 {
2882 	struct vega10_hwmgr *data = hwmgr->backend;
2883 
2884 	if (data->smu_features[GNLD_PCC_LIMIT].supported) {
2885 		if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
2886 			pr_info("GNLD_PCC_LIMIT has been %s\n", enable ? "enabled" : "disabled");
2887 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2888 				enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
2889 				"Attempt to Enable PCC Limit feature Failed!",
2890 				return -EINVAL);
2891 		data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
2892 	}
2893 
2894 	return 0;
2895 }
2896 
2897 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2898 {
2899 	struct vega10_hwmgr *data = hwmgr->backend;
2900 	int tmp_result, result = 0;
2901 
2902 	vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2903 
2904 	smum_send_msg_to_smc_with_parameter(hwmgr,
2905 		PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2906 
2907 	tmp_result = vega10_construct_voltage_tables(hwmgr);
2908 	PP_ASSERT_WITH_CODE(!tmp_result,
2909 			"Failed to construct voltage tables!",
2910 			result = tmp_result);
2911 
2912 	tmp_result = vega10_init_smc_table(hwmgr);
2913 	PP_ASSERT_WITH_CODE(!tmp_result,
2914 			"Failed to initialize SMC table!",
2915 			result = tmp_result);
2916 
2917 	if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2918 		tmp_result = vega10_enable_thermal_protection(hwmgr);
2919 		PP_ASSERT_WITH_CODE(!tmp_result,
2920 				"Failed to enable thermal protection!",
2921 				result = tmp_result);
2922 	}
2923 
2924 	tmp_result = vega10_enable_vrhot_feature(hwmgr);
2925 	PP_ASSERT_WITH_CODE(!tmp_result,
2926 			"Failed to enable VR hot feature!",
2927 			result = tmp_result);
2928 
2929 	tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2930 	PP_ASSERT_WITH_CODE(!tmp_result,
2931 			"Failed to enable deep sleep master switch!",
2932 			result = tmp_result);
2933 
2934 	tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2935 	PP_ASSERT_WITH_CODE(!tmp_result,
2936 			"Failed to start DPM!", result = tmp_result);
2937 
2938 	/* enable didt; do not abort if enabling didt fails */
2939 	tmp_result = vega10_enable_didt_config(hwmgr);
2940 	PP_ASSERT(!tmp_result,
2941 			"Failed to enable didt config!");
2942 
2943 	tmp_result = vega10_enable_power_containment(hwmgr);
2944 	PP_ASSERT_WITH_CODE(!tmp_result,
2945 			"Failed to enable power containment!",
2946 			result = tmp_result);
2947 
2948 	tmp_result = vega10_power_control_set_level(hwmgr);
2949 	PP_ASSERT_WITH_CODE(!tmp_result,
2950 			"Failed to power control set level!",
2951 			result = tmp_result);
2952 
2953 	tmp_result = vega10_enable_ulv(hwmgr);
2954 	PP_ASSERT_WITH_CODE(!tmp_result,
2955 			"Failed to enable ULV!",
2956 			result = tmp_result);
2957 
2958 	return result;
2959 }
2960 
2961 static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2962 {
2963 	return sizeof(struct vega10_power_state);
2964 }
2965 
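/**
 * @brief Callback used while parsing a powerplay table entry: fills the
 *        classification/display flags and derives the low and high
 *        performance levels of a vega10_power_state from the SOCCLK,
 *        GFXCLK and MCLK dependency tables.
 */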
2966 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2967 		void *state, struct pp_power_state *power_state,
2968 		void *pp_table, uint32_t classification_flag)
2969 {
2970 	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2971 	struct vega10_power_state *vega10_power_state =
2972 			cast_phw_vega10_power_state(&(power_state->hardware));
2973 	struct vega10_performance_level *performance_level;
2974 	ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2975 	ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2976 			(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2977 	ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2978 			(ATOM_Vega10_SOCCLK_Dependency_Table *)
2979 			(((unsigned long)powerplay_table) +
2980 			le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2981 	ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2982 			(ATOM_Vega10_GFXCLK_Dependency_Table *)
2983 			(((unsigned long)powerplay_table) +
2984 			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2985 	ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2986 			(ATOM_Vega10_MCLK_Dependency_Table *)
2987 			(((unsigned long)powerplay_table) +
2988 			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2989 
2990 
2991 	/* The following fields are not initialized here:
2992 	 * id orderedList allStatesList
2993 	 */
2994 	power_state->classification.ui_label =
2995 			(le16_to_cpu(state_entry->usClassification) &
2996 			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2997 			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2998 	power_state->classification.flags = classification_flag;
2999 	/* NOTE: There is a classification2 flag in BIOS
3000 	 * that is not being used right now
3001 	 */
3002 	power_state->classification.temporary_state = false;
3003 	power_state->classification.to_be_deleted = false;
3004 
3005 	power_state->validation.disallowOnDC =
3006 			((le32_to_cpu(state_entry->ulCapsAndSettings) &
3007 					ATOM_Vega10_DISALLOW_ON_DC) != 0);
3008 
3009 	power_state->display.disableFrameModulation = false;
3010 	power_state->display.limitRefreshrate = false;
3011 	power_state->display.enableVariBright =
3012 			((le32_to_cpu(state_entry->ulCapsAndSettings) &
3013 					ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
3014 
3015 	power_state->validation.supportedPowerLevels = 0;
3016 	power_state->uvd_clocks.VCLK = 0;
3017 	power_state->uvd_clocks.DCLK = 0;
3018 	power_state->temperatures.min = 0;
3019 	power_state->temperatures.max = 0;
3020 
3021 	performance_level = &(vega10_power_state->performance_levels
3022 			[vega10_power_state->performance_level_count++]);
3023 
3024 	PP_ASSERT_WITH_CODE(
3025 			(vega10_power_state->performance_level_count <
3026 					NUM_GFXCLK_DPM_LEVELS),
3027 			"Performance level count exceeds SMC limit!",
3028 			return -1);
3029 
3030 	PP_ASSERT_WITH_CODE(
3031 			(vega10_power_state->performance_level_count <=
3032 					hwmgr->platform_descriptor.
3033 					hardwareActivityPerformanceLevels),
3034 			"Performance level count exceeds Driver limit!",
3035 			return -1);
3036 
3037 	/* Performance levels are arranged from low to high. */
3038 	performance_level->soc_clock = socclk_dep_table->entries
3039 			[state_entry->ucSocClockIndexLow].ulClk;
3040 	performance_level->gfx_clock = gfxclk_dep_table->entries
3041 			[state_entry->ucGfxClockIndexLow].ulClk;
3042 	performance_level->mem_clock = mclk_dep_table->entries
3043 			[state_entry->ucMemClockIndexLow].ulMemClk;
3044 
3045 	performance_level = &(vega10_power_state->performance_levels
3046 				[vega10_power_state->performance_level_count++]);
3047 	performance_level->soc_clock = socclk_dep_table->entries
3048 				[state_entry->ucSocClockIndexHigh].ulClk;
3049 	if (gfxclk_dep_table->ucRevId == 0) {
3050 		performance_level->gfx_clock = gfxclk_dep_table->entries
3051 			[state_entry->ucGfxClockIndexHigh].ulClk;
3052 	} else if (gfxclk_dep_table->ucRevId == 1) {
3053 		patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
3054 		performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
3055 	}
3056 
3057 	performance_level->mem_clock = mclk_dep_table->entries
3058 			[state_entry->ucMemClockIndexHigh].ulMemClk;
3059 	return 0;
3060 }
3061 
3062 static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3063 		unsigned long entry_index, struct pp_power_state *state)
3064 {
3065 	int result;
3066 	struct vega10_power_state *ps;
3067 
3068 	state->hardware.magic = PhwVega10_Magic;
3069 
3070 	ps = cast_phw_vega10_power_state(&state->hardware);
3071 
3072 	result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3073 			vega10_get_pp_table_entry_callback_func);
3074 
3075 	/*
3076 	 * This is the earliest time we have all the dependency table
3077 	 * and the VBIOS boot state
3078 	 */
3079 	/* set DC compatible flag if this state supports DC */
3080 	if (!state->validation.disallowOnDC)
3081 		ps->dc_compatible = true;
3082 
3083 	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3084 	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3085 
3086 	return 0;
3087 }
3088 
3089 static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3090 	     struct pp_hw_power_state *hw_ps)
3091 {
3092 	return 0;
3093 }
3094 
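/**
 * @brief Adjust the requested power state to current conditions: cap the
 *        clocks to the DC limits when running on battery, honor the
 *        stable-pstate and minimum display clocks, and decide whether MCLK
 *        switching must be disabled for the active display configuration.
 */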
3095 static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3096 				struct pp_power_state  *request_ps,
3097 			const struct pp_power_state *current_ps)
3098 {
3099 	struct amdgpu_device *adev = hwmgr->adev;
3100 	struct vega10_power_state *vega10_ps =
3101 				cast_phw_vega10_power_state(&request_ps->hardware);
3102 	uint32_t sclk;
3103 	uint32_t mclk;
3104 	struct PP_Clocks minimum_clocks = {0};
3105 	bool disable_mclk_switching;
3106 	bool disable_mclk_switching_for_frame_lock;
3107 	bool disable_mclk_switching_for_vr;
3108 	bool force_mclk_high;
3109 	const struct phm_clock_and_voltage_limits *max_limits;
3110 	uint32_t i;
3111 	struct vega10_hwmgr *data = hwmgr->backend;
3112 	struct phm_ppt_v2_information *table_info =
3113 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
3114 	int32_t count;
3115 	uint32_t stable_pstate_sclk_dpm_percentage;
3116 	uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3117 	uint32_t latency;
3118 
3119 	data->battery_state = (PP_StateUILabel_Battery ==
3120 			request_ps->classification.ui_label);
3121 
3122 	if (vega10_ps->performance_level_count != 2)
3123 		pr_info("Vega10 should always have 2 performance levels\n");
3124 
3125 	max_limits = adev->pm.ac_power ?
3126 			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3127 			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
3128 
3129 	/* Cap clock DPM tables at DC MAX if it is in DC. */
3130 	if (!adev->pm.ac_power) {
3131 		for (i = 0; i < vega10_ps->performance_level_count; i++) {
3132 			if (vega10_ps->performance_levels[i].mem_clock >
3133 				max_limits->mclk)
3134 				vega10_ps->performance_levels[i].mem_clock =
3135 						max_limits->mclk;
3136 			if (vega10_ps->performance_levels[i].gfx_clock >
3137 				max_limits->sclk)
3138 				vega10_ps->performance_levels[i].gfx_clock =
3139 						max_limits->sclk;
3140 		}
3141 	}
3142 
3143 	/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3144 	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3145 	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3146 
3147 	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3148 		stable_pstate_sclk_dpm_percentage =
3149 			data->registry_data.stable_pstate_sclk_dpm_percentage;
3150 		PP_ASSERT_WITH_CODE(
3151 			data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3152 			data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3153 			"percent sclk value must range from 1% to 100%, setting default value",
3154 			stable_pstate_sclk_dpm_percentage = 75);
3155 
3156 		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3157 		stable_pstate_sclk = (max_limits->sclk *
3158 				stable_pstate_sclk_dpm_percentage) / 100;
3159 
3160 		for (count = table_info->vdd_dep_on_sclk->count - 1;
3161 				count >= 0; count--) {
3162 			if (stable_pstate_sclk >=
3163 					table_info->vdd_dep_on_sclk->entries[count].clk) {
3164 				stable_pstate_sclk =
3165 						table_info->vdd_dep_on_sclk->entries[count].clk;
3166 				break;
3167 			}
3168 		}
3169 
3170 		if (count < 0)
3171 			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3172 
3173 		stable_pstate_mclk = max_limits->mclk;
3174 
3175 		minimum_clocks.engineClock = stable_pstate_sclk;
3176 		minimum_clocks.memoryClock = stable_pstate_mclk;
3177 	}
3178 
3179 	disable_mclk_switching_for_frame_lock =
3180 		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3181 	disable_mclk_switching_for_vr =
3182 		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
3183 	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
3184 
3185 	if (hwmgr->display_config->num_display == 0)
3186 		disable_mclk_switching = false;
3187 	else
3188 		disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
3189 			disable_mclk_switching_for_frame_lock ||
3190 			disable_mclk_switching_for_vr ||
3191 			force_mclk_high;
3192 
3193 	sclk = vega10_ps->performance_levels[0].gfx_clock;
3194 	mclk = vega10_ps->performance_levels[0].mem_clock;
3195 
3196 	if (sclk < minimum_clocks.engineClock)
3197 		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3198 				max_limits->sclk : minimum_clocks.engineClock;
3199 
3200 	if (mclk < minimum_clocks.memoryClock)
3201 		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3202 				max_limits->mclk : minimum_clocks.memoryClock;
3203 
3204 	vega10_ps->performance_levels[0].gfx_clock = sclk;
3205 	vega10_ps->performance_levels[0].mem_clock = mclk;
3206 
3207 	if (vega10_ps->performance_levels[1].gfx_clock <
3208 			vega10_ps->performance_levels[0].gfx_clock)
3209 		vega10_ps->performance_levels[0].gfx_clock =
3210 				vega10_ps->performance_levels[1].gfx_clock;
3211 
3212 	if (disable_mclk_switching) {
3213 		/* Set Mclk the max of level 0 and level 1 */
3214 		if (mclk < vega10_ps->performance_levels[1].mem_clock)
3215 			mclk = vega10_ps->performance_levels[1].mem_clock;
3216 
3217 		/* Find the lowest MCLK frequency that is within
3218 		 * the tolerable latency defined in DAL
3219 		 */
3220 		latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3221 		for (i = 0; i < data->mclk_latency_table.count; i++) {
3222 			if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3223 				(data->mclk_latency_table.entries[i].frequency >=
3224 						vega10_ps->performance_levels[0].mem_clock) &&
3225 				(data->mclk_latency_table.entries[i].frequency <=
3226 						vega10_ps->performance_levels[1].mem_clock))
3227 				mclk = data->mclk_latency_table.entries[i].frequency;
3228 		}
3229 		vega10_ps->performance_levels[0].mem_clock = mclk;
3230 	} else {
3231 		if (vega10_ps->performance_levels[1].mem_clock <
3232 				vega10_ps->performance_levels[0].mem_clock)
3233 			vega10_ps->performance_levels[0].mem_clock =
3234 					vega10_ps->performance_levels[1].mem_clock;
3235 	}
3236 
3237 	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3238 		for (i = 0; i < vega10_ps->performance_level_count; i++) {
3239 			vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3240 			vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3241 		}
3242 	}
3243 
3244 	return 0;
3245 }
3246 
3247 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3248 {
3249 	struct vega10_hwmgr *data = hwmgr->backend;
3250 
3251 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3252 		data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3253 
3254 	return 0;
3255 }
3256 
3257 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3258 		struct pp_hwmgr *hwmgr, const void *input)
3259 {
3260 	int result = 0;
3261 	struct vega10_hwmgr *data = hwmgr->backend;
3262 	struct vega10_dpm_table *dpm_table = &data->dpm_table;
3263 	struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
3264 	struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
3265 	int count;
3266 
3267 	if (!data->need_update_dpm_table)
3268 		return 0;
3269 
3270 	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3271 		for (count = 0; count < dpm_table->gfx_table.count; count++)
3272 			dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3273 	}
3274 
3275 	odn_clk_table = &odn_table->vdd_dep_on_mclk;
3276 	if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3277 		for (count = 0; count < dpm_table->mem_table.count; count++)
3278 			dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3279 	}
3280 
3281 	if (data->need_update_dpm_table &
3282 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
3283 		result = vega10_populate_all_graphic_levels(hwmgr);
3284 		PP_ASSERT_WITH_CODE((0 == result),
3285 				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3286 				return result);
3287 	}
3288 
3289 	if (data->need_update_dpm_table &
3290 			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3291 		result = vega10_populate_all_memory_levels(hwmgr);
3292 		PP_ASSERT_WITH_CODE((0 == result),
3293 				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3294 				return result);
3295 	}
3296 
3297 	vega10_populate_vddc_soc_levels(hwmgr);
3298 
3299 	return result;
3300 }
3301 
3302 static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3303 		struct vega10_single_dpm_table *dpm_table,
3304 		uint32_t low_limit, uint32_t high_limit)
3305 {
3306 	uint32_t i;
3307 
3308 	for (i = 0; i < dpm_table->count; i++) {
3309 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3310 		    (dpm_table->dpm_levels[i].value > high_limit))
3311 			dpm_table->dpm_levels[i].enabled = false;
3312 		else
3313 			dpm_table->dpm_levels[i].enabled = true;
3314 	}
3315 	return 0;
3316 }
3317 
3318 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3319 		struct vega10_single_dpm_table *dpm_table,
3320 		uint32_t low_limit, uint32_t high_limit,
3321 		uint32_t disable_dpm_mask)
3322 {
3323 	uint32_t i;
3324 
3325 	for (i = 0; i < dpm_table->count; i++) {
3326 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3327 		    (dpm_table->dpm_levels[i].value > high_limit))
3328 			dpm_table->dpm_levels[i].enabled = false;
3329 		else if (!((1 << i) & disable_dpm_mask))
3330 			dpm_table->dpm_levels[i].enabled = false;
3331 		else
3332 			dpm_table->dpm_levels[i].enabled = true;
3333 	}
3334 	return 0;
3335 }
3336 
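/**
 * @brief Restrict the SOC, GFX and MEM DPM tables to the clock range spanned
 *        by the power state's lowest and highest performance levels.
 */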
3337 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3338 		const struct vega10_power_state *vega10_ps)
3339 {
3340 	struct vega10_hwmgr *data = hwmgr->backend;
3341 	uint32_t high_limit_count;
3342 
3343 	PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3344 			"power state did not have any performance level",
3345 			return -1);
3346 
3347 	high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3348 
3349 	vega10_trim_single_dpm_states(hwmgr,
3350 			&(data->dpm_table.soc_table),
3351 			vega10_ps->performance_levels[0].soc_clock,
3352 			vega10_ps->performance_levels[high_limit_count].soc_clock);
3353 
3354 	vega10_trim_single_dpm_states_with_mask(hwmgr,
3355 			&(data->dpm_table.gfx_table),
3356 			vega10_ps->performance_levels[0].gfx_clock,
3357 			vega10_ps->performance_levels[high_limit_count].gfx_clock,
3358 			data->disable_dpm_mask);
3359 
3360 	vega10_trim_single_dpm_states(hwmgr,
3361 			&(data->dpm_table.mem_table),
3362 			vega10_ps->performance_levels[0].mem_clock,
3363 			vega10_ps->performance_levels[high_limit_count].mem_clock);
3364 
3365 	return 0;
3366 }
3367 
3368 static uint32_t vega10_find_lowest_dpm_level(
3369 		struct vega10_single_dpm_table *table)
3370 {
3371 	uint32_t i;
3372 
3373 	for (i = 0; i < table->count; i++) {
3374 		if (table->dpm_levels[i].enabled)
3375 			break;
3376 	}
3377 
3378 	return i;
3379 }
3380 
3381 static uint32_t vega10_find_highest_dpm_level(
3382 		struct vega10_single_dpm_table *table)
3383 {
3384 	uint32_t i = 0;
3385 
3386 	if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3387 		for (i = table->count; i > 0; i--) {
3388 			if (table->dpm_levels[i - 1].enabled)
3389 				return i - 1;
3390 		}
3391 	} else {
3392 		pr_info("DPM Table Has Too Many Entries!");
3393 		return MAX_REGULAR_DPM_NUMBER - 1;
3394 	}
3395 
3396 	return i;
3397 }
3398 
3399 static void vega10_apply_dal_minimum_voltage_request(
3400 		struct pp_hwmgr *hwmgr)
3401 {
3402 	return;
3403 }
3404 
3405 static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3406 {
3407 	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3408 	struct phm_ppt_v2_information *table_info =
3409 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
3410 
3411 	vdd_dep_table_on_mclk  = table_info->vdd_dep_on_mclk;
3412 
3413 	return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
3414 }
3415 
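/**
 * @brief Push the cached gfx/mem boot levels to the SMC as soft-minimum DPM
 *        levels (using the matching SOCCLK index when memory is at its top
 *        level); clocks whose DPM is key-disabled are skipped.
 */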
3416 static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3417 {
3418 	struct vega10_hwmgr *data = hwmgr->backend;
3419 	uint32_t socclk_idx;
3420 
3421 	vega10_apply_dal_minimum_voltage_request(hwmgr);
3422 
3423 	if (!data->registry_data.sclk_dpm_key_disabled) {
3424 		if (data->smc_state_table.gfx_boot_level !=
3425 				data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3426 			smum_send_msg_to_smc_with_parameter(hwmgr,
3427 				PPSMC_MSG_SetSoftMinGfxclkByIndex,
3428 				data->smc_state_table.gfx_boot_level);
3429 			data->dpm_table.gfx_table.dpm_state.soft_min_level =
3430 					data->smc_state_table.gfx_boot_level;
3431 		}
3432 	}
3433 
3434 	if (!data->registry_data.mclk_dpm_key_disabled) {
3435 		if (data->smc_state_table.mem_boot_level !=
3436 				data->dpm_table.mem_table.dpm_state.soft_min_level) {
3437 			if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3438 				socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
3439 				smum_send_msg_to_smc_with_parameter(hwmgr,
3440 						PPSMC_MSG_SetSoftMinSocclkByIndex,
3441 						socclk_idx);
3442 			} else {
3443 				smum_send_msg_to_smc_with_parameter(hwmgr,
3444 						PPSMC_MSG_SetSoftMinUclkByIndex,
3445 						data->smc_state_table.mem_boot_level);
3446 			}
3447 			data->dpm_table.mem_table.dpm_state.soft_min_level =
3448 					data->smc_state_table.mem_boot_level;
3449 		}
3450 	}
3451 
3452 	return 0;
3453 }
3454 
3455 static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3456 {
3457 	struct vega10_hwmgr *data = hwmgr->backend;
3458 
3459 	vega10_apply_dal_minimum_voltage_request(hwmgr);
3460 
3461 	if (!data->registry_data.sclk_dpm_key_disabled) {
3462 		if (data->smc_state_table.gfx_max_level !=
3463 			data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3464 			smum_send_msg_to_smc_with_parameter(hwmgr,
3465 				PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3466 				data->smc_state_table.gfx_max_level);
3467 			data->dpm_table.gfx_table.dpm_state.soft_max_level =
3468 					data->smc_state_table.gfx_max_level;
3469 		}
3470 	}
3471 
3472 	if (!data->registry_data.mclk_dpm_key_disabled) {
3473 		if (data->smc_state_table.mem_max_level !=
3474 			data->dpm_table.mem_table.dpm_state.soft_max_level) {
3475 			smum_send_msg_to_smc_with_parameter(hwmgr,
3476 					PPSMC_MSG_SetSoftMaxUclkByIndex,
3477 					data->smc_state_table.mem_max_level);
3478 			data->dpm_table.mem_table.dpm_state.soft_max_level =
3479 					data->smc_state_table.mem_max_level;
3480 		}
3481 	}
3482 
3483 	return 0;
3484 }
3485 
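/**
 * @brief Trim the DPM tables to the new power state, derive the boot and max
 *        levels for GFXCLK and UCLK, upload them to the SMC, and mark the
 *        levels in between as enabled.
 */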
3486 static int vega10_generate_dpm_level_enable_mask(
3487 		struct pp_hwmgr *hwmgr, const void *input)
3488 {
3489 	struct vega10_hwmgr *data = hwmgr->backend;
3490 	const struct phm_set_power_state_input *states =
3491 			(const struct phm_set_power_state_input *)input;
3492 	const struct vega10_power_state *vega10_ps =
3493 			cast_const_phw_vega10_power_state(states->pnew_state);
3494 	int i;
3495 
3496 	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3497 			"Attempt to Trim DPM States Failed!",
3498 			return -1);
3499 
3500 	data->smc_state_table.gfx_boot_level =
3501 			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3502 	data->smc_state_table.gfx_max_level =
3503 			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3504 	data->smc_state_table.mem_boot_level =
3505 			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3506 	data->smc_state_table.mem_max_level =
3507 			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3508 
3509 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3510 			"Attempt to upload DPM Bootup Levels Failed!",
3511 			return -1);
3512 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3513 			"Attempt to upload DPM Max Levels Failed!",
3514 			return -1);
3515 	for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3516 		data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3517 
3518 
3519 	for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3520 		data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3521 
3522 	return 0;
3523 }
3524 
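/**
 * @brief Enable or disable the VCE DPM feature in the SMC, if it is supported.
 */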
3525 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3526 {
3527 	struct vega10_hwmgr *data = hwmgr->backend;
3528 
3529 	if (data->smu_features[GNLD_DPM_VCE].supported) {
3530 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
3531 				enable,
3532 				data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3533 				"Attempt to Enable/Disable DPM VCE Failed!",
3534 				return -1);
3535 		data->smu_features[GNLD_DPM_VCE].enabled = enable;
3536 	}
3537 
3538 	return 0;
3539 }
3540 
3541 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3542 {
3543 	struct vega10_hwmgr *data = hwmgr->backend;
3544 	uint32_t low_sclk_interrupt_threshold = 0;
3545 
3546 	if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
3547 		(data->low_sclk_interrupt_threshold != 0)) {
3548 		low_sclk_interrupt_threshold =
3549 				data->low_sclk_interrupt_threshold;
3550 
3551 		data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3552 				cpu_to_le32(low_sclk_interrupt_threshold);
3553 
3554 		/* This message will also enable SmcToHost Interrupt */
3555 		smum_send_msg_to_smc_with_parameter(hwmgr,
3556 				PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3557 				(uint32_t)low_sclk_interrupt_threshold);
3558 	}
3559 
3560 	return 0;
3561 }
3562 
3563 static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3564 		const void *input)
3565 {
3566 	int tmp_result, result = 0;
3567 	struct vega10_hwmgr *data = hwmgr->backend;
3568 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3569 
3570 	tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3571 	PP_ASSERT_WITH_CODE(!tmp_result,
3572 			"Failed to find DPM states clocks in DPM table!",
3573 			result = tmp_result);
3574 
3575 	tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3576 	PP_ASSERT_WITH_CODE(!tmp_result,
3577 			"Failed to populate and upload SCLK MCLK DPM levels!",
3578 			result = tmp_result);
3579 
3580 	tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3581 	PP_ASSERT_WITH_CODE(!tmp_result,
3582 			"Failed to generate DPM level enabled mask!",
3583 			result = tmp_result);
3584 
3585 	tmp_result = vega10_update_sclk_threshold(hwmgr);
3586 	PP_ASSERT_WITH_CODE(!tmp_result,
3587 			"Failed to update SCLK threshold!",
3588 			result = tmp_result);
3589 
3590 	result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
3591 	PP_ASSERT_WITH_CODE(!result,
3592 			"Failed to upload PPtable!", return result);
3593 
3594 	vega10_update_avfs(hwmgr);
3595 
3596 	data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3597 
3598 	return 0;
3599 }
3600 
3601 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3602 {
3603 	struct pp_power_state *ps;
3604 	struct vega10_power_state *vega10_ps;
3605 
3606 	if (hwmgr == NULL)
3607 		return -EINVAL;
3608 
3609 	ps = hwmgr->request_ps;
3610 
3611 	if (ps == NULL)
3612 		return -EINVAL;
3613 
3614 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3615 
3616 	if (low)
3617 		return vega10_ps->performance_levels[0].gfx_clock;
3618 	else
3619 		return vega10_ps->performance_levels
3620 				[vega10_ps->performance_level_count - 1].gfx_clock;
3621 }
3622 
3623 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3624 {
3625 	struct pp_power_state *ps;
3626 	struct vega10_power_state *vega10_ps;
3627 
3628 	if (hwmgr == NULL)
3629 		return -EINVAL;
3630 
3631 	ps = hwmgr->request_ps;
3632 
3633 	if (ps == NULL)
3634 		return -EINVAL;
3635 
3636 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3637 
3638 	if (low)
3639 		return vega10_ps->performance_levels[0].mem_clock;
3640 	else
3641 		return vega10_ps->performance_levels
3642 				[vega10_ps->performance_level_count-1].mem_clock;
3643 }
3644 
3645 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3646 		uint32_t *query)
3647 {
3648 	uint32_t value;
3649 
3650 	if (!query)
3651 		return -EINVAL;
3652 
3653 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3654 	value = smum_get_argument(hwmgr);
3655 
3656 	/* The SMC returns actual watts; to stay consistent with legacy ASICs, use the low 8 bits as 8 fractional bits */
3657 	*query = value << 8;
3658 
3659 	return 0;
3660 }
3661 
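/**
 * @brief Service AMDGPU_PP_SENSOR_* queries (SCLK, MCLK, GPU load,
 *        temperature, UVD/VCE power state, GPU power and VDDGFX) by asking
 *        the SMC or reading the SVI plane registers.
 */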
3662 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3663 			      void *value, int *size)
3664 {
3665 	struct amdgpu_device *adev = hwmgr->adev;
3666 	uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
3667 	struct vega10_hwmgr *data = hwmgr->backend;
3668 	struct vega10_dpm_table *dpm_table = &data->dpm_table;
3669 	int ret = 0;
3670 	uint32_t val_vid;
3671 
3672 	switch (idx) {
3673 	case AMDGPU_PP_SENSOR_GFX_SCLK:
3674 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
3675 		sclk_mhz = smum_get_argument(hwmgr);
3676 		*((uint32_t *)value) = sclk_mhz * 100;
3677 		break;
3678 	case AMDGPU_PP_SENSOR_GFX_MCLK:
3679 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3680 		mclk_idx = smum_get_argument(hwmgr);
3681 		if (mclk_idx < dpm_table->mem_table.count) {
3682 			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3683 			*size = 4;
3684 		} else {
3685 			ret = -EINVAL;
3686 		}
3687 		break;
3688 	case AMDGPU_PP_SENSOR_GPU_LOAD:
3689 		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3690 		activity_percent = smum_get_argument(hwmgr);
3691 		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3692 		*size = 4;
3693 		break;
3694 	case AMDGPU_PP_SENSOR_GPU_TEMP:
3695 		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3696 		*size = 4;
3697 		break;
3698 	case AMDGPU_PP_SENSOR_UVD_POWER:
3699 		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3700 		*size = 4;
3701 		break;
3702 	case AMDGPU_PP_SENSOR_VCE_POWER:
3703 		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3704 		*size = 4;
3705 		break;
3706 	case AMDGPU_PP_SENSOR_GPU_POWER:
3707 		ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
3708 		break;
3709 	case AMDGPU_PP_SENSOR_VDDGFX:
3710 		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
3711 			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3712 			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3713 		*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
3714 		return 0;
3715 	default:
3716 		ret = -EINVAL;
3717 		break;
3718 	}
3719 
3720 	return ret;
3721 }
3722 
3723 static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3724 		bool has_disp)
3725 {
3726 	smum_send_msg_to_smc_with_parameter(hwmgr,
3727 			PPSMC_MSG_SetUclkFastSwitch,
3728 			has_disp ? 1 : 0);
3729 }
3730 
3731 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3732 		struct pp_display_clock_request *clock_req)
3733 {
3734 	int result = 0;
3735 	enum amd_pp_clock_type clk_type = clock_req->clock_type;
3736 	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
3737 	DSPCLK_e clk_select = 0;
3738 	uint32_t clk_request = 0;
3739 
3740 	switch (clk_type) {
3741 	case amd_pp_dcef_clock:
3742 		clk_select = DSPCLK_DCEFCLK;
3743 		break;
3744 	case amd_pp_disp_clock:
3745 		clk_select = DSPCLK_DISPCLK;
3746 		break;
3747 	case amd_pp_pixel_clock:
3748 		clk_select = DSPCLK_PIXCLK;
3749 		break;
3750 	case amd_pp_phy_clock:
3751 		clk_select = DSPCLK_PHYCLK;
3752 		break;
3753 	default:
3754 		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3755 		result = -1;
3756 		break;
3757 	}
3758 
3759 	if (!result) {
3760 		clk_request = (clk_freq << 16) | clk_select;
3761 		smum_send_msg_to_smc_with_parameter(hwmgr,
3762 				PPSMC_MSG_RequestDisplayClockByFreq,
3763 				clk_request);
3764 	}
3765 
3766 	return result;
3767 }
3768 
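/**
 * @brief Find the first UCLK dependency-table index whose clock is at or
 *        above the requested frequency; falls back to the last index.
 */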
3769 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3770 			struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3771 						uint32_t frequency)
3772 {
3773 	uint8_t count;
3774 	uint8_t i;
3775 
3776 	if (mclk_table == NULL || mclk_table->count == 0)
3777 		return 0;
3778 
3779 	count = (uint8_t)(mclk_table->count);
3780 
3781 	for (i = 0; i < count; i++) {
3782 		if (mclk_table->entries[i].clk >= frequency)
3783 			return i;
3784 	}
3785 
3786 	return i - 1;
3787 }
3788 
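/**
 * @brief After state adjustment, tell the SMC whether UCLK fast switching is
 *        allowed and request the DCEFCLK level, deep-sleep DCEFCLK and
 *        minimum UCLK index required by the current display configuration.
 */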
3789 static int vega10_notify_smc_display_config_after_ps_adjustment(
3790 		struct pp_hwmgr *hwmgr)
3791 {
3792 	struct vega10_hwmgr *data = hwmgr->backend;
3793 	struct vega10_single_dpm_table *dpm_table =
3794 			&data->dpm_table.dcef_table;
3795 	struct phm_ppt_v2_information *table_info =
3796 			(struct phm_ppt_v2_information *)hwmgr->pptable;
3797 	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3798 	uint32_t idx;
3799 	struct PP_Clocks min_clocks = {0};
3800 	uint32_t i;
3801 	struct pp_display_clock_request clock_req;
3802 
3803 	if ((hwmgr->display_config->num_display > 1) &&
3804 	     !hwmgr->display_config->multi_monitor_in_sync &&
3805 	     !hwmgr->display_config->nb_pstate_switch_disable)
3806 		vega10_notify_smc_display_change(hwmgr, false);
3807 	else
3808 		vega10_notify_smc_display_change(hwmgr, true);
3809 
3810 	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
3811 	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
3812 	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3813 
3814 	for (i = 0; i < dpm_table->count; i++) {
3815 		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3816 			break;
3817 	}
3818 
3819 	if (i < dpm_table->count) {
3820 		clock_req.clock_type = amd_pp_dcef_clock;
3821 		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
3822 		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
3823 			smum_send_msg_to_smc_with_parameter(
3824 					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
3825 					min_clocks.dcefClockInSR / 100);
3826 		} else {
3827 			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
3828 		}
3829 	} else {
3830 		pr_debug("Cannot find requested DCEFCLK!");
3831 	}
3832 
3833 	if (min_clocks.memoryClock != 0) {
3834 		idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
3835 		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
3836 		data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
3837 	}
3838 
3839 	return 0;
3840 }
3841 
3842 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3843 {
3844 	struct vega10_hwmgr *data = hwmgr->backend;
3845 
3846 	data->smc_state_table.gfx_boot_level =
3847 	data->smc_state_table.gfx_max_level =
3848 			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3849 	data->smc_state_table.mem_boot_level =
3850 	data->smc_state_table.mem_max_level =
3851 			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3852 
3853 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3854 			"Failed to upload boot level to highest!",
3855 			return -1);
3856 
3857 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3858 			"Failed to upload dpm max level to highest!",
3859 			return -1);
3860 
3861 	return 0;
3862 }
3863 
3864 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3865 {
3866 	struct vega10_hwmgr *data = hwmgr->backend;
3867 
3868 	data->smc_state_table.gfx_boot_level =
3869 	data->smc_state_table.gfx_max_level =
3870 			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3871 	data->smc_state_table.mem_boot_level =
3872 	data->smc_state_table.mem_max_level =
3873 			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3874 
3875 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3876 			"Failed to upload boot level to highest!",
3877 			return -1);
3878 
3879 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3880 			"Failed to upload dpm max level to highest!",
3881 			return -1);
3882 
3883 	return 0;
3884 
3885 }
3886 
3887 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3888 {
3889 	struct vega10_hwmgr *data = hwmgr->backend;
3890 
3891 	data->smc_state_table.gfx_boot_level =
3892 			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3893 	data->smc_state_table.gfx_max_level =
3894 			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3895 	data->smc_state_table.mem_boot_level =
3896 			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3897 	data->smc_state_table.mem_max_level =
3898 			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3899 
3900 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3901 			"Failed to upload DPM Bootup Levels!",
3902 			return -1);
3903 
3904 	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3905 			"Failed to upload DPM Max Levels!",
3906 			return -1);
3907 	return 0;
3908 }
3909 
3910 static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3911 				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
3912 {
3913 	struct phm_ppt_v2_information *table_info =
3914 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
3915 
3916 	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
3917 		table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
3918 		table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
3919 		*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
3920 		*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
3921 		*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
3922 		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
3923 		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
3924 	}
3925 
3926 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3927 		*sclk_mask = 0;
3928 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3929 		*mclk_mask = 0;
3930 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3931 		*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3932 		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
3933 		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
3934 	}
3935 	return 0;
3936 }
3937 
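/**
 * @brief Map an AMD_FAN_CTRL_* mode onto the fan controller: force 100%
 *        speed for NONE, stop SMC fan control for MANUAL, start it for AUTO.
 */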
3938 static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
3939 {
3940 	switch (mode) {
3941 	case AMD_FAN_CTRL_NONE:
3942 		vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
3943 		break;
3944 	case AMD_FAN_CTRL_MANUAL:
3945 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
3946 			vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
3947 		break;
3948 	case AMD_FAN_CTRL_AUTO:
3949 		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
3950 			vega10_fan_ctrl_start_smc_fan_control(hwmgr);
3951 		break;
3952 	default:
3953 		break;
3954 	}
3955 }
3956 
3957 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
3958 		enum pp_clock_type type, uint32_t mask)
3959 {
3960 	struct vega10_hwmgr *data = hwmgr->backend;
3961 
3962 	switch (type) {
3963 	case PP_SCLK:
3964 		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
3965 		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
3966 
3967 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3968 			"Failed to upload boot level to lowest!",
3969 			return -EINVAL);
3970 
3971 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3972 			"Failed to upload dpm max level to highest!",
3973 			return -EINVAL);
3974 		break;
3975 
3976 	case PP_MCLK:
3977 		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
3978 		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
3979 
3980 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3981 			"Failed to upload boot level to lowest!",
3982 			return -EINVAL);
3983 
3984 		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3985 			"Failed to upload dpm max level to highest!",
3986 			return -EINVAL);
3987 
3988 		break;
3989 
3990 	case PP_PCIE:
3991 	default:
3992 		break;
3993 	}
3994 
3995 	return 0;
3996 }
3997 
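/**
 * @brief Apply an amd_dpm_forced_level: force the highest or lowest levels,
 *        return to auto, or pin SCLK/MCLK to the profiling masks, and adjust
 *        the fan mode when entering or leaving the peak profile.
 */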
3998 static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
3999 				enum amd_dpm_forced_level level)
4000 {
4001 	int ret = 0;
4002 	uint32_t sclk_mask = 0;
4003 	uint32_t mclk_mask = 0;
4004 	uint32_t soc_mask = 0;
4005 
4006 	if (hwmgr->pstate_sclk == 0)
4007 		vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4008 
4009 	switch (level) {
4010 	case AMD_DPM_FORCED_LEVEL_HIGH:
4011 		ret = vega10_force_dpm_highest(hwmgr);
4012 		break;
4013 	case AMD_DPM_FORCED_LEVEL_LOW:
4014 		ret = vega10_force_dpm_lowest(hwmgr);
4015 		break;
4016 	case AMD_DPM_FORCED_LEVEL_AUTO:
4017 		ret = vega10_unforce_dpm_levels(hwmgr);
4018 		break;
4019 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4020 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4021 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4022 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4023 		ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4024 		if (ret)
4025 			return ret;
4026 		vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4027 		vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4028 		break;
4029 	case AMD_DPM_FORCED_LEVEL_MANUAL:
4030 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
4031 	default:
4032 		break;
4033 	}
4034 
4035 	if (!ret) {
4036 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4037 			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4038 		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4039 			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4040 	}
4041 
4042 	return ret;
4043 }
4044 
4045 static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4046 {
4047 	struct vega10_hwmgr *data = hwmgr->backend;
4048 
4049 	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
4050 		return AMD_FAN_CTRL_MANUAL;
4051 	else
4052 		return AMD_FAN_CTRL_AUTO;
4053 }
4054 
4055 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4056 		struct amd_pp_simple_clock_info *info)
4057 {
4058 	struct phm_ppt_v2_information *table_info =
4059 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4060 	struct phm_clock_and_voltage_limits *max_limits =
4061 			&table_info->max_clock_voltage_on_ac;
4062 
4063 	info->engine_max_clock = max_limits->sclk;
4064 	info->memory_max_clock = max_limits->mclk;
4065 
4066 	return 0;
4067 }
4068 
4069 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4070 		struct pp_clock_levels_with_latency *clocks)
4071 {
4072 	struct phm_ppt_v2_information *table_info =
4073 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4074 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4075 			table_info->vdd_dep_on_sclk;
4076 	uint32_t i;
4077 
4078 	clocks->num_levels = 0;
4079 	for (i = 0; i < dep_table->count; i++) {
4080 		if (dep_table->entries[i].clk) {
4081 			clocks->data[clocks->num_levels].clocks_in_khz =
4082 					dep_table->entries[i].clk * 10;
4083 			clocks->num_levels++;
4084 		}
4085 	}
4086 
4087 }
4088 
4089 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4090 		struct pp_clock_levels_with_latency *clocks)
4091 {
4092 	struct phm_ppt_v2_information *table_info =
4093 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4094 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4095 			table_info->vdd_dep_on_mclk;
4096 	struct vega10_hwmgr *data = hwmgr->backend;
4097 	uint32_t j = 0;
4098 	uint32_t i;
4099 
4100 	for (i = 0; i < dep_table->count; i++) {
4101 		if (dep_table->entries[i].clk) {
4102 
4103 			clocks->data[j].clocks_in_khz =
4104 						dep_table->entries[i].clk * 10;
4105 			data->mclk_latency_table.entries[j].frequency =
4106 							dep_table->entries[i].clk;
4107 			clocks->data[j].latency_in_us =
4108 				data->mclk_latency_table.entries[j].latency = 25;
4109 			j++;
4110 		}
4111 	}
4112 	clocks->num_levels = data->mclk_latency_table.count = j;
4113 }
4114 
4115 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4116 		struct pp_clock_levels_with_latency *clocks)
4117 {
4118 	struct phm_ppt_v2_information *table_info =
4119 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4120 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4121 			table_info->vdd_dep_on_dcefclk;
4122 	uint32_t i;
4123 
4124 	for (i = 0; i < dep_table->count; i++) {
4125 		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4126 		clocks->data[i].latency_in_us = 0;
4127 		clocks->num_levels++;
4128 	}
4129 }
4130 
4131 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4132 		struct pp_clock_levels_with_latency *clocks)
4133 {
4134 	struct phm_ppt_v2_information *table_info =
4135 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4136 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4137 			table_info->vdd_dep_on_socclk;
4138 	uint32_t i;
4139 
4140 	for (i = 0; i < dep_table->count; i++) {
4141 		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4142 		clocks->data[i].latency_in_us = 0;
4143 		clocks->num_levels++;
4144 	}
4145 }
4146 
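/**
 * @brief Report the DPM clock levels (with latency) for the requested clock
 *        domain: system, memory, DCEF or SOC clock.
 */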
4147 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4148 		enum amd_pp_clock_type type,
4149 		struct pp_clock_levels_with_latency *clocks)
4150 {
4151 	switch (type) {
4152 	case amd_pp_sys_clock:
4153 		vega10_get_sclks(hwmgr, clocks);
4154 		break;
4155 	case amd_pp_mem_clock:
4156 		vega10_get_memclocks(hwmgr, clocks);
4157 		break;
4158 	case amd_pp_dcef_clock:
4159 		vega10_get_dcefclocks(hwmgr, clocks);
4160 		break;
4161 	case amd_pp_soc_clock:
4162 		vega10_get_socclocks(hwmgr, clocks);
4163 		break;
4164 	default:
4165 		return -1;
4166 	}
4167 
4168 	return 0;
4169 }
4170 
4171 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4172 		enum amd_pp_clock_type type,
4173 		struct pp_clock_levels_with_voltage *clocks)
4174 {
4175 	struct phm_ppt_v2_information *table_info =
4176 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4177 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4178 	uint32_t i;
4179 
4180 	switch (type) {
4181 	case amd_pp_mem_clock:
4182 		dep_table = table_info->vdd_dep_on_mclk;
4183 		break;
4184 	case amd_pp_dcef_clock:
4185 		dep_table = table_info->vdd_dep_on_dcefclk;
4186 		break;
4187 	case amd_pp_disp_clock:
4188 		dep_table = table_info->vdd_dep_on_dispclk;
4189 		break;
4190 	case amd_pp_pixel_clock:
4191 		dep_table = table_info->vdd_dep_on_pixclk;
4192 		break;
4193 	case amd_pp_phy_clock:
4194 		dep_table = table_info->vdd_dep_on_phyclk;
4195 		break;
4196 	default:
4197 		return -1;
4198 	}
4199 
4200 	for (i = 0; i < dep_table->count; i++) {
4201 		clocks->data[i].clocks_in_khz = dep_table->entries[i].clk  * 10;
4202 		clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4203 				entries[dep_table->entries[i].vddInd].us_vdd);
4204 		clocks->num_levels++;
4205 	}
4206 
4207 	if (i < dep_table->count)
4208 		return -1;
4209 
4210 	return 0;
4211 }
4212 
4213 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4214 							void *clock_range)
4215 {
4216 	struct vega10_hwmgr *data = hwmgr->backend;
4217 	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
4218 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4219 	int result = 0;
4220 
4221 	if (!data->registry_data.disable_water_mark) {
4222 		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
4223 		data->water_marks_bitmap = WaterMarksExist;
4224 	}
4225 
4226 	return result;
4227 }
4228 
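/**
 * @brief Print the DPM levels (marking the current one) for SCLK, MCLK or
 *        PCIE, or the overdrive tables and ranges, into the sysfs buffer.
 */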
4229 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4230 		enum pp_clock_type type, char *buf)
4231 {
4232 	struct vega10_hwmgr *data = hwmgr->backend;
4233 	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4234 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4235 	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4236 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
4237 
4238 	int i, now, size = 0;
4239 
4240 	switch (type) {
4241 	case PP_SCLK:
4242 		if (data->registry_data.sclk_dpm_key_disabled)
4243 			break;
4244 
4245 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
4246 		now = smum_get_argument(hwmgr);
4247 
4248 		for (i = 0; i < sclk_table->count; i++)
4249 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4250 					i, sclk_table->dpm_levels[i].value / 100,
4251 					(i == now) ? "*" : "");
4252 		break;
4253 	case PP_MCLK:
4254 		if (data->registry_data.mclk_dpm_key_disabled)
4255 			break;
4256 
4257 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
4258 		now = smum_get_argument(hwmgr);
4259 
4260 		for (i = 0; i < mclk_table->count; i++)
4261 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4262 					i, mclk_table->dpm_levels[i].value / 100,
4263 					(i == now) ? "*" : "");
4264 		break;
4265 	case PP_PCIE:
4266 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
4267 		now = smum_get_argument(hwmgr);
4268 
4269 		for (i = 0; i < pcie_table->count; i++)
4270 			size += sprintf(buf + size, "%d: %s %s\n", i,
4271 					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4272 					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4273 					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
4274 					(i == now) ? "*" : "");
4275 		break;
4276 	case OD_SCLK:
4277 		if (hwmgr->od_enabled) {
4278 			size = sprintf(buf, "%s:\n", "OD_SCLK");
4279 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4280 			for (i = 0; i < podn_vdd_dep->count; i++)
4281 				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4282 					i, podn_vdd_dep->entries[i].clk / 100,
4283 						podn_vdd_dep->entries[i].vddc);
4284 		}
4285 		break;
4286 	case OD_MCLK:
4287 		if (hwmgr->od_enabled) {
4288 			size = sprintf(buf, "%s:\n", "OD_MCLK");
4289 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4290 			for (i = 0; i < podn_vdd_dep->count; i++)
4291 				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4292 					i, podn_vdd_dep->entries[i].clk/100,
4293 						podn_vdd_dep->entries[i].vddc);
4294 		}
4295 		break;
4296 	case OD_RANGE:
4297 		if (hwmgr->od_enabled) {
4298 			size = sprintf(buf, "%s:\n", "OD_RANGE");
4299 			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4300 				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
4301 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4302 			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4303 				data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
4304 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4305 			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4306 				data->odn_dpm_table.min_vddc,
4307 				data->odn_dpm_table.max_vddc);
4308 		}
4309 		break;
4310 	default:
4311 		break;
4312 	}
4313 	return size;
4314 }
4315 
4316 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4317 {
4318 	struct vega10_hwmgr *data = hwmgr->backend;
4319 	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4320 	int result = 0;
4321 
4322 	if ((data->water_marks_bitmap & WaterMarksExist) &&
4323 			!(data->water_marks_bitmap & WaterMarksLoaded)) {
4324 		result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
4325 		PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4326 		data->water_marks_bitmap |= WaterMarksLoaded;
4327 	}
4328 
4329 	if (data->water_marks_bitmap & WaterMarksLoaded) {
4330 		smum_send_msg_to_smc_with_parameter(hwmgr,
4331 			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
4332 	}
4333 
4334 	return result;
4335 }
4336 
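/**
 * @brief Enable or disable the UVD DPM feature in the SMC, if it is supported.
 */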
4337 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4338 {
4339 	struct vega10_hwmgr *data = hwmgr->backend;
4340 
4341 	if (data->smu_features[GNLD_DPM_UVD].supported) {
4342 		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
4343 				enable,
4344 				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4345 				"Attempt to Enable/Disable DPM UVD Failed!",
4346 				return -1);
4347 		data->smu_features[GNLD_DPM_UVD].enabled = enable;
4348 	}
4349 	return 0;
4350 }
4351 
4352 static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4353 {
4354 	struct vega10_hwmgr *data = hwmgr->backend;
4355 
4356 	data->vce_power_gated = bgate;
4357 	vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4358 }
4359 
4360 static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4361 {
4362 	struct vega10_hwmgr *data = hwmgr->backend;
4363 
4364 	data->uvd_power_gated = bgate;
4365 	vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4366 }
4367 
4368 static inline bool vega10_are_power_levels_equal(
4369 				const struct vega10_performance_level *pl1,
4370 				const struct vega10_performance_level *pl2)
4371 {
4372 	return ((pl1->soc_clock == pl2->soc_clock) &&
4373 			(pl1->gfx_clock == pl2->gfx_clock) &&
4374 			(pl1->mem_clock == pl2->mem_clock));
4375 }
4376 
4377 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4378 				const struct pp_hw_power_state *pstate1,
4379 			const struct pp_hw_power_state *pstate2, bool *equal)
4380 {
4381 	const struct vega10_power_state *psa;
4382 	const struct vega10_power_state *psb;
4383 	int i;
4384 
4385 	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4386 		return -EINVAL;
4387 
4388 	psa = cast_const_phw_vega10_power_state(pstate1);
4389 	psb = cast_const_phw_vega10_power_state(pstate2);
4390 	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
4391 	if (psa->performance_level_count != psb->performance_level_count) {
4392 		*equal = false;
4393 		return 0;
4394 	}
4395 
4396 	for (i = 0; i < psa->performance_level_count; i++) {
4397 		if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4398 			/* If we have found even one performance level pair that is different the states are different. */
4399 			*equal = false;
4400 			return 0;
4401 		}
4402 	}
4403 
4404 	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4405 	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4406 	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4407 	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
4408 
4409 	return 0;
4410 }
4411 
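/*
 * The SMC needs to be re-programmed when the display count or the
 * deep-sleep minimum core clock recorded at the last update no longer
 * matches the current display configuration.
 */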
4412 static bool
4413 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4414 {
4415 	struct vega10_hwmgr *data = hwmgr->backend;
4416 	bool is_update_required = false;
4417 
4418 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4419 		is_update_required = true;
4420 
4421 	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
4422 		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
4423 			is_update_required = true;
4424 	}
4425 
4426 	return is_update_required;
4427 }
4428 
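/*
 * Tear down dynamic power management: thermal protection, power
 * containment, DIDT, AVFS, the DPM features themselves, deep sleep, ULV,
 * ACG and finally the PCC limit feature.  Individual failures are
 * reported but do not abort the sequence; the last failure code is
 * returned.
 */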
4429 static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4430 {
4431 	int tmp_result, result = 0;
4432 
4433 	if (PP_CAP(PHM_PlatformCaps_ThermalController))
4434 		vega10_disable_thermal_protection(hwmgr);
4435 
4436 	tmp_result = vega10_disable_power_containment(hwmgr);
4437 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4438 			"Failed to disable power containment!", result = tmp_result);
4439 
4440 	tmp_result = vega10_disable_didt_config(hwmgr);
4441 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4442 			"Failed to disable didt config!", result = tmp_result);
4443 
4444 	tmp_result = vega10_avfs_enable(hwmgr, false);
4445 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4446 			"Failed to disable AVFS!", result = tmp_result);
4447 
4448 	tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4449 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4450 			"Failed to stop DPM!", result = tmp_result);
4451 
4452 	tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4453 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4454 			"Failed to disable deep sleep!", result = tmp_result);
4455 
4456 	tmp_result = vega10_disable_ulv(hwmgr);
4457 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4458 			"Failed to disable ulv!", result = tmp_result);
4459 
4460 	tmp_result =  vega10_acg_disable(hwmgr);
4461 	PP_ASSERT_WITH_CODE((tmp_result == 0),
4462 			"Failed to disable acg!", result = tmp_result);
4463 
4464 	vega10_enable_disable_PCC_limit_feature(hwmgr, false);
4465 	return result;
4466 }
4467 
4468 static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4469 {
4470 	struct vega10_hwmgr *data = hwmgr->backend;
4471 	int result;
4472 
4473 	result = vega10_disable_dpm_tasks(hwmgr);
4474 	PP_ASSERT_WITH_CODE((0 == result),
4475 			"[disable_dpm_tasks] Failed to disable DPM!",
4476 			);
4477 	data->water_marks_bitmap &= ~(WaterMarksLoaded);
4478 
4479 	return result;
4480 }
4481 
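/*
 * Report the current engine-clock overdrive as the percentage by which
 * the highest gfx DPM level exceeds the golden (stock) top level.
 */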
4482 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4483 {
4484 	struct vega10_hwmgr *data = hwmgr->backend;
4485 	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4486 	struct vega10_single_dpm_table *golden_sclk_table =
4487 			&(data->golden_dpm_table.gfx_table);
4488 	int value;
4489 
4490 	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4491 			golden_sclk_table->dpm_levels
4492 			[golden_sclk_table->count - 1].value) *
4493 			100 /
4494 			golden_sclk_table->dpm_levels
4495 			[golden_sclk_table->count - 1].value;
4496 
4497 	return value;
4498 }
4499 
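/*
 * Raise the requested power state's top gfx clock to the golden top level
 * plus the given percentage, clamped to the platform overdrive engine
 * clock limit.
 */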
4500 static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4501 {
4502 	struct vega10_hwmgr *data = hwmgr->backend;
4503 	struct vega10_single_dpm_table *golden_sclk_table =
4504 			&(data->golden_dpm_table.gfx_table);
4505 	struct pp_power_state *ps;
4506 	struct vega10_power_state *vega10_ps;
4507 
4508 	ps = hwmgr->request_ps;
4509 
4510 	if (ps == NULL)
4511 		return -EINVAL;
4512 
4513 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4514 
4515 	vega10_ps->performance_levels
4516 	[vega10_ps->performance_level_count - 1].gfx_clock =
4517 			golden_sclk_table->dpm_levels
4518 			[golden_sclk_table->count - 1].value *
4519 			value / 100 +
4520 			golden_sclk_table->dpm_levels
4521 			[golden_sclk_table->count - 1].value;
4522 
4523 	if (vega10_ps->performance_levels
4524 			[vega10_ps->performance_level_count - 1].gfx_clock >
4525 			hwmgr->platform_descriptor.overdriveLimit.engineClock)
4526 		vega10_ps->performance_levels
4527 		[vega10_ps->performance_level_count - 1].gfx_clock =
4528 				hwmgr->platform_descriptor.overdriveLimit.engineClock;
4529 
4530 	return 0;
4531 }
4532 
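/* Memory-clock counterpart of vega10_get_sclk_od(). */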
4533 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4534 {
4535 	struct vega10_hwmgr *data = hwmgr->backend;
4536 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4537 	struct vega10_single_dpm_table *golden_mclk_table =
4538 			&(data->golden_dpm_table.mem_table);
4539 	int value;
4540 
4541 	value = (mclk_table->dpm_levels
4542 			[mclk_table->count - 1].value -
4543 			golden_mclk_table->dpm_levels
4544 			[golden_mclk_table->count - 1].value) *
4545 			100 /
4546 			golden_mclk_table->dpm_levels
4547 			[golden_mclk_table->count - 1].value;
4548 
4549 	return value;
4550 }
4551 
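/* Memory-clock counterpart of vega10_set_sclk_od(). */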
4552 static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4553 {
4554 	struct vega10_hwmgr *data = hwmgr->backend;
4555 	struct vega10_single_dpm_table *golden_mclk_table =
4556 			&(data->golden_dpm_table.mem_table);
4557 	struct pp_power_state  *ps;
4558 	struct vega10_power_state  *vega10_ps;
4559 
4560 	ps = hwmgr->request_ps;
4561 
4562 	if (ps == NULL)
4563 		return -EINVAL;
4564 
4565 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4566 
4567 	vega10_ps->performance_levels
4568 	[vega10_ps->performance_level_count - 1].mem_clock =
4569 			golden_mclk_table->dpm_levels
4570 			[golden_mclk_table->count - 1].value *
4571 			value / 100 +
4572 			golden_mclk_table->dpm_levels
4573 			[golden_mclk_table->count - 1].value;
4574 
4575 	if (vega10_ps->performance_levels
4576 			[vega10_ps->performance_level_count - 1].mem_clock >
4577 			hwmgr->platform_descriptor.overdriveLimit.memoryClock)
4578 		vega10_ps->performance_levels
4579 		[vega10_ps->performance_level_count - 1].mem_clock =
4580 				hwmgr->platform_descriptor.overdriveLimit.memoryClock;
4581 
4582 	return 0;
4583 }
4584 
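/*
 * Hand the SMC the system virtual and MC addresses (split into high/low
 * 32-bit halves) and the size of the buffer used for DRAM logging.
 */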
4585 static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4586 					uint32_t virtual_addr_low,
4587 					uint32_t virtual_addr_hi,
4588 					uint32_t mc_addr_low,
4589 					uint32_t mc_addr_hi,
4590 					uint32_t size)
4591 {
4592 	smum_send_msg_to_smc_with_parameter(hwmgr,
4593 					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4594 					virtual_addr_hi);
4595 	smum_send_msg_to_smc_with_parameter(hwmgr,
4596 					PPSMC_MSG_SetSystemVirtualDramAddrLow,
4597 					virtual_addr_low);
4598 	smum_send_msg_to_smc_with_parameter(hwmgr,
4599 					PPSMC_MSG_DramLogSetDramAddrHigh,
4600 					mc_addr_hi);
4601 
4602 	smum_send_msg_to_smc_with_parameter(hwmgr,
4603 					PPSMC_MSG_DramLogSetDramAddrLow,
4604 					mc_addr_low);
4605 
4606 	smum_send_msg_to_smc_with_parameter(hwmgr,
4607 					PPSMC_MSG_DramLogSetDramSize,
4608 					size);
4609 	return 0;
4610 }
4611 
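/*
 * Start from the generic SMU7 delay-policy temperature range, then
 * override the maximum with the board's software shutdown temperature,
 * scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */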
4612 static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4613 		struct PP_TemperatureRange *thermal_data)
4614 {
4615 	struct phm_ppt_v2_information *table_info =
4616 			(struct phm_ppt_v2_information *)hwmgr->pptable;
4617 
4618 	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4619 
4620 	thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
4621 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4622 
4623 	return 0;
4624 }
4625 
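/*
 * Print the table of power profile modes (typically read back through the
 * pp_power_profile_mode sysfs file): the built-in modes come from
 * profile_mode_setting[], while the CUSTOM row reflects the last
 * user-supplied parameters.
 */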
4626 static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4627 {
4628 	struct vega10_hwmgr *data = hwmgr->backend;
4629 	uint32_t i, size = 0;
4630 	static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
4631 						{90, 60, 0, 0,},
4632 						{70, 60, 0, 0,},
4633 						{70, 90, 0, 0,},
4634 						{30, 60, 0, 6,},
4635 						};
4636 	static const char *profile_name[6] = {"3D_FULL_SCREEN",
4637 					"POWER_SAVING",
4638 					"VIDEO",
4639 					"VR",
4640 					"COMPUTE",
4641 					"CUSTOM"};
4642 	static const char *title[6] = {"NUM",
4643 			"MODE_NAME",
4644 			"BUSY_SET_POINT",
4645 			"FPS",
4646 			"USE_RLC_BUSY",
4647 			"MIN_ACTIVE_LEVEL"};
4648 
4649 	if (!buf)
4650 		return -EINVAL;
4651 
4652 	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
4653 			title[1], title[2], title[3], title[4], title[5]);
4654 
4655 	for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
4656 		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
4657 			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4658 			profile_mode_setting[i][0], profile_mode_setting[i][1],
4659 			profile_mode_setting[i][2], profile_mode_setting[i][3]);
4660 	size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
4661 			profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4662 			data->custom_profile_mode[0], data->custom_profile_mode[1],
4663 			data->custom_profile_mode[2], data->custom_profile_mode[3]);
4664 	return size;
4665 }
4666 
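/*
 * Select a power profile: input[size] carries the mode index and, for
 * PP_SMC_POWER_PROFILE_CUSTOM, input[0..3] carry the busy set point, FPS,
 * use_rlc_busy and min_active_level values, which are packed into a
 * single SetCustomGfxDpmParameters message.
 */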
4667 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4668 {
4669 	struct vega10_hwmgr *data = hwmgr->backend;
4670 	uint8_t busy_set_point;
4671 	uint8_t FPS;
4672 	uint8_t use_rlc_busy;
4673 	uint8_t min_active_level;
4674 
4675 	hwmgr->power_profile_mode = input[size];
4676 
4677 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4678 						1<<hwmgr->power_profile_mode);
4679 
4680 	if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4681 		if (size == 0 || size > 4)
4682 			return -EINVAL;
4683 
4684 		data->custom_profile_mode[0] = busy_set_point = input[0];
4685 		data->custom_profile_mode[1] = FPS = input[1];
4686 		data->custom_profile_mode[2] = use_rlc_busy = input[2];
4687 		data->custom_profile_mode[3] = min_active_level = input[3];
4688 		smum_send_msg_to_smc_with_parameter(hwmgr,
4689 					PPSMC_MSG_SetCustomGfxDpmParameters,
4690 					busy_set_point | FPS<<8 |
4691 					use_rlc_busy << 16 | min_active_level<<24);
4692 	}
4693 
4694 	return 0;
4695 }
4696 
4697 
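/*
 * Validate a user-supplied OD point: the voltage must lie within the ODN
 * vddc range and the clock between the lowest golden DPM level and the
 * platform overdrive limit for the selected (sclk or mclk) table.
 */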
4698 static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4699 					enum PP_OD_DPM_TABLE_COMMAND type,
4700 					uint32_t clk,
4701 					uint32_t voltage)
4702 {
4703 	struct vega10_hwmgr *data = hwmgr->backend;
4704 	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4705 	struct vega10_single_dpm_table *golden_table;
4706 
4707 	if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
4708 		pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
4709 		return false;
4710 	}
4711 
4712 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4713 		golden_table = &(data->golden_dpm_table.gfx_table);
4714 		if (golden_table->dpm_levels[0].value > clk ||
4715 			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4716 			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4717 				golden_table->dpm_levels[0].value/100,
4718 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4719 			return false;
4720 		}
4721 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4722 		golden_table = &(data->golden_dpm_table.mem_table);
4723 		if (golden_table->dpm_levels[0].value > clk ||
4724 			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4725 			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4726 				golden_table->dpm_levels[0].value/100,
4727 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4728 			return false;
4729 		}
4730 	} else {
4731 		return false;
4732 	}
4733 
4734 	return true;
4735 }
4736 
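/*
 * Propagate user overdrive edits into the vddc lookup and SOC-clock
 * dependency tables: an sclk edit may raise the highest vddc entry, while
 * an mclk edit rebinds voltage indices and bumps the top socclk levels so
 * the SOC clock keeps pace with the new memory settings.
 */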
4737 static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
4738 						enum PP_OD_DPM_TABLE_COMMAND type)
4739 {
4740 	struct vega10_hwmgr *data = hwmgr->backend;
4741 	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
4742 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
4743 	struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
4744 
4745 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
4746 							&data->odn_dpm_table.vdd_dep_on_socclk;
4747 	struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
4748 
4749 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
4750 	uint8_t i, j;
4751 
4752 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4753 		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4754 		for (i = 0; i < podn_vdd_dep->count - 1; i++)
4755 			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4756 		if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
4757 			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4758 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4759 		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4760 		for (i = 0; i < dpm_table->count; i++) {
4761 			for (j = 0; j < od_vddc_lookup_table->count; j++) {
4762 				if (od_vddc_lookup_table->entries[j].us_vdd >
4763 					podn_vdd_dep->entries[i].vddc)
4764 					break;
4765 			}
4766 			if (j == od_vddc_lookup_table->count) {
4767 				od_vddc_lookup_table->entries[j-1].us_vdd =
4768 					podn_vdd_dep->entries[i].vddc;
4769 				data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4770 			}
4771 			podn_vdd_dep->entries[i].vddInd = j;
4772 		}
4773 		dpm_table = &data->dpm_table.soc_table;
4774 		for (i = 0; i < dep_table->count; i++) {
4775 			if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
4776 					dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
4777 				data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4778 				podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4779 				dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
4780 			}
4781 		}
4782 		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
4783 					podn_vdd_dep->entries[dep_table->count-1].clk) {
4784 			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4785 			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4786 			dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
4787 		}
4788 		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
4789 					podn_vdd_dep->entries[dep_table->count-1].vddInd) {
4790 			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4791 			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
4792 		}
4793 	}
4794 }
4795 
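/*
 * Back end for the overdrive editing interface (exposed through the
 * pp_od_clk_voltage sysfs file): sclk/mclk edits arrive as
 * <level> <clock in MHz> <voltage in mV> triples, the restore command
 * copies back the golden tables, and the commit command re-checks and
 * applies the updated DPM tables.
 */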
4796 static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4797 					enum PP_OD_DPM_TABLE_COMMAND type,
4798 					long *input, uint32_t size)
4799 {
4800 	struct vega10_hwmgr *data = hwmgr->backend;
4801 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
4802 	struct vega10_single_dpm_table *dpm_table;
4803 
4804 	uint32_t input_clk;
4805 	uint32_t input_vol;
4806 	uint32_t input_level;
4807 	uint32_t i;
4808 
4809 	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4810 				return -EINVAL);
4811 
4812 	if (!hwmgr->od_enabled) {
4813 		pr_info("OverDrive feature not enabled\n");
4814 		return -EINVAL;
4815 	}
4816 
4817 	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4818 		dpm_table = &data->dpm_table.gfx_table;
4819 		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
4820 		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4821 	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4822 		dpm_table = &data->dpm_table.mem_table;
4823 		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
4824 		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4825 	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4826 		memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
4827 		vega10_odn_initial_default_setting(hwmgr);
4828 		return 0;
4829 	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
4830 		vega10_check_dpm_table_updated(hwmgr);
4831 		return 0;
4832 	} else {
4833 		return -EINVAL;
4834 	}
4835 
4836 	for (i = 0; i < size; i += 3) {
4837 		if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
4838 			pr_info("invalid clock voltage input\n");
4839 			return 0;
4840 		}
4841 		input_level = input[i];
4842 		input_clk = input[i+1] * 100;
4843 		input_vol = input[i+2];
4844 
4845 		if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4846 			dpm_table->dpm_levels[input_level].value = input_clk;
4847 			podn_vdd_dep_table->entries[input_level].clk = input_clk;
4848 			podn_vdd_dep_table->entries[input_level].vddc = input_vol;
4849 		} else {
4850 			return -EINVAL;
4851 		}
4852 	}
4853 	vega10_odn_update_soc_table(hwmgr, type);
4854 	return 0;
4855 }
4856 
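/*
 * Dispatch table that wires the Vega10 implementations above into the
 * generic powerplay hardware-manager interface.
 */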
4857 static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4858 	.backend_init = vega10_hwmgr_backend_init,
4859 	.backend_fini = vega10_hwmgr_backend_fini,
4860 	.asic_setup = vega10_setup_asic_task,
4861 	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
4862 	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
4863 	.get_num_of_pp_table_entries =
4864 			vega10_get_number_of_powerplay_table_entries,
4865 	.get_power_state_size = vega10_get_power_state_size,
4866 	.get_pp_table_entry = vega10_get_pp_table_entry,
4867 	.patch_boot_state = vega10_patch_boot_state,
4868 	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4869 	.power_state_set = vega10_set_power_state_tasks,
4870 	.get_sclk = vega10_dpm_get_sclk,
4871 	.get_mclk = vega10_dpm_get_mclk,
4872 	.notify_smc_display_config_after_ps_adjustment =
4873 			vega10_notify_smc_display_config_after_ps_adjustment,
4874 	.force_dpm_level = vega10_dpm_force_dpm_level,
4875 	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4876 	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4877 	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4878 	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4879 	.reset_fan_speed_to_default =
4880 			vega10_fan_ctrl_reset_fan_speed_to_default,
4881 	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4882 	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4883 	.uninitialize_thermal_controller =
4884 			vega10_thermal_ctrl_uninitialize_thermal_controller,
4885 	.set_fan_control_mode = vega10_set_fan_control_mode,
4886 	.get_fan_control_mode = vega10_get_fan_control_mode,
4887 	.read_sensor = vega10_read_sensor,
4888 	.get_dal_power_level = vega10_get_dal_power_level,
4889 	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4890 	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4891 	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4892 	.display_clock_voltage_request = vega10_display_clock_voltage_request,
4893 	.force_clock_level = vega10_force_clock_level,
4894 	.print_clock_levels = vega10_print_clock_levels,
4895 	.display_config_changed = vega10_display_configuration_changed_task,
4896 	.powergate_uvd = vega10_power_gate_uvd,
4897 	.powergate_vce = vega10_power_gate_vce,
4898 	.check_states_equal = vega10_check_states_equal,
4899 	.check_smc_update_required_for_display_configuration =
4900 			vega10_check_smc_update_required_for_display_configuration,
4901 	.power_off_asic = vega10_power_off_asic,
4902 	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
4903 	.get_sclk_od = vega10_get_sclk_od,
4904 	.set_sclk_od = vega10_set_sclk_od,
4905 	.get_mclk_od = vega10_get_mclk_od,
4906 	.set_mclk_od = vega10_set_mclk_od,
4907 	.avfs_control = vega10_avfs_enable,
4908 	.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
4909 	.get_thermal_temperature_range = vega10_get_thermal_temperature_range,
4910 	.register_irq_handlers = smu9_register_irq_handlers,
4911 	.start_thermal_controller = vega10_start_thermal_controller,
4912 	.get_power_profile_mode = vega10_get_power_profile_mode,
4913 	.set_power_profile_mode = vega10_set_power_profile_mode,
4914 	.set_power_limit = vega10_set_power_limit,
4915 	.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
4916 };
4917 
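/*
 * Enable or disable a set of SMU features by sending the given feature
 * bitmask with the EnableSmuFeatures/DisableSmuFeatures message.
 */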
4918 int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
4919 		bool enable, uint32_t feature_mask)
4920 {
4921 	int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
4922 			PPSMC_MSG_DisableSmuFeatures;
4923 
4924 	return smum_send_msg_to_smc_with_parameter(hwmgr,
4925 			msg, feature_mask);
4926 }
4927 
4928 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4929 {
4930 	hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4931 	hwmgr->pptable_func = &vega10_pptable_funcs;
4932 
4933 	return 0;
4934 }
4935