/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "pp_debug.h"
#include "smumgr.h"
#include "smu74.h"
#include "smu_ucode_xfer_vi.h"
#include "polaris10_smumgr.h"
#include "smu74_discrete.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"

#include "smu7_dyn_defaults.h"

#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "atombios.h"
#include "pppcielanes.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#define POLARIS10_SMC_SIZE 0x20000
#define POWERTUNE_DEFAULT_SET_MAX    1
#define VDDC_VDDCI_DELTA            200
#define MC_CG_ARB_FREQ_F1           0x0b

static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
	/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
	{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
};

static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
			{VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
			{VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
			{VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
			{VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
			{VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
			{VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
			{VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
			{VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };

#define PPPOLARIS10_TARGETACTIVITY_DFLT                     50

static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
	/*  Min      pcie   DeepSleep Activity  CgSpll      CgSpll    CcPwr  CcPwr  Sclk         Enabled      Enabled                       Voltage    Power */
	/* Voltage, DpmLevel, DivId,  Level,  FuncCntl3,  FuncCntl4,  DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
	{ 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
	{ 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
	{ 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
	{ 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
	{ 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
	{ 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
	{ 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
	{ 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
};

static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
	0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};

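/*
 * Run the AVFS BTC (boot-time calibration) step on the SMU. A non-zero
 * avfs_btc_param selects the calibration mode; values above 1 appear to
 * also require a GFX soft-reset afterwards so the engine is clean before
 * the microcode is reloaded.
 */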
static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	if (0 != smu_data->avfs_btc_param) {
		if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
			pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
			result = -1;
		}
	}
	if (smu_data->avfs_btc_param > 1) {
		/* Soft-Reset to reset the engine before loading uCode */
		/* halt */
		cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
		/* reset everything */
		cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
		cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
	}
	return result;
}


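/*
 * Copy the static AVFS graphics/memory level tables, the VRConfig word
 * and the boot MVDD value into the SMU's DPM table region in SMC SRAM.
 * The firmware header is read first to locate the DPM table start.
 */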
static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
	uint32_t vr_config;
	uint32_t dpm_table_start;

	uint16_t u16_boot_mvdd;
	uint32_t graphics_level_address, vr_config_address, graphics_level_size;

	graphics_level_size = sizeof(avfs_graphics_level_polaris10);
	u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);

	PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
				&dpm_table_start, 0x40000),
			"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
			return -1);

	/*  Default value for VRConfig = VR_MERGED_WITH_VDDC + VR_STATIC_VOLTAGE(VDDCI) */
	vr_config = 0x01000500; /* Real value:0x50001 */

	vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);

	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address,
				(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
			"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
			return -1);

	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);

	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
				(uint8_t *)(&avfs_graphics_level_polaris10),
				graphics_level_size, 0x40000),
			"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
			return -1);

	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);

	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
				(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
				"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
			return -1);

	/* MVDD Boot value - necessary for getting rid of the hang that occurs during Mclk DPM enablement */

	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);

	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
			(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
			"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
			return -1);

	return 0;
}


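/*
 * AVFS bring-up sequence: copy the graphics level structure to the SMU,
 * optionally set up the power-virus workload (presumably so BTC runs
 * under a representative load), then trigger BTC itself.
 */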
static int polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	if (!hwmgr->avfs_supported)
		return 0;

	PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
		"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
		return -EINVAL);

	if (smu_data->avfs_btc_param > 1) {
		pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be issues with this setting.");
		PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
		"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
		return -EINVAL);
	}

	PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
				"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
			 return -EINVAL);

	return 0;
}

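/*
 * Start the SMU when it runs in protected mode: upload the firmware image
 * with reset asserted, release reset, kick the SMU via the test message,
 * then poll SMU_STATUS for the done/pass bits before waiting for the
 * firmware interrupt flag.
 */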
static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	/* Wait for smc boot up */
	/* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */

	/* Assert reset */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_RESET_CNTL, rst_reg, 1);

	result = smu7_upload_smu_firmware_image(hwmgr);
	if (result != 0)
		return result;

	/* Clear status */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/* De-assert reset */
	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_RESET_CNTL, rst_reg, 0);


	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);


	/* Call Test SMU message with 0x20000 offset to trigger SMU start */
	smu7_send_msg_to_smc_offset(hwmgr);

	/* Wait done bit to be set */
	/* Check pass/failed indicator */

	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);

	if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
						SMU_STATUS, SMU_PASS))
		PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_RESET_CNTL, rst_reg, 1);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* Wait for firmware to initialize */
	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);

	return result;
}

static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	/* wait for smc boot up */
	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);

	/* Clear firmware interrupt enable flag */
	/* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixFIRMWARE_FLAGS, 0);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_RESET_CNTL,
					rst_reg, 1);

	result = smu7_upload_smu_firmware_image(hwmgr);
	if (result != 0)
		return result;

	/* Set SMC instruction start point at 0x0 */
	smu7_program_jump_on_start(hwmgr);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
					SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* Wait for firmware to initialize */

	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
					FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);

	return result;
}

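/*
 * Top-level SMU start: if SMC RAM is not already running, choose the
 * protected or non-protected bring-up path based on the SMU_FIRMWARE
 * mode bits, run the AVFS setup, then cache the SoftRegisters start
 * offset and request the remaining firmware loads.
 */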
static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);

	/* Only start SMC if SMC RAM is not running */
	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
		smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
		smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));

		/* Check if SMU is running in protected mode */
		if (smu_data->protected_mode == 0)
			result = polaris10_start_smu_in_non_protection_mode(hwmgr);
		else
			result = polaris10_start_smu_in_protection_mode(hwmgr);

		if (result != 0)
			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);

		polaris10_avfs_event_mgr(hwmgr);
	}

	/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
	smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
					&(smu_data->smu7_data.soft_regs_start), 0x40000);

	result = smu7_request_smu_load_fw(hwmgr);

	return result;
}

static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
	uint32_t efuse;

	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
	efuse &= 0x00000001;
	if (efuse)
		return true;

	return false;
}

static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data;

	smu_data = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
	if (smu_data == NULL)
		return -ENOMEM;

	hwmgr->smu_backend = smu_data;

	if (smu7_init(hwmgr)) {
		kfree(smu_data);
		return -EINVAL;
	}

	return 0;
}

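/*
 * Look up VDDC/VDDCI/MVDD for a given clock in a clock-voltage dependency
 * table. The result is packed into a single SMU voltage word: VDDC and
 * VDDCI (in VOLTAGE_SCALE units) at VDDC_SHIFT/VDDCI_SHIFT plus a single
 * phase bit at PHASES_SHIFT; MVDD is returned separately.
 */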
static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
{
	uint32_t i;
	uint16_t vddci;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	*voltage = *mvdd = 0;

	/* the clock-voltage dependency table is empty */
	if (dep_table->count == 0)
		return -EINVAL;

	for (i = 0; i < dep_table->count; i++) {
		/* find first sclk bigger than request */
		if (dep_table->entries[i].clk >= clock) {
			*voltage |= (dep_table->entries[i].vddc *
					VOLTAGE_SCALE) << VDDC_SHIFT;
			if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
						VOLTAGE_SCALE) << VDDCI_SHIFT;
			else if (dep_table->entries[i].vddci)
				*voltage |= (dep_table->entries[i].vddci *
						VOLTAGE_SCALE) << VDDCI_SHIFT;
			else {
				vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
						(dep_table->entries[i].vddc -
								(uint16_t)VDDC_VDDCI_DELTA));
				*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
			}

			if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
					VOLTAGE_SCALE;
			else if (dep_table->entries[i].mvdd)
				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
					VOLTAGE_SCALE;

			*voltage |= 1 << PHASES_SHIFT;
			return 0;
		}
	}

	/* sclk is bigger than the max sclk in the dependency table */
	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;

	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
				VOLTAGE_SCALE) << VDDCI_SHIFT;
	else if (dep_table->entries[i-1].vddci) {
		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
				(dep_table->entries[i - 1].vddc -
						(uint16_t)VDDC_VDDCI_DELTA));
		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
	}

	if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
	else if (dep_table->entries[i - 1].mvdd)
		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;

	return 0;
}

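/*
 * Convert a fan-gain percentage to the SMU's fixed-point representation
 * (value * 4096 / 100, i.e. a fraction scaled by 2^12).
 */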
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	uint32_t tmp;
	tmp = raw_setting * 4096 / 100;
	return (uint16_t)tmp;
}

static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);

	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	struct pp_advance_fan_control_parameters *fan_table =
			&hwmgr->thermal_controller.advanceFanControlParameters;
	int i, j, k;
	const uint16_t *pdef1;
	const uint16_t *pdef2;

	table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
	table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));

	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
				"Target Operating Temp is out of Range!",
				);

	table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTargetOperatingTemp * 256);
	table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitHotspot * 256);
	table->FanGainEdge = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainEdge));
	table->FanGainHotspot = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHotspot));

	pdef1 = defaults->BAPMTI_R;
	pdef2 = defaults->BAPMTI_RC;

	for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU74_DTE_SOURCES; j++) {
			for (k = 0; k < SMU74_DTE_SINKS; k++) {
				table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
				table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
				pdef1++;
				pdef2++;
			}
		}
	}

	return 0;
}

static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
	smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;

	return 0;
}

static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (smu7_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else {
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
		smu_data->power_tune_table.LPMLTemperatureMin =
				(uint8_t)((temp >> 16) & 0xff);
		smu_data->power_tune_table.LPMLTemperatureMax =
				(uint8_t)((temp >> 8) & 0xff);
		smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
	}
	return 0;
}

static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
	int i;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);

	/* Currently not used. Set all to zero. */
	for (i = 0; i < 16; i++)
		smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;

	return 0;
}

static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);

/* TO DO move to hwmgr */
	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
			hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
				hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
	return 0;
}

static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
	int i;
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);

	/* Currently not used. Set all to zero. */
	for (i = 0; i < 16; i++)
		smu_data->power_tune_table.GnbLPML[i] = 0;

	return 0;
}

static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
	uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;

	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);

	return 0;
}

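/*
 * Build the PM fuse table (load line, TDC limit, waterfall control,
 * temperature scalers, fuzzy fan, GnbLPML and leakage SIDD values) and
 * copy it to SMC RAM when power containment is enabled.
 */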
static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (smu7_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU74_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		if (polaris10_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);

		if (polaris10_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);

		if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		if (0 != polaris10_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		if (polaris10_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate Fuzzy Fan Control parameters Failed!",
					return -EINVAL);

		if (polaris10_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
					"Sidd Failed!", return -EINVAL);

		if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				(sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}

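/*
 * Populate SmioTable2 with the GPIO-controlled MVDD voltage levels; each
 * pattern records the voltage and the SMIO index used to drive it.
 */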
static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
			SMU74_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count, level;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		count = data->mvdd_voltage_table.count;
		if (count > SMU_MAX_SMIO_LEVELS)
			count = SMU_MAX_SMIO_LEVELS;
		for (level = 0; level < count; level++) {
			table->SmioTable2.Pattern[level].Voltage =
				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
			table->SmioTable2.Pattern[level].Smio =
				(uint8_t) level;
			table->Smio[level] |=
				data->mvdd_voltage_table.entries[level].smio_low;
		}
		table->SmioMask2 = data->mvdd_voltage_table.mask_low;

		table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
	}

	return 0;
}

static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
					struct SMU74_Discrete_DpmTable *table)
{
	uint32_t count, level;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	count = data->vddci_voltage_table.count;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		if (count > SMU_MAX_SMIO_LEVELS)
			count = SMU_MAX_SMIO_LEVELS;
		for (level = 0; level < count; ++level) {
			table->SmioTable1.Pattern[level].Voltage =
				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
			table->SmioTable1.Pattern[level].Smio = (uint8_t) level;

			table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
		}
	}

	table->SmioMask1 = data->vddci_voltage_table.mask_low;

	return 0;
}

static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	uint32_t count;
	uint8_t index;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
			table_info->vddc_lookup_table;
	/* The table is already swapped, so in order to use the values from it,
	 * we need to swap them back.
	 * We are populating vddc CAC data to BapmVddc table
	 * in split and merged mode
	 */
	for (count = 0; count < lookup_table->count; count++) {
		index = phm_get_voltage_index(lookup_table,
				data->vddc_voltage_table.entries[count].value);
		table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
		table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
		table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
	}

	return 0;
}

static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	polaris10_populate_smc_vddci_table(hwmgr, table);
	polaris10_populate_smc_mvdd_table(hwmgr, table);
	polaris10_populate_cac_table(hwmgr, table);

	return 0;
}

static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_Ulv *state)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);

	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
		state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
	else
		state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}

static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
}

static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
				dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
			(uint8_t)dpm_table->pcie_speed_table.count;

/* To Do move to hwmgr */
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}


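/*
 * Fill the SCLK FCW range table, preferring the ranges reported by the
 * VBIOS and falling back to the hard-coded Range_Table (scaled by the
 * reference clock) when the VBIOS table is not available.
 */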
static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
				   SMU74_Discrete_DpmTable  *table)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	uint32_t i, ref_clk;

	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };

	ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);

	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
		for (i = 0; i < NUM_SCLK_RANGE; i++) {
			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;

			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;

			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
		}
		return;
	}

	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
		smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;

		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;

		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;

		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
	}
}

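/*
 * Compute the SPLL settings for an engine clock. When the VBIOS divider
 * query fails, the FCW integer/fractional parts are derived directly as
 * (clock << postdiv) / ref_clock, with the PCC and spread-spectrum
 * targets hard-coded to 10% and 2% below the requested clock.
 */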
static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, SMU_SclkSetting *sclk_setting)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct pp_atomctrl_clock_dividers_ai dividers;
	uint32_t ref_clock;
	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
	uint8_t i;
	int result;
	uint64_t temp;

	sclk_setting->SclkFrequency = clock;
	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
	if (result == 0) {
		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
		sclk_setting->PllRange = dividers.ucSclkPllRange;
		sclk_setting->Sclk_slew_rate = 0x400;
		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
		sclk_setting->Pcc_down_slew_rate = 0xffff;
		sclk_setting->SSc_En = dividers.ucSscEnable;
		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
		return result;
	}

	ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);

	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		if (clock > smu_data->range_table[i].trans_lower_frequency
		&& clock <= smu_data->range_table[i].trans_upper_frequency) {
			sclk_setting->PllRange = i;
			break;
		}
	}

	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
	temp <<= 0x10;
	do_div(temp, ref_clock);
	sclk_setting->Fcw_frac = temp & 0xffff;

	pcc_target_percent = 10; /*  Hardcode 10% for now. */
	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);

	ss_target_percent = 2; /*  Hardcode 2% for now. */
	sclk_setting->SSc_En = 0;
	if (ss_target_percent) {
		sclk_setting->SSc_En = 1;
		ss_target_freq = clock - (clock * ss_target_percent / 100);
		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
		temp <<= 0x10;
		do_div(temp, ref_clock);
		sclk_setting->Fcw1_frac = temp & 0xffff;
	}

	return 0;
}

static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU74_Discrete_GraphicsLevel *level)
{
	int result;
	/* PP_Clocks minClocks; */
	uint32_t mvdd;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMU_SclkSetting curr_sclk_setting = { 0 };
	phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;

	result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);

	if (hwmgr->od_enabled)
		vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
	else
		vdd_dep_table = table_info->vdd_dep_on_sclk;

	/* populate graphics levels */
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			vdd_dep_table, clock,
			&level->MinVoltage, &mvdd);

	PP_ASSERT_WITH_CODE((0 == result),
			"can not find VDDC voltage value for "
			"VDDC engine clock dependency table",
			return result);
	level->ActivityLevel = data->current_profile_setting.sclk_activity;

	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	level->EnabledForThrottle = 1;
	level->UpHyst = data->current_profile_setting.sclk_up_hyst;
	level->DownHyst = data->current_profile_setting.sclk_down_hyst;
	level->VoltageDownHyst = 0;
	level->PowerThrottle = 0;
	data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
								hwmgr->display_config->min_core_set_clock_in_sr);

	/* Default to slow, highest DPM level will be
	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
	 */
	if (data->update_up_hyst)
		level->UpHyst = (uint8_t)data->up_hyst;
	if (data->update_down_hyst)
		level->DownHyst = (uint8_t)data->down_hyst;

	level->SclkSetting = curr_sclk_setting;

	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
	return 0;
}

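/*
 * Populate every SCLK DPM level, clear DeepSleepDivId above level 1,
 * derive the per-level PCIe DPM mapping (from the PPTable PCIe table when
 * present, otherwise from the enabled PCIe DPM mask) and upload the whole
 * GraphicsLevel array to SMC RAM.
 */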
static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
	int result = 0;
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
			SMU74_MAX_LEVELS_GRAPHICS;
	struct SMU74_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i, max_entry;
	uint8_t hightest_pcie_level_enabled = 0,
		lowest_pcie_level_enabled = 0,
		mid_pcie_level_enabled = 0,
		count = 0;

	polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));

	for (i = 0; i < dpm_table->sclk_table.count; i++) {

		result = polaris10_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			levels[i].DeepSleepDivId = 0;
	}
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SPLLShutdownSupport))
		smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
	smu_data->smc_state_table.GraphicsDpmLevelCount =
			(uint8_t)dpm_table->sclk_table.count;
	hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);


	if (pcie_table != NULL) {
		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
				"There must be 1 or more PCIE levels defined in PPTable.",
				return -EINVAL);
		max_entry = pcie_entry_cnt - 1;
		for (i = 0; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel =
					(uint8_t) ((i < max_entry) ? i : max_entry);
	} else {
		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (hightest_pcie_level_enabled + 1))) != 0))
			hightest_pcie_level_enabled++;

		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << lowest_pcie_level_enabled)) == 0))
			lowest_pcie_level_enabled++;

		while ((count < hightest_pcie_level_enabled) &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
			count++;

		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
				hightest_pcie_level_enabled ?
						(lowest_pcie_level_enabled + 1 + count) :
						hightest_pcie_level_enabled;

		/* set pcieDpmLevel to hightest_pcie_level_enabled */
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled */
		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled */
		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* The level count is sent to the SMC once at SMC table init and never changes. */
	result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}


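/*
 * Build a single MCLK DPM level: look up its minimum voltages, apply the
 * current profile's hysteresis/activity settings, and enable memory
 * stutter only below the stutter threshold and only if the display pipe
 * already reports stutter enabled.
 */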
static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	uint32_t mclk_stutter_mode_threshold = 40000;
	phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;


	if (hwmgr->od_enabled)
		vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
	else
		vdd_dep_table = table_info->vdd_dep_on_mclk;

	if (vdd_dep_table) {
		result = polaris10_get_dependency_volt_by_clk(hwmgr,
				vdd_dep_table, clock,
				&mem_level->MinVoltage, &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	mem_level->MclkFrequency = clock;
	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
	mem_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
	mem_level->VoltageDownHyst = 0;
	mem_level->ActivityLevel = data->current_profile_setting.mclk_activity;
	mem_level->StutterEnable = false;
	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
	data->display_timing.vrefresh = hwmgr->display_config->vrefresh;

	if (mclk_stutter_mode_threshold &&
		(clock <= mclk_stutter_mode_threshold) &&
		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
				STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	if (!result) {
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}
	return result;
}

static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
	int result;
	/* populate MCLK dpm table to SMU7 */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
	uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
			SMU74_MAX_LEVELS_MEMORY;
	struct SMU74_Discrete_MemoryLevel *levels =
			smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
				"can not populate memory level as memory clock is zero",
				return -EINVAL);
		result = polaris10_populate_single_memory_level(hwmgr,
				dpm_table->mclk_table.dpm_levels[i].value,
				&levels[i]);
		if (i == dpm_table->mclk_table.count - 1) {
			levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
			levels[i].EnabledForActivity = 1;
		}
		if (result)
			return result;
	}

	/* To prevent MC activity in stutter mode from pushing DPM up,
	 * the UVD change complements this by putting the MCLK in a higher
	 * state by default, so that we are not affected by the up threshold
	 * or MCLK DPM latency.
	 */
	levels[0].ActivityLevel = 0x1f;
	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount =
			(uint8_t)dpm_table->mclk_table.count;
	hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	/* The level count is sent to the SMC once at SMC table init and never changes. */
	result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}

static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
		uint32_t mclk, SMIO_Pattern *smio_pat)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i = 0;

	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find the first MVDD entry whose clock is at least the requested clock */
		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}
		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
				"MVDD Voltage is outside the supported range.",
				return -EINVAL);
	} else
		return -EINVAL;

	return 0;
}

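/*
 * Fill the ACPI (lowest power) SCLK and MCLK levels from the VBIOS boot
 * values, reusing the regular SCLK parameter calculation and the MVDD
 * lookup.
 */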
polaris10_populate_smc_acpi_level(struct pp_hwmgr * hwmgr,SMU74_Discrete_DpmTable * table)1199 static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1200 		SMU74_Discrete_DpmTable *table)
1201 {
1202 	int result = 0;
1203 	uint32_t sclk_frequency;
1204 	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1205 	struct phm_ppt_v1_information *table_info =
1206 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1207 	SMIO_Pattern vol_level;
1208 	uint32_t mvdd;
1209 
1210 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1211 
1212 	/* Get MinVoltage and Frequency from DPM0,
1213 	 * already converted to SMC_UL */
1214 	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
1215 	result = polaris10_get_dependency_volt_by_clk(hwmgr,
1216 			table_info->vdd_dep_on_sclk,
1217 			sclk_frequency,
1218 			&table->ACPILevel.MinVoltage, &mvdd);
1219 	PP_ASSERT_WITH_CODE((0 == result),
1220 			"Cannot find ACPI VDDC voltage value "
1221 			"in Clock Dependency Table",
1222 			);
1223 
1224 	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
1225 	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
1226 
1227 	table->ACPILevel.DeepSleepDivId = 0;
1228 	table->ACPILevel.CcPwrDynRm = 0;
1229 	table->ACPILevel.CcPwrDynRm1 = 0;
1230 
1231 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1232 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1233 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1234 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1235 
1236 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
1237 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
1238 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
1239 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
1240 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
1241 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
1242 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
1243 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
1244 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
1245 	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
1246 
1247 
1248 	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1249 	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
1250 	result = polaris10_get_dependency_volt_by_clk(hwmgr,
1251 			table_info->vdd_dep_on_mclk,
1252 			table->MemoryACPILevel.MclkFrequency,
1253 			&table->MemoryACPILevel.MinVoltage, &mvdd);
1254 	PP_ASSERT_WITH_CODE((0 == result),
1255 			"Cannot find ACPI VDDCI voltage value "
1256 			"in Clock Dependency Table",
1257 			);
1258 
1259 	if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1260 			(data->mclk_dpm_key_disabled)))
1261 		polaris10_populate_mvdd_value(hwmgr,
1262 				data->dpm_table.mclk_table.dpm_levels[0].value,
1263 				&vol_level);
1264 
1265 	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
1266 		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
1267 	else
1268 		table->MemoryACPILevel.MinMvdd = 0;
1269 
1270 	table->MemoryACPILevel.StutterEnable = false;
1271 
1272 	table->MemoryACPILevel.EnabledForThrottle = 0;
1273 	table->MemoryACPILevel.EnabledForActivity = 0;
1274 	table->MemoryACPILevel.UpHyst = 0;
1275 	table->MemoryACPILevel.DownHyst = 100;
1276 	table->MemoryACPILevel.VoltageDownHyst = 0;
1277 	table->MemoryACPILevel.ActivityLevel =
1278 			PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1279 
1280 	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1281 	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1282 
1283 	return result;
1284 }
1285 
polaris10_populate_smc_vce_level(struct pp_hwmgr * hwmgr,SMU74_Discrete_DpmTable * table)1286 static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1287 		SMU74_Discrete_DpmTable *table)
1288 {
1289 	int result = -EINVAL;
1290 	uint8_t count;
1291 	struct pp_atomctrl_clock_dividers_vi dividers;
1292 	struct phm_ppt_v1_information *table_info =
1293 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1294 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1295 			table_info->mm_dep_table;
1296 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1297 	uint32_t vddci;
1298 
1299 	table->VceLevelCount = (uint8_t)(mm_table->count);
1300 	table->VceBootLevel = 0;
1301 
1302 	for (count = 0; count < table->VceLevelCount; count++) {
1303 		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
1304 		table->VceLevel[count].MinVoltage = 0;
1305 		table->VceLevel[count].MinVoltage |=
1306 				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1307 
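		/*
		 * VDDCI tracks VDDC: under GPIO or SVI2 control it is derived
		 * from the entry's VDDC minus VDDC_VDDCI_DELTA (snapped to the
		 * closest table entry in the GPIO case); otherwise the VBIOS
		 * boot-up VDDCI value is used.
		 */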
1308 		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1309 			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1310 						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1311 		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1312 			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1313 		else
1314 			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1315 
1316 
1317 		table->VceLevel[count].MinVoltage |=
1318 				(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1319 		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1320 
1321 		/* retrieve divider value from VBIOS */
1322 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1323 				table->VceLevel[count].Frequency, &dividers);
1324 		PP_ASSERT_WITH_CODE((0 == result),
1325 				"can not find divide id for VCE engine clock",
1326 				return result);
1327 
1328 		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1329 
1330 		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1331 		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1332 	}
1333 	return result;
1334 }
1335 
1336 static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
1337 		int32_t eng_clock, int32_t mem_clock,
1338 		SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
1339 {
1340 	uint32_t dram_timing;
1341 	uint32_t dram_timing2;
1342 	uint32_t burst_time;
1343 	int result;
1344 
1345 	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1346 			eng_clock, mem_clock);
1347 	PP_ASSERT_WITH_CODE(result == 0,
1348 			"Error calling VBIOS to set DRAM_TIMING.", return result);
1349 
1350 	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1351 	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1352 	burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1353 
1354 
1355 	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
1356 	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
1357 	arb_regs->McArbBurstTime   = (uint8_t)burst_time;
1358 
1359 	return 0;
1360 }
1361 
1362 static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1363 {
1364 	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1365 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1366 	struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
1367 	uint32_t i, j;
1368 	int result = 0;
1369 
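	/*
	 * The SMC ARB table holds one DRAM timing entry per (sclk level,
	 * mclk level) pair; each entry is built by letting the VBIOS program
	 * the timing for that clock pair and reading the registers back.
	 */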
1370 	for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
1371 		for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
1372 			result = polaris10_populate_memory_timing_parameters(hwmgr,
1373 					hw_data->dpm_table.sclk_table.dpm_levels[i].value,
1374 					hw_data->dpm_table.mclk_table.dpm_levels[j].value,
1375 					&arb_regs.entries[i][j]);
1376 			if (result == 0)
1377 				result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
1378 			if (result != 0)
1379 				return result;
1380 		}
1381 	}
1382 
1383 	result = smu7_copy_bytes_to_smc(
1384 			hwmgr,
1385 			smu_data->smu7_data.arb_table_start,
1386 			(uint8_t *)&arb_regs,
1387 			sizeof(SMU74_Discrete_MCArbDramTimingTable),
1388 			SMC_RAM_END);
1389 	return result;
1390 }
1391 
1392 static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1393 		struct SMU74_Discrete_DpmTable *table)
1394 {
1395 	int result = -EINVAL;
1396 	uint8_t count;
1397 	struct pp_atomctrl_clock_dividers_vi dividers;
1398 	struct phm_ppt_v1_information *table_info =
1399 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1400 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1401 			table_info->mm_dep_table;
1402 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1403 	uint32_t vddci;
1404 
1405 	table->UvdLevelCount = (uint8_t)(mm_table->count);
1406 	table->UvdBootLevel = 0;
1407 
1408 	for (count = 0; count < table->UvdLevelCount; count++) {
1409 		table->UvdLevel[count].MinVoltage = 0;
1410 		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1411 		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1412 		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1413 				VOLTAGE_SCALE) << VDDC_SHIFT;
1414 
1415 		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1416 			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1417 						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1418 		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1419 			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1420 		else
1421 			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1422 
1423 		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1424 		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1425 
1426 		/* retrieve divider value from VBIOS */
1427 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1428 				table->UvdLevel[count].VclkFrequency, &dividers);
1429 		PP_ASSERT_WITH_CODE((0 == result),
1430 				"can not find divide id for Vclk clock", return result);
1431 
1432 		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1433 
1434 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1435 				table->UvdLevel[count].DclkFrequency, &dividers);
1436 		PP_ASSERT_WITH_CODE((0 == result),
1437 				"can not find divide id for Dclk clock", return result);
1438 
1439 		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1440 
1441 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1442 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1443 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
1444 	}
1445 
1446 	return result;
1447 }
1448 
1449 static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1450 		struct SMU74_Discrete_DpmTable *table)
1451 {
1452 	int result = 0;
1453 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1454 
1455 	table->GraphicsBootLevel = 0;
1456 	table->MemoryBootLevel = 0;
1457 
1458 	/* find boot level from dpm table */
1459 	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1460 			data->vbios_boot_state.sclk_bootup_value,
1461 			(uint32_t *)&(table->GraphicsBootLevel));
1462 
1463 	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1464 			data->vbios_boot_state.mclk_bootup_value,
1465 			(uint32_t *)&(table->MemoryBootLevel));
1466 
1467 	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
1468 			VOLTAGE_SCALE;
1469 	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
1470 			VOLTAGE_SCALE;
1471 	table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
1472 			VOLTAGE_SCALE;
1473 
1474 	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
1475 	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
1476 	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
1477 
1478 	return 0;
1479 }
1480 
1481 static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1482 {
1483 	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1484 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1485 	struct phm_ppt_v1_information *table_info =
1486 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1487 	uint8_t count, level;
1488 
1489 	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
1490 
1491 	for (level = 0; level < count; level++) {
1492 		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
1493 				hw_data->vbios_boot_state.sclk_bootup_value) {
1494 			smu_data->smc_state_table.GraphicsBootLevel = level;
1495 			break;
1496 		}
1497 	}
1498 
1499 	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
1500 	for (level = 0; level < count; level++) {
1501 		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
1502 				hw_data->vbios_boot_state.mclk_bootup_value) {
1503 			smu_data->smc_state_table.MemoryBootLevel = level;
1504 			break;
1505 		}
1506 	}
1507 
1508 	return 0;
1509 }
1510 
1511 static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1512 {
1513 	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
1514 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1515 
1516 	uint8_t i, stretch_amount, volt_offset = 0;
1517 	struct phm_ppt_v1_information *table_info =
1518 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1519 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1520 			table_info->vdd_dep_on_sclk;
1521 
1522 	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1523 
1524 	/* Read the SMU efuse to calculate RO and determine whether
1525 	 * the part is SS or FF; if RO >= 1660 MHz, the part is FF.
1526 	 */
1527 	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1528 			ixSMU_EFUSE_0 + (67 * 4));
1529 	efuse &= 0xFF000000;
1530 	efuse = efuse >> 24;
1531 
1532 	if (hwmgr->chip_id == CHIP_POLARIS10) {
1533 		if (hwmgr->is_kicker) {
1534 			min = 1200;
1535 			max = 2500;
1536 		} else {
1537 			min = 1000;
1538 			max = 2300;
1539 		}
1540 	} else if (hwmgr->chip_id == CHIP_POLARIS11) {
1541 		if (hwmgr->is_kicker) {
1542 			min = 900;
1543 			max = 2100;
1544 		} else {
1545 			min = 1100;
1546 			max = 2100;
1547 		}
1548 	} else {
1549 		min = 1100;
1550 		max = 2100;
1551 	}
1552 
1553 	ro = efuse * (max - min) / 255 + min;
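	/*
	 * Map the 8-bit fuse value linearly onto the [min, max] RO range for
	 * this ASIC. Rough illustration with assumed values (not from the
	 * source): efuse = 128 on a non-kicker Polaris10 gives
	 * ro = 128 * (2300 - 1000) / 255 + 1000 ~= 1652.
	 */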
1554 
1555 	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1556 	for (i = 0; i < sclk_table->count; i++) {
1557 		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1558 				sclk_table->entries[i].cks_enable << i;
1559 		if (hwmgr->chip_id == CHIP_POLARIS10) {
1560 			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
1561 						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
1562 			volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
1563 					(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
1564 		} else {
1565 			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
1566 						(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
1567 			volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
1568 					(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
1569 		}
1570 
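		/*
		 * Convert the voltage delta plus the per-level cks_voffset fuse
		 * into the SMC's offset units, rounding up. The divide by 625
		 * after scaling by 100 suggests 6.25 mV steps, but that unit is
		 * an assumption, not stated in the source.
		 */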
1571 		if (volt_without_cks >= volt_with_cks)
1572 			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1573 					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
1574 
1575 		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1576 	}
1577 
1578 	smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
1579 	/* Populate CKS Lookup Table */
1580 	if (stretch_amount == 0 || stretch_amount > 5) {
1581 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1582 				PHM_PlatformCaps_ClockStretcher);
1583 		PP_ASSERT_WITH_CODE(false,
1584 				"Stretch Amount in PPTable not supported",
1585 				return -EINVAL);
1586 	}
1587 
1588 	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1589 	value &= 0xFFFFFFFE;
1590 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
1591 
1592 	return 0;
1593 }
1594 
1595 static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
1596 		struct SMU74_Discrete_DpmTable *table)
1597 {
1598 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1599 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1600 	uint16_t config;
1601 
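	/*
	 * VRConfig packs the regulator type of each rail (VDDGFX, VDDC,
	 * VDDCI, MVDD) into separate bit fields of one word. VDDGFX is
	 * reported as merged with VDDC here, so only the remaining rails
	 * are configured below.
	 */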
1602 	config = VR_MERGED_WITH_VDDC;
1603 	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
1604 
1605 	/* Set Vddc Voltage Controller */
1606 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1607 		config = VR_SVI2_PLANE_1;
1608 		table->VRConfig |= config;
1609 	} else {
1610 		PP_ASSERT_WITH_CODE(false,
1611 				"VDDC should be on SVI2 control in merged mode!",
1612 				);
1613 	}
1614 	/* Set Vddci Voltage Controller */
1615 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1616 		config = VR_SVI2_PLANE_2;  /* only in merged mode */
1617 		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1618 	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1619 		config = VR_SMIO_PATTERN_1;
1620 		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1621 	} else {
1622 		config = VR_STATIC_VOLTAGE;
1623 		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
1624 	}
1625 	/* Set Mvdd Voltage Controller */
1626 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
1627 		config = VR_SVI2_PLANE_2;
1628 		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1629 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
1630 			offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
1631 	} else {
1632 		config = VR_STATIC_VOLTAGE;
1633 		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
1634 	}
1635 
1636 	return 0;
1637 }
1638 
1639 
1640 static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1641 {
1642 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1643 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1644 	struct amdgpu_device *adev = hwmgr->adev;
1645 
1646 	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
1647 	int result = 0;
1648 	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
1649 	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
1650 	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
1651 	uint32_t tmp, i;
1652 
1653 	struct phm_ppt_v1_information *table_info =
1654 			(struct phm_ppt_v1_information *)hwmgr->pptable;
1655 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1656 			table_info->vdd_dep_on_sclk;
1657 
1658 
1659 	if (!hwmgr->avfs_supported)
1660 		return 0;
1661 
1662 	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1663 
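	/*
	 * For a few specific PCI device/revision combinations the fused
	 * AVFS CKS-OFF coefficients read back from the VBIOS are replaced
	 * with fixed, device-specific override values before being passed
	 * on to the SMC.
	 */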
1664 	if (0 == result) {
1665 		if (((adev->pdev->device == 0x67ef) &&
1666 		     ((adev->pdev->revision == 0xe0) ||
1667 		      (adev->pdev->revision == 0xe5))) ||
1668 		    ((adev->pdev->device == 0x67ff) &&
1669 		     ((adev->pdev->revision == 0xcf) ||
1670 		      (adev->pdev->revision == 0xef) ||
1671 		      (adev->pdev->revision == 0xff)))) {
1672 			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
1673 			if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) ||
1674 			    (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) {
1675 				if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) &&
1676 				    (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) &&
1677 				    (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) &&
1678 				    (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) &&
1679 				    (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) &&
1680 				    (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) {
1681 					avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF718F1D4;
1682 					avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x323FD;
1683 					avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0x1E455;
1684 					avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
1685 					avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0;
1686 					avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x23;
1687 				}
1688 			}
1689 		} else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) {
1690 			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
1691 			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF6B024DD;
1692 			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x3005E;
1693 			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0x18A5F;
1694 			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315;
1695 			avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1;
1696 			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x3B;
1697 		} else if (((adev->pdev->device == 0x67df) &&
1698 			    ((adev->pdev->revision == 0xe0) ||
1699 			     (adev->pdev->revision == 0xe3) ||
1700 			     (adev->pdev->revision == 0xe4) ||
1701 			     (adev->pdev->revision == 0xe5) ||
1702 			     (adev->pdev->revision == 0xe7) ||
1703 			     (adev->pdev->revision == 0xef))) ||
1704 			   ((adev->pdev->device == 0x6fdf) &&
1705 			    ((adev->pdev->revision == 0xef) ||
1706 			     (adev->pdev->revision == 0xff)))) {
1707 			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
1708 			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF843B66B;
1709 			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x59CB5;
1710 			avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0xFFFF287F;
1711 			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
1712 			avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23;
1713 			avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x58;
1714 		}
1715 	}
1716 
1717 	if (0 == result) {
1718 		table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
1719 		table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
1720 		table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
1721 		table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
1722 		table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
1723 		table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
1724 		table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
1725 		table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
1726 		table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
1727 		table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
1728 		table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
1729 		table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1730 		table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1731 		table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1732 		table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
1733 		table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
1734 		table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
1735 		AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
1736 		AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
1737 		AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
1738 		AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
1739 		AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
1740 		AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
1741 		AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
1742 
1743 		for (i = 0; i < NUM_VFT_COLUMNS; i++) {
1744 			AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
1745 			AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
1746 		}
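		/*
		 * Write the mean/sigma characterisation data and the per-column
		 * sclk offsets to the SMC SRAM locations published in the
		 * firmware header (AvfsMeanNSigma and AvfsSclkOffsetTable).
		 */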
1747 
1748 		result = smu7_read_smc_sram_dword(hwmgr,
1749 				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
1750 				&tmp, SMC_RAM_END);
1751 
1752 		smu7_copy_bytes_to_smc(hwmgr,
1753 					tmp,
1754 					(uint8_t *)&AVFS_meanNsigma,
1755 					sizeof(AVFS_meanNsigma_t),
1756 					SMC_RAM_END);
1757 
1758 		result = smu7_read_smc_sram_dword(hwmgr,
1759 				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
1760 				&tmp, SMC_RAM_END);
1761 		smu7_copy_bytes_to_smc(hwmgr,
1762 					tmp,
1763 					(uint8_t *)&AVFS_SclkOffset,
1764 					sizeof(AVFS_Sclk_Offset_t),
1765 					SMC_RAM_END);
1766 
1767 		data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
1768 						(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
1769 						(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
1770 						(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
1771 		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
1772 	}
1773 	return result;
1774 }
1775 
1776 static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
1777 {
1778 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1779 	uint32_t tmp;
1780 	int result;
1781 
1782 	/* This is a read-modify-write on the first byte of the ARB table.
1783 	 * The first byte of the SMU74_Discrete_MCArbDramTimingTable structure
1784 	 * is the 'current' field.
1785 	 * This solution is ugly, but we never write the whole table, only
1786 	 * individual fields in it.
1787 	 * In reality this field should not be in that structure
1788 	 * but in a soft register.
1789 	 */
1790 	result = smu7_read_smc_sram_dword(hwmgr,
1791 			smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
1792 
1793 	if (result)
1794 		return result;
1795 
1796 	tmp &= 0x00FFFFFF;
1797 	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
1798 
1799 	return smu7_write_smc_sram_dword(hwmgr,
1800 			smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
1801 }
1802 
1803 static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
1804 {
1805 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1806 	struct  phm_ppt_v1_information *table_info =
1807 			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
1808 
1809 	if (table_info &&
1810 			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
1811 			table_info->cac_dtp_table->usPowerTuneDataSetID)
1812 		smu_data->power_tune_defaults =
1813 				&polaris10_power_tune_data_set_array
1814 				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
1815 	else
1816 		smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
1817 
1818 }
1819 
1820 static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
1821 {
1822 	int result;
1823 	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
1824 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
1825 
1826 	struct phm_ppt_v1_information *table_info =
1827 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1828 	struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1829 	uint8_t i;
1830 	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1831 	pp_atomctrl_clock_dividers_vi dividers;
1832 
1833 	polaris10_initialize_power_tune_defaults(hwmgr);
1834 
1835 	if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
1836 		polaris10_populate_smc_voltage_tables(hwmgr, table);
1837 
1838 	table->SystemFlags = 0;
1839 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1840 			PHM_PlatformCaps_AutomaticDCTransition))
1841 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1842 
1843 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1844 			PHM_PlatformCaps_StepVddc))
1845 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1846 
1847 	if (hw_data->is_memory_gddr5)
1848 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1849 
1850 	if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
1851 		result = polaris10_populate_ulv_state(hwmgr, table);
1852 		PP_ASSERT_WITH_CODE(0 == result,
1853 				"Failed to initialize ULV state!", return result);
1854 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1855 				ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
1856 	}
1857 
1858 	result = polaris10_populate_smc_link_level(hwmgr, table);
1859 	PP_ASSERT_WITH_CODE(0 == result,
1860 			"Failed to initialize Link Level!", return result);
1861 
1862 	result = polaris10_populate_all_graphic_levels(hwmgr);
1863 	PP_ASSERT_WITH_CODE(0 == result,
1864 			"Failed to initialize Graphics Level!", return result);
1865 
1866 	result = polaris10_populate_all_memory_levels(hwmgr);
1867 	PP_ASSERT_WITH_CODE(0 == result,
1868 			"Failed to initialize Memory Level!", return result);
1869 
1870 	result = polaris10_populate_smc_acpi_level(hwmgr, table);
1871 	PP_ASSERT_WITH_CODE(0 == result,
1872 			"Failed to initialize ACPI Level!", return result);
1873 
1874 	result = polaris10_populate_smc_vce_level(hwmgr, table);
1875 	PP_ASSERT_WITH_CODE(0 == result,
1876 			"Failed to initialize VCE Level!", return result);
1877 
1878 	/* Since only the initial state is completely set up at this point
1879 	 * (the other states are just copies of the boot state), we only
1880 	 * need to populate the ARB settings for the initial state.
1881 	 */
1882 	result = polaris10_program_memory_timing_parameters(hwmgr);
1883 	PP_ASSERT_WITH_CODE(0 == result,
1884 			"Failed to Write ARB settings for the initial state.", return result);
1885 
1886 	result = polaris10_populate_smc_uvd_level(hwmgr, table);
1887 	PP_ASSERT_WITH_CODE(0 == result,
1888 			"Failed to initialize UVD Level!", return result);
1889 
1890 	result = polaris10_populate_smc_boot_level(hwmgr, table);
1891 	PP_ASSERT_WITH_CODE(0 == result,
1892 			"Failed to initialize Boot Level!", return result);
1893 
1894 	result = polaris10_populate_smc_initailial_state(hwmgr);
1895 	PP_ASSERT_WITH_CODE(0 == result,
1896 			"Failed to initialize Boot State!", return result);
1897 
1898 	result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
1899 	PP_ASSERT_WITH_CODE(0 == result,
1900 			"Failed to populate BAPM Parameters!", return result);
1901 
1902 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1903 			PHM_PlatformCaps_ClockStretcher)) {
1904 		result = polaris10_populate_clock_stretcher_data_table(hwmgr);
1905 		PP_ASSERT_WITH_CODE(0 == result,
1906 				"Failed to populate Clock Stretcher Data Table!",
1907 				return result);
1908 	}
1909 
1910 	result = polaris10_populate_avfs_parameters(hwmgr);
1911 	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result);
1912 
1913 	table->CurrSclkPllRange = 0xff;
1914 	table->GraphicsVoltageChangeEnable  = 1;
1915 	table->GraphicsThermThrottleEnable  = 1;
1916 	table->GraphicsInterval = 1;
1917 	table->VoltageInterval  = 1;
1918 	table->ThermalInterval  = 1;
1919 	table->TemperatureLimitHigh =
1920 			table_info->cac_dtp_table->usTargetOperatingTemp *
1921 			SMU7_Q88_FORMAT_CONVERSION_UNIT;
1922 	table->TemperatureLimitLow  =
1923 			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
1924 			SMU7_Q88_FORMAT_CONVERSION_UNIT;
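	/*
	 * Temperature limits are passed to the SMC in Q8.8 fixed point
	 * (degrees times SMU7_Q88_FORMAT_CONVERSION_UNIT); assuming the unit
	 * is 256, a 90 degree target would be encoded as 90 * 256 = 23040.
	 */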
1925 	table->MemoryVoltageChangeEnable = 1;
1926 	table->MemoryInterval = 1;
1927 	table->VoltageResponseTime = 0;
1928 	table->PhaseResponseTime = 0;
1929 	table->MemoryThermThrottleEnable = 1;
1930 	table->PCIeBootLinkLevel = 0;
1931 	table->PCIeGenInterval = 1;
1932 	table->VRConfig = 0;
1933 
1934 	result = polaris10_populate_vr_config(hwmgr, table);
1935 	PP_ASSERT_WITH_CODE(0 == result,
1936 			"Failed to populate VRConfig setting!", return result);
1937 	hw_data->vr_config = table->VRConfig;
1938 	table->ThermGpio = 17;
1939 	table->SclkStepSize = 0x4000;
1940 
1941 	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
1942 		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
1943 	} else {
1944 		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
1945 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1946 				PHM_PlatformCaps_RegulatorHot);
1947 	}
1948 
1949 	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
1950 			&gpio_pin)) {
1951 		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
1952 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1953 				PHM_PlatformCaps_AutomaticDCTransition);
1954 	} else {
1955 		table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
1956 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1957 				PHM_PlatformCaps_AutomaticDCTransition);
1958 	}
1959 
1960 	/* Thermal Output GPIO */
1961 	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
1962 			&gpio_pin)) {
1963 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1964 				PHM_PlatformCaps_ThermalOutGPIO);
1965 
1966 		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
1967 
1968 		/* For polarity, read GPIOPAD_A at the assigned GPIO pin:
1969 		 * the VBIOS programs this register to the 'inactive state',
1970 		 * so the driver can derive the 'active state' from it and
1971 		 * program the SMU with the correct polarity.
1972 		 */
1973 		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
1974 					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
1975 		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
1976 
1977 		/* if required, combine VRHot/PCC with thermal out GPIO */
1978 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
1979 		&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
1980 			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
1981 	} else {
1982 		table->ThermOutGpio = 17;
1983 		table->ThermOutPolarity = 1;
1984 		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
1985 	}
1986 
1987 	/* Populate BIF_SCLK levels into SMC DPM table */
1988 	for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
1989 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
1990 		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
1991 
1992 		if (i == 0)
1993 			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
1994 		else
1995 			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
1996 	}
1997 
1998 	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
1999 		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
2000 
2001 	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2002 	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2003 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2004 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2005 	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2006 	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
2007 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2008 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2009 	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2010 	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2011 
2012 	/* Upload all DPM data (DPM levels, level counts, etc.) to SMC memory. */
2013 	result = smu7_copy_bytes_to_smc(hwmgr,
2014 			smu_data->smu7_data.dpm_table_start +
2015 			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
2016 			(uint8_t *)&(table->SystemFlags),
2017 			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
2018 			SMC_RAM_END);
2019 	PP_ASSERT_WITH_CODE(0 == result,
2020 			"Failed to upload dpm data to SMC memory!", return result);
2021 
2022 	result = polaris10_init_arb_table_index(hwmgr);
2023 	PP_ASSERT_WITH_CODE(0 == result,
2024 			"Failed to upload arb data to SMC memory!", return result);
2025 
2026 	result = polaris10_populate_pm_fuses(hwmgr);
2027 	PP_ASSERT_WITH_CODE(0 == result,
2028 			"Failed to populate PM fuses to SMC memory!", return result);
2029 
2030 	return 0;
2031 }
2032 
2033 static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2034 {
2035 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2036 
2037 	if (data->need_update_smu7_dpm_table &
2038 		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2039 		return polaris10_program_memory_timing_parameters(hwmgr);
2040 
2041 	return 0;
2042 }
2043 
2044 int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
2045 {
2046 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2047 
2048 	if (!hwmgr->avfs_supported)
2049 		return 0;
2050 
2051 	smum_send_msg_to_smc_with_parameter(hwmgr,
2052 			PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
2053 
2054 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
2055 
2056 	/* Apply avfs cks-off voltages to avoid the overshoot
2057 	 * when switching to the highest sclk frequency
2058 	 */
2059 	if (data->apply_avfs_cks_off_voltage)
2060 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
2061 
2062 	return 0;
2063 }
2064 
2065 static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2066 {
2067 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2068 	SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2069 	uint32_t duty100;
2070 	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2071 	uint16_t fdo_min, slope1, slope2;
2072 	uint32_t reference_clock;
2073 	int res;
2074 	uint64_t tmp64;
2075 
2076 	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2077 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2078 			PHM_PlatformCaps_MicrocodeFanControl);
2079 		return 0;
2080 	}
2081 
2082 	if (smu_data->smu7_data.fan_table_start == 0) {
2083 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2084 				PHM_PlatformCaps_MicrocodeFanControl);
2085 		return 0;
2086 	}
2087 
2088 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2089 			CG_FDO_CTRL1, FMAX_DUTY100);
2090 
2091 	if (duty100 == 0) {
2092 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2093 				PHM_PlatformCaps_MicrocodeFanControl);
2094 		return 0;
2095 	}
2096 
2097 	/* use hardware fan control */
2098 	if (hwmgr->thermal_controller.use_hw_fan_control)
2099 		return 0;
2100 
2101 	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
2102 			usPWMMin * duty100;
2103 	do_div(tmp64, 10000);
2104 	fdo_min = (uint16_t)tmp64;
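	/*
	 * The divide by 10000 implies usPWMMin is in 0.01 percent units
	 * (an assumption): e.g. usPWMMin = 3000 (30.00 %) with duty100 = 255
	 * gives fdo_min = 3000 * 255 / 10000 = 76 (truncated).
	 */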
2105 
2106 	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2107 			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2108 	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2109 			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2110 
2111 	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2112 			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2113 	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2114 			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2115 
2116 	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2117 	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
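	/*
	 * Slope1/Slope2 approximate the PWM-vs-temperature gradient between
	 * the (TMin, PWMMin) -> (TMed, PWMMed) and (TMed, PWMMed) ->
	 * (THigh, PWMHigh) points; the "+ 50 ... / 100" terms round the
	 * scaled result to the nearest integer.
	 */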
2118 
2119 	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
2120 			thermal_controller.advanceFanControlParameters.usTMin) / 100);
2121 	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
2122 			thermal_controller.advanceFanControlParameters.usTMed) / 100);
2123 	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
2124 			thermal_controller.advanceFanControlParameters.usTMax) / 100);
2125 
2126 	fan_table.Slope1 = cpu_to_be16(slope1);
2127 	fan_table.Slope2 = cpu_to_be16(slope2);
2128 
2129 	fan_table.FdoMin = cpu_to_be16(fdo_min);
2130 
2131 	fan_table.HystDown = cpu_to_be16(hwmgr->
2132 			thermal_controller.advanceFanControlParameters.ucTHyst);
2133 
2134 	fan_table.HystUp = cpu_to_be16(1);
2135 
2136 	fan_table.HystSlope = cpu_to_be16(1);
2137 
2138 	fan_table.TempRespLim = cpu_to_be16(5);
2139 
2140 	reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
2141 
2142 	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
2143 			thermal_controller.advanceFanControlParameters.ulCycleDelay *
2144 			reference_clock) / 1600);
2145 
2146 	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2147 
2148 	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
2149 			hwmgr->device, CGS_IND_REG__SMC,
2150 			CG_MULT_THERMAL_CTRL, TEMP_SEL);
2151 
2152 	res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
2153 			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
2154 			SMC_RAM_END);
2155 
2156 	if (!res && hwmgr->thermal_controller.
2157 			advanceFanControlParameters.ucMinimumPWMLimit)
2158 		res = smum_send_msg_to_smc_with_parameter(hwmgr,
2159 				PPSMC_MSG_SetFanMinPwm,
2160 				hwmgr->thermal_controller.
2161 				advanceFanControlParameters.ucMinimumPWMLimit);
2162 
2163 	if (!res && hwmgr->thermal_controller.
2164 			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
2165 		res = smum_send_msg_to_smc_with_parameter(hwmgr,
2166 				PPSMC_MSG_SetFanSclkTarget,
2167 				hwmgr->thermal_controller.
2168 				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
2169 
2170 	if (res)
2171 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2172 				PHM_PlatformCaps_MicrocodeFanControl);
2173 
2174 	return 0;
2175 }
2176 
2177 static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2178 {
2179 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2180 	uint32_t mm_boot_level_offset, mm_boot_level_value;
2181 	struct phm_ppt_v1_information *table_info =
2182 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2183 
2184 	smu_data->smc_state_table.UvdBootLevel = 0;
2185 	if (table_info->mm_dep_table->count > 0)
2186 		smu_data->smc_state_table.UvdBootLevel =
2187 				(uint8_t) (table_info->mm_dep_table->count - 1);
2188 	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
2189 						UvdBootLevel);
2190 	mm_boot_level_offset /= 4;
2191 	mm_boot_level_offset *= 4;
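	/*
	 * UvdBootLevel is a single byte inside a dword of SMC RAM, so align
	 * the offset down to a dword boundary and merge the new value into
	 * bits 31:24 with a read-modify-write.
	 */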
2192 	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2193 			CGS_IND_REG__SMC, mm_boot_level_offset);
2194 	mm_boot_level_value &= 0x00FFFFFF;
2195 	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2196 	cgs_write_ind_register(hwmgr->device,
2197 			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2198 
2199 	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2200 			PHM_PlatformCaps_UVDDPM) ||
2201 		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2202 			PHM_PlatformCaps_StablePState))
2203 		smum_send_msg_to_smc_with_parameter(hwmgr,
2204 				PPSMC_MSG_UVDDPM_SetEnabledMask,
2205 				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
2206 	return 0;
2207 }
2208 
2209 static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2210 {
2211 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2212 	uint32_t mm_boot_level_offset, mm_boot_level_value;
2213 	struct phm_ppt_v1_information *table_info =
2214 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2215 
2216 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2217 					PHM_PlatformCaps_StablePState))
2218 		smu_data->smc_state_table.VceBootLevel =
2219 			(uint8_t) (table_info->mm_dep_table->count - 1);
2220 	else
2221 		smu_data->smc_state_table.VceBootLevel = 0;
2222 
2223 	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2224 					offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2225 	mm_boot_level_offset /= 4;
2226 	mm_boot_level_offset *= 4;
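	/*
	 * Same dword-aligned read-modify-write as for UVD above, but
	 * VceBootLevel is merged into bits 23:16 (mask 0xFF00FFFF).
	 */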
2227 	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2228 			CGS_IND_REG__SMC, mm_boot_level_offset);
2229 	mm_boot_level_value &= 0xFF00FFFF;
2230 	mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2231 	cgs_write_ind_register(hwmgr->device,
2232 			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2233 
2234 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
2235 		smum_send_msg_to_smc_with_parameter(hwmgr,
2236 				PPSMC_MSG_VCEDPM_SetEnabledMask,
2237 				(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
2238 	return 0;
2239 }
2240 
2241 static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
2242 {
2243 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2244 	struct phm_ppt_v1_information *table_info =
2245 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2246 	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
2247 	int max_entry, i;
2248 
2249 	max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
2250 						SMU74_MAX_LEVELS_LINK :
2251 						pcie_table->count;
2252 	/* Setup BIF_SCLK levels */
2253 	for (i = 0; i < max_entry; i++)
2254 		smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
2255 	return 0;
2256 }
2257 
2258 static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2259 {
2260 	switch (type) {
2261 	case SMU_UVD_TABLE:
2262 		polaris10_update_uvd_smc_table(hwmgr);
2263 		break;
2264 	case SMU_VCE_TABLE:
2265 		polaris10_update_vce_smc_table(hwmgr);
2266 		break;
2267 	case SMU_BIF_TABLE:
2268 		polaris10_update_bif_smc_table(hwmgr);
		break;
2269 	default:
2270 		break;
2271 	}
2272 	return 0;
2273 }
2274 
2275 static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2276 {
2277 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2278 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2279 
2280 	int result = 0;
2281 	uint32_t low_sclk_interrupt_threshold = 0;
2282 
2283 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2284 			PHM_PlatformCaps_SclkThrottleLowNotification)
2285 		&& (data->low_sclk_interrupt_threshold != 0)) {
2286 		low_sclk_interrupt_threshold =
2287 				data->low_sclk_interrupt_threshold;
2288 
2289 		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2290 
2291 		result = smu7_copy_bytes_to_smc(
2292 				hwmgr,
2293 				smu_data->smu7_data.dpm_table_start +
2294 				offsetof(SMU74_Discrete_DpmTable,
2295 					LowSclkInterruptThreshold),
2296 				(uint8_t *)&low_sclk_interrupt_threshold,
2297 				sizeof(uint32_t),
2298 				SMC_RAM_END);
2299 	}
2300 	PP_ASSERT_WITH_CODE((result == 0),
2301 			"Failed to update SCLK threshold!", return result);
2302 
2303 	result = polaris10_program_mem_timing_parameters(hwmgr);
2304 	PP_ASSERT_WITH_CODE((result == 0),
2305 			"Failed to program memory timing parameters!",
2306 			);
2307 
2308 	return result;
2309 }
2310 
2311 static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
2312 {
2313 	switch (type) {
2314 	case SMU_SoftRegisters:
2315 		switch (member) {
2316 		case HandshakeDisables:
2317 			return offsetof(SMU74_SoftRegisters, HandshakeDisables);
2318 		case VoltageChangeTimeout:
2319 			return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
2320 		case AverageGraphicsActivity:
2321 			return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
2322 		case AverageMemoryActivity:
2323 			return offsetof(SMU74_SoftRegisters, AverageMemoryActivity);
2324 		case PreVBlankGap:
2325 			return offsetof(SMU74_SoftRegisters, PreVBlankGap);
2326 		case VBlankTimeout:
2327 			return offsetof(SMU74_SoftRegisters, VBlankTimeout);
2328 		case UcodeLoadStatus:
2329 			return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
2330 		case DRAM_LOG_ADDR_H:
2331 			return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_H);
2332 		case DRAM_LOG_ADDR_L:
2333 			return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_L);
2334 		case DRAM_LOG_PHY_ADDR_H:
2335 			return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2336 		case DRAM_LOG_PHY_ADDR_L:
2337 			return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2338 		case DRAM_LOG_BUFF_SIZE:
2339 			return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2340 		}
2341 		break;
2342 	case SMU_Discrete_DpmTable:
2343 		switch (member) {
2344 		case UvdBootLevel:
2345 			return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
2346 		case VceBootLevel:
2347 			return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
2348 		case LowSclkInterruptThreshold:
2349 			return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
2350 		}
2351 		break;
2352 	}
2353 	pr_warn("can't get the offset of type %x member %x\n", type, member);
2354 	return 0;
2355 }
2356 
2357 static uint32_t polaris10_get_mac_definition(uint32_t value)
2358 {
2359 	switch (value) {
2360 	case SMU_MAX_LEVELS_GRAPHICS:
2361 		return SMU74_MAX_LEVELS_GRAPHICS;
2362 	case SMU_MAX_LEVELS_MEMORY:
2363 		return SMU74_MAX_LEVELS_MEMORY;
2364 	case SMU_MAX_LEVELS_LINK:
2365 		return SMU74_MAX_LEVELS_LINK;
2366 	case SMU_MAX_ENTRIES_SMIO:
2367 		return SMU74_MAX_ENTRIES_SMIO;
2368 	case SMU_MAX_LEVELS_VDDC:
2369 		return SMU74_MAX_LEVELS_VDDC;
2370 	case SMU_MAX_LEVELS_VDDGFX:
2371 		return SMU74_MAX_LEVELS_VDDGFX;
2372 	case SMU_MAX_LEVELS_VDDCI:
2373 		return SMU74_MAX_LEVELS_VDDCI;
2374 	case SMU_MAX_LEVELS_MVDD:
2375 		return SMU74_MAX_LEVELS_MVDD;
2376 	case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
2377 		return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
2378 	}
2379 
2380 	pr_warn("can't get the mac of %x\n", value);
2381 	return 0;
2382 }
2383 
2384 static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
2385 {
2386 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
2387 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2388 	uint32_t tmp;
2389 	int result;
2390 	bool error = false;
2391 
2392 	result = smu7_read_smc_sram_dword(hwmgr,
2393 			SMU7_FIRMWARE_HEADER_LOCATION +
2394 			offsetof(SMU74_Firmware_Header, DpmTable),
2395 			&tmp, SMC_RAM_END);
2396 
2397 	if (0 == result)
2398 		smu_data->smu7_data.dpm_table_start = tmp;
2399 
2400 	error |= (0 != result);
2401 
2402 	result = smu7_read_smc_sram_dword(hwmgr,
2403 			SMU7_FIRMWARE_HEADER_LOCATION +
2404 			offsetof(SMU74_Firmware_Header, SoftRegisters),
2405 			&tmp, SMC_RAM_END);
2406 
2407 	if (!result) {
2408 		data->soft_regs_start = tmp;
2409 		smu_data->smu7_data.soft_regs_start = tmp;
2410 	}
2411 
2412 	error |= (0 != result);
2413 
2414 	result = smu7_read_smc_sram_dword(hwmgr,
2415 			SMU7_FIRMWARE_HEADER_LOCATION +
2416 			offsetof(SMU74_Firmware_Header, mcRegisterTable),
2417 			&tmp, SMC_RAM_END);
2418 
2419 	if (!result)
2420 		smu_data->smu7_data.mc_reg_table_start = tmp;
2421 
2422 	result = smu7_read_smc_sram_dword(hwmgr,
2423 			SMU7_FIRMWARE_HEADER_LOCATION +
2424 			offsetof(SMU74_Firmware_Header, FanTable),
2425 			&tmp, SMC_RAM_END);
2426 
2427 	if (!result)
2428 		smu_data->smu7_data.fan_table_start = tmp;
2429 
2430 	error |= (0 != result);
2431 
2432 	result = smu7_read_smc_sram_dword(hwmgr,
2433 			SMU7_FIRMWARE_HEADER_LOCATION +
2434 			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
2435 			&tmp, SMC_RAM_END);
2436 
2437 	if (!result)
2438 		smu_data->smu7_data.arb_table_start = tmp;
2439 
2440 	error |= (0 != result);
2441 
2442 	result = smu7_read_smc_sram_dword(hwmgr,
2443 			SMU7_FIRMWARE_HEADER_LOCATION +
2444 			offsetof(SMU74_Firmware_Header, Version),
2445 			&tmp, SMC_RAM_END);
2446 
2447 	if (!result)
2448 		hwmgr->microcode_version_info.SMC = tmp;
2449 
2450 	error |= (0 != result);
2451 
2452 	return error ? -1 : 0;
2453 }
2454 
2455 static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
2456 {
2457 	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2458 			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2459 			? true : false;
2460 }
2461 
2462 static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
2463 				void *profile_setting)
2464 {
2465 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2466 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
2467 			(hwmgr->smu_backend);
2468 	struct profile_mode_setting *setting;
2469 	struct SMU74_Discrete_GraphicsLevel *levels =
2470 			smu_data->smc_state_table.GraphicsLevel;
2471 	uint32_t array = smu_data->smu7_data.dpm_table_start +
2472 			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
2473 
2474 	uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
2475 			offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
2476 	struct SMU74_Discrete_MemoryLevel *mclk_levels =
2477 			smu_data->smc_state_table.MemoryLevel;
2478 	uint32_t i;
2479 	uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2480 
2481 	if (profile_setting == NULL)
2482 		return -EINVAL;
2483 
2484 	setting = (struct profile_mode_setting *)profile_setting;
2485 
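	/*
	 * The graphics and memory DPM levels are patched in place in SMC RAM:
	 * for each level the byte offset of the field is computed, the
	 * containing dword is read back, the field is replaced with
	 * phm_set_field_to_u32(), and the dword is written out again. DPM is
	 * frozen around the update so the SMC does not switch levels while
	 * the table is being modified.
	 */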
2486 	if (setting->bupdate_sclk) {
2487 		if (!data->sclk_dpm_key_disabled)
2488 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
2489 		for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2490 			if (levels[i].ActivityLevel !=
2491 				cpu_to_be16(setting->sclk_activity)) {
2492 				levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2493 
2494 				clk_activity_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
2495 						+ offsetof(SMU74_Discrete_GraphicsLevel, ActivityLevel);
2496 				offset = clk_activity_offset & ~0x3;
2497 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2498 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2499 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2500 
2501 			}
2502 			if (levels[i].UpHyst != setting->sclk_up_hyst ||
2503 				levels[i].DownHyst != setting->sclk_down_hyst) {
2504 				levels[i].UpHyst = setting->sclk_up_hyst;
2505 				levels[i].DownHyst = setting->sclk_down_hyst;
2506 				up_hyst_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
2507 						+ offsetof(SMU74_Discrete_GraphicsLevel, UpHyst);
2508 				down_hyst_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
2509 						+ offsetof(SMU74_Discrete_GraphicsLevel, DownHyst);
2510 				offset = up_hyst_offset & ~0x3;
2511 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2512 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
2513 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
2514 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2515 			}
2516 		}
2517 		if (!data->sclk_dpm_key_disabled)
2518 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
2519 	}
2520 
2521 	if (setting->bupdate_mclk) {
2522 		if (!data->mclk_dpm_key_disabled)
2523 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
2524 		for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2525 			if (mclk_levels[i].ActivityLevel !=
2526 				cpu_to_be16(setting->mclk_activity)) {
2527 				mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2528 
2529 				clk_activity_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
2530 						+ offsetof(SMU74_Discrete_MemoryLevel, ActivityLevel);
2531 				offset = clk_activity_offset & ~0x3;
2532 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2533 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2534 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2535 
2536 			}
2537 			if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
2538 				mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
2539 				mclk_levels[i].UpHyst = setting->mclk_up_hyst;
2540 				mclk_levels[i].DownHyst = setting->mclk_down_hyst;
2541 				up_hyst_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
2542 						+ offsetof(SMU74_Discrete_MemoryLevel, UpHyst);
2543 				down_hyst_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
2544 						+ offsetof(SMU74_Discrete_MemoryLevel, DownHyst);
2545 				offset = up_hyst_offset & ~0x3;
2546 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2547 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
2548 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
2549 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2550 			}
2551 		}
2552 		if (!data->mclk_dpm_key_disabled)
2553 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
2554 	}
2555 	return 0;
2556 }
2557 
2558 const struct pp_smumgr_func polaris10_smu_funcs = {
2559 	.name = "polaris10_smu",
2560 	.smu_init = polaris10_smu_init,
2561 	.smu_fini = smu7_smu_fini,
2562 	.start_smu = polaris10_start_smu,
2563 	.check_fw_load_finish = smu7_check_fw_load_finish,
2564 	.request_smu_load_fw = smu7_reload_firmware,
2565 	.request_smu_load_specific_fw = NULL,
2566 	.send_msg_to_smc = smu7_send_msg_to_smc,
2567 	.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
2568 	.download_pptable_settings = NULL,
2569 	.upload_pptable_settings = NULL,
2570 	.update_smc_table = polaris10_update_smc_table,
2571 	.get_offsetof = polaris10_get_offsetof,
2572 	.process_firmware_header = polaris10_process_firmware_header,
2573 	.init_smc_table = polaris10_init_smc_table,
2574 	.update_sclk_threshold = polaris10_update_sclk_threshold,
2575 	.thermal_avfs_enable = polaris10_thermal_avfs_enable,
2576 	.thermal_setup_fan_table = polaris10_thermal_setup_fan_table,
2577 	.populate_all_graphic_levels = polaris10_populate_all_graphic_levels,
2578 	.populate_all_memory_levels = polaris10_populate_all_memory_levels,
2579 	.get_mac_definition = polaris10_get_mac_definition,
2580 	.is_dpm_running = polaris10_is_dpm_running,
2581 	.is_hw_avfs_present = polaris10_is_hw_avfs_present,
2582 	.update_dpm_settings = polaris10_update_dpm_settings,
2583 };
2584