/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/types.h>

#include "smumgr.h"
#include "pp_debug.h"
#include "ci_smumgr.h"
#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define CISLAND_MINIMUM_ENGINE_CLOCK 800
#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5

static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};


static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

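/*
 * Point the SMC indirect-access window at @smc_addr for a single dword
 * access through mmSMC_IND_DATA_0.  The address must be dword aligned and
 * the 4-byte access must stay below @limit; auto-increment is disabled, so
 * every access needs a fresh call.
 */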
static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
					uint32_t smc_addr, uint32_t limit)
{
	if ((0 != (3 & smc_addr))
		|| ((smc_addr + 3) >= limit)) {
		pr_err("smc_addr invalid\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	return 0;
}

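/*
 * Copy a byte buffer into SMC SRAM via the index/data register pair.
 * Bytes land in SMC address space MSB first, so src = {0x12, 0x34, 0x56,
 * 0x78} becomes the dword 0x12345678.  A trailing partial dword is merged
 * with the current SRAM contents by read-modify-write so the bytes past
 * @byte_count are preserved.
 */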
static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	if ((3 & smc_start_address)
		|| ((smc_start_address + byte_count) >= limit)) {
		pr_err("smc_start_address invalid\n");
		return -EINVAL;
	}

	addr = smc_start_address;

	while (byte_count >= 4) {
	/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;


		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
	}

	return 0;
}

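/*
 * Patch the dword at SMC address 0 so that, per the function name, the
 * microcontroller jumps to the firmware entry point when it starts
 * executing; the four bytes below are the canned instruction for that jump.
 */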
static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

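/*
 * The SMC is considered running when its core clock is ungated
 * (ck_disable == 0) and its program counter has moved past 0x20100,
 * which is taken to mean the firmware proper is executing.
 */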
bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_PC_C)));
}

static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
				uint32_t *value, uint32_t limit)
{
	int result;

	result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
	return 0;
}

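/*
 * Post a PPSMC message to the SMC mailbox: clear the response register,
 * write the message id, then wait for SMC_RESP to go non-zero.  A response
 * of 1 means the firmware accepted the message; anything else is only
 * logged, and the function still returns 0.
 */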
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("failed to send message %x, ret is %d\n", msg, ret);
	return 0;
}

static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(hwmgr, msg);
}

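/*
 * Select the power-tune defaults matching the PCI device id: Hawaii PRO
 * and Hawaii XT parts get their own tables, as do the Saturn XT ids;
 * everything else (including Bonaire) falls back to the Bonaire XT
 * defaults.
 */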
static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	dev_id = adev->pdev->device;

	switch (dev_id) {
	case 0x67BA:
	case 0x66B1:
		smu_data->power_tune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67B8:
	case 0x66B0:
		smu_data->power_tune_defaults = &defaults_hawaii_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		smu_data->power_tune_defaults = &defaults_saturn_xt;
		break;
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
	default:
		smu_data->power_tune_defaults = &defaults_bonaire_xt;
		break;
	}
}

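/*
 * Look up the lowest voltage that supports @clock in a clock/voltage
 * dependency table sorted by ascending clock.  If @clock exceeds every
 * entry, the voltage of the highest entry is returned instead.
 */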
static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
	struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
	uint32_t clock, uint32_t *vol)
{
	uint32_t i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*vol = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*vol = allowed_clock_voltage_table->entries[i - 1].v;
	return 0;
}

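/*
 * Derive the SPLL register values for an engine clock target.  The VBIOS
 * supplies the reference and post dividers plus the fractional feedback
 * divider (12 integer, 14 fraction bits); spread spectrum (CLKS/CLKV) is
 * layered on top when the platform supports it.
 */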
static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ss_info;
		uint32_t vco_freq = clock * dividers.uc_pll_post_div;

		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ss_info)) {
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ss_info.speed_spectrum_rate);
			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	sclk->SclkFrequency        = clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
				const struct phm_phase_shedding_limits_table *pl,
					uint32_t sclk, uint32_t *p_shed)
{
	unsigned int i;

	/* use the minimum phase shedding */
	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (sclk < pl->entries[i].Sclk) {
			*p_shed = i;
			break;
		}
	}
}

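/*
 * Find the largest deep-sleep divider id D (0 to 5) such that
 * clock >> D still meets the minimum engine clock.  For example,
 * clock = 30000 with an 800 floor gives D = 5, since 30000 >> 5 = 937,
 * which is still >= 800.
 */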
static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
			uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);

	if (clock < min) {
		pr_info("Engine clock can't satisfy stutter requirement!\n");
		return 0;
	}
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

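/*
 * Fill one SMU7_Discrete_GraphicsLevel: SPLL setup from
 * ci_calculate_sclk_params(), the minimum VDDC for the clock, optional
 * phase shedding, activity/hysteresis settings and, when SCLK deep sleep
 * is enabled, the deep-sleep divider.  Multi-byte fields are byte-swapped
 * to the SMC's big-endian layout at the end.
 */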
static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);


	result = ci_calculate_sclk_params(hwmgr, clock, level);

	/* populate graphics levels */
	result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
			(uint32_t *)(&level->MinVddc));
	if (result) {
		pr_err("vdd_dep_on_sclk table is NULL\n");
		return result;
	}

	level->SclkFrequency = clock;
	level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(hwmgr,
				hwmgr->dyn_state.vddc_phase_shed_limits_table,
				clock,
				&level->MinVddcPhases);

	level->ActivityLevel = data->current_profile_setting.sclk_activity;
	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	/* this level can be used for throttling.*/
	level->EnabledForThrottle = 1;
	level->UpH = data->current_profile_setting.sclk_up_hyst;
	level->DownH = data->current_profile_setting.sclk_down_hyst;
	level->VoltageDownH = 0;
	level->PowerThrottle = 0;


	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId =
				ci_get_sleep_divider_id_from_clock(clock,
						CISLAND_MINIMUM_ENGINE_CLOCK);

	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (0 == result) {
		level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	}

	return result;
}

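/*
 * Build the complete graphics (SCLK) DPM level array from the host
 * dpm_table and upload it to SMC SRAM.  Only level 0 is enabled for
 * activity, the top level gets the high display watermark, and levels
 * above index 1 have their deep-sleep divider cleared.
 */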
static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = ci_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	result = ci_copy_bytes_to_smc(hwmgr, array,
				   (u8 *)levels, array_size,
				   SMC_RAM_END);

	return result;

}

static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (ci_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	uint16_t tmp;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
	else
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);

	return 0;
}

static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
		} else {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
		}
	}

	return 0;
}

static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *vid = smu_data->power_tune_table.VddCVid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
		"There should never be more than 8 entries for VddcVid!!!",
		return -EINVAL);

	for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
	smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
	uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;

	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);

	return 0;
}

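/*
 * Assemble the SMU7_Discrete_PmFuses image dword group by dword group
 * (BAPM VIDs, VddC VIDs, SVI load line, TDC limit, TdcWaterfallCtl, fuzzy
 * fan, GNB PML min/max VID, base leakage) and, when power containment is
 * enabled, write the whole structure at the PmFuseTable offset reported
 * by the firmware header.
 */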
static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;
	int ret = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END)) {
			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
			return -EINVAL;
		}

		/* DW0 - DW3 */
		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
		/* DW4 - DW5 */
		ret |= ci_populate_vddc_vid(hwmgr);
		/* DW6 */
		ret |= ci_populate_svi_load_line(hwmgr);
		/* DW7 */
		ret |= ci_populate_tdc_limit(hwmgr);
		/* DW8 */
		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);

		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);

		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);

		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
		if (ret)
			return ret;

		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
	}
	return ret;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	const uint16_t *def1, *def2;
	int i, j, k;

	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = 0;
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
		uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}

static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab,
		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
	int result;

	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
			&smc_voltage_tab->StdVoltageHiSidd,
			&smc_voltage_tab->StdVoltageLoSidd);
	if (result) {
		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
	}

	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);

	return 0;
}

static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->VddcLevelCount = data->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddc_voltage_table.entries[count]),
				&(table->VddcLevel[count]));
		PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);

		/* GPIO voltage control */
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
			table->VddcLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
			table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
		} else {
			table->VddcLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->VddciLevelCount = data->vddci_voltage_table.count;

	for (count = 0; count < table->VddciLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddci_voltage_table.entries[count]),
				&(table->VddciLevel[count]));
		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
			table->VddciLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
			table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
		} else {
			table->VddciLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->MvddLevelCount = data->mvdd_voltage_table.count;

	for (count = 0; count < table->MvddLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->mvdd_voltage_table.entries[count]),
				&table->MvddLevel[count]);
		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
			table->MvddLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
			table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
		} else {
			table->MvddLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);

	return 0;
}


static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result;

	result = ci_populate_smc_vddc_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDC voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_vdd_ci_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDCI voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_mvdd_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate MVDD voltage table to SMC", return -EINVAL);

	return 0;
}

static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU7_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else  /* used in SVI2 Mode */
			state->VddcOffsetVid = (uint8_t)(
					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
						* VOLTAGE_VID_OFFSET_SCALE2
						/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}

static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
		 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(hwmgr, ulv_level);
}

static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i;

/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}

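/*
 * Derive the MPLL register set for a memory clock target from the VBIOS
 * dividers.  GDDR5 parts additionally program the DQ post divider, memory
 * spread spectrum is folded into MPLL_SS1/SS2 when supported, and the
 * MRDCK power-down bits follow @dllStateOn.
 */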
static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);


	mclk->MclkFrequency   = memory_clock;
	mclk->MpllFuncCntl    = mpll_func_cntl;
	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
	mclk->DllCntl         = dll_cntl;
	mclk->MpllSs1         = mpll_ss1;
	mclk->MpllSs2         = mpll_ss2;

	return 0;
}

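/*
 * Map a memory clock onto the 4-bit strobe-ratio index used by the MC
 * sequencer, clamped to 0x0..0xf.  With clocks in the 10 kHz units used
 * throughout this file, that is one step per 25 MHz in strobe mode and
 * one step per 50 MHz otherwise.
 */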
static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint8_t mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
	}

	return mc_para_index;
}

static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	uint8_t mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);

	return mc_para_index;
}

static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
					uint32_t memory_clock, uint32_t *p_shed)
{
	unsigned int i;

	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (memory_clock < pl->entries[i].Mclk) {
			*p_shed = i;
			break;
		}
	}

	return 0;
}

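/*
 * Fill one SMU7_Discrete_MemoryLevel: minimum VDDC/VDDCI/MVDD for the
 * clock, strobe-mode and EDC decisions against the 40000 (10 kHz unit)
 * thresholds, DLL state derived from the MC_SEQ_MISC registers, and the
 * MPLL setup from ci_calculate_mclk_params().  Fields are byte-swapped
 * for the SMC at the end.
 */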
static int ci_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *memory_level
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dll_state_on;
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.mvdd_dependency_on_mclk,
				memory_clock,
				&memory_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
	memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
	memory_level->VoltageDownH = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;

	/* stutter mode not supported on ci */

	/* decide strobe mode*/
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			else
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
		} else
			dll_state_on = data->dll_default_on;
	} else {
		memory_level->StrobeRatio =
			ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = ci_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (0 == result) {
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}

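/*
 * Build the complete memory (MCLK) DPM level array and upload it to SMC
 * SRAM.  What looks like a workaround for two Hawaii device ids
 * (0x67B0/0x67B1) copies the level-0 VDDCI/MVDD minimums into level 1.
 */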
static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	dev_id = adev->pdev->device;

	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) ||  (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	result = ci_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}

static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
					SMU7_Discrete_VoltageLevel *voltage)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t i = 0;

	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find mvdd value which clock is more than request */
		for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
			if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
				/* Always round to higher voltage. */
				voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
			"MVDD Voltage is outside the supported range.", return -EINVAL);

	} else {
		return -EINVAL;
	}

	return 0;
}

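/*
 * Program the ACPI (lowest power) level: minimum VDDC/VDDCI/MVDD, SCLK
 * sourced from the reference clock with the SPLL powered down and held
 * in reset, and the memory DLLs reset and powered down.
 */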
static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMU7_Discrete_VoltageLevel voltage_level;
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;


	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);


	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
	else {
		if (data->acpi_vddci != 0)
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
	}

	if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1488 	table->MemoryACPILevel.MpllFuncCntl_1     =
1489 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1490 	table->MemoryACPILevel.MpllFuncCntl_2     =
1491 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1492 	table->MemoryACPILevel.MpllSs1            =
1493 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1494 	table->MemoryACPILevel.MpllSs2            =
1495 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1496 
1497 	table->MemoryACPILevel.EnabledForThrottle = 0;
1498 	table->MemoryACPILevel.EnabledForActivity = 0;
1499 	table->MemoryACPILevel.UpH = 0;
1500 	table->MemoryACPILevel.DownH = 100;
1501 	table->MemoryACPILevel.VoltageDownH = 0;
1502 	/* Indicates maximum activity level for this performance level.*/
1503 	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1504 
1505 	table->MemoryACPILevel.StutterEnable = 0;
1506 	table->MemoryACPILevel.StrobeEnable = 0;
1507 	table->MemoryACPILevel.EdcReadEnable = 0;
1508 	table->MemoryACPILevel.EdcWriteEnable = 0;
1509 	table->MemoryACPILevel.RttEnable = 0;
1510 
1511 	return result;
1512 }
1513 
1514 static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1515 					SMU7_Discrete_DpmTable *table)
1516 {
1517 	int result = 0;
1518 	uint8_t count;
1519 	struct pp_atomctrl_clock_dividers_vi dividers;
1520 	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1521 		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1522 
1523 	table->UvdLevelCount = (uint8_t)(uvd_table->count);
1524 
1525 	for (count = 0; count < table->UvdLevelCount; count++) {
1526 		table->UvdLevel[count].VclkFrequency =
1527 					uvd_table->entries[count].vclk;
1528 		table->UvdLevel[count].DclkFrequency =
1529 					uvd_table->entries[count].dclk;
1530 		table->UvdLevel[count].MinVddc =
1531 					uvd_table->entries[count].v * VOLTAGE_SCALE;
1532 		table->UvdLevel[count].MinVddcPhases = 1;
1533 
1534 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1535 				table->UvdLevel[count].VclkFrequency, &dividers);
1536 		PP_ASSERT_WITH_CODE((0 == result),
1537 				"cannot find divider ID for the Vclk clock", return result);
1538 
1539 		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1540 
1541 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1542 				table->UvdLevel[count].DclkFrequency, &dividers);
1543 		PP_ASSERT_WITH_CODE((0 == result),
1544 				"cannot find divider ID for the Dclk clock", return result);
1545 
1546 		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1547 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1548 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1549 		CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1550 	}
1551 
1552 	return result;
1553 }
1554 
1555 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1556 		SMU7_Discrete_DpmTable *table)
1557 {
1558 	int result = -EINVAL;
1559 	uint8_t count;
1560 	struct pp_atomctrl_clock_dividers_vi dividers;
1561 	struct phm_vce_clock_voltage_dependency_table *vce_table =
1562 				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1563 
1564 	table->VceLevelCount = (uint8_t)(vce_table->count);
1565 	table->VceBootLevel = 0;
1566 
1567 	for (count = 0; count < table->VceLevelCount; count++) {
1568 		table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1569 		table->VceLevel[count].MinVoltage =
1570 				vce_table->entries[count].v * VOLTAGE_SCALE;
1571 		table->VceLevel[count].MinPhases = 1;
1572 
1573 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1574 				table->VceLevel[count].Frequency, &dividers);
1575 		PP_ASSERT_WITH_CODE((0 == result),
1576 				"cannot find divider ID for the VCE engine clock",
1577 				return result);
1578 
1579 		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1580 
1581 		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1582 		CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1583 	}
1584 	return result;
1585 }
1586 
1587 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1588 					SMU7_Discrete_DpmTable *table)
1589 {
1590 	int result = -EINVAL;
1591 	uint8_t count;
1592 	struct pp_atomctrl_clock_dividers_vi dividers;
1593 	struct phm_acp_clock_voltage_dependency_table *acp_table =
1594 				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1595 
1596 	table->AcpLevelCount = (uint8_t)(acp_table->count);
1597 	table->AcpBootLevel = 0;
1598 
1599 	for (count = 0; count < table->AcpLevelCount; count++) {
1600 		table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1601 		table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1602 		table->AcpLevel[count].MinPhases = 1;
1603 
1604 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1605 				table->AcpLevel[count].Frequency, &dividers);
1606 		PP_ASSERT_WITH_CODE((0 == result),
1607 				"cannot find divider ID for the ACP engine clock", return result);
1608 
1609 		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1610 
1611 		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1612 		CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1613 	}
1614 	return result;
1615 }
1616 
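/*
 * Have the VBIOS program the DRAM timings for one (engine clock,
 * memory clock) pair, then read the resulting MC_ARB_DRAM_TIMING,
 * MC_ARB_DRAM_TIMING2 and burst-time values back from the registers
 * and stash them in the given SMC arbitration table entry.
 */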
1617 static int ci_populate_memory_timing_parameters(
1618 		struct pp_hwmgr *hwmgr,
1619 		uint32_t engine_clock,
1620 		uint32_t memory_clock,
1621 		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
1622 		)
1623 {
1624 	uint32_t dramTiming;
1625 	uint32_t dramTiming2;
1626 	uint32_t burstTime;
1627 	int result;
1628 
1629 	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1630 				engine_clock, memory_clock);
1631 
1632 	PP_ASSERT_WITH_CODE(result == 0,
1633 		"Error calling VBIOS to set DRAM_TIMING.", return result);
1634 
1635 	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1636 	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1637 	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1638 
1639 	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
1640 	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1641 	arb_regs->McArbBurstTime = (uint8_t)burstTime;
1642 
1643 	return 0;
1644 }
1645 
1646 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1647 {
1648 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1649 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1650 	int result = 0;
1651 	SMU7_Discrete_MCArbDramTimingTable  arb_regs;
1652 	uint32_t i, j;
1653 
1654 	memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1655 
1656 	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1657 		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1658 			result = ci_populate_memory_timing_parameters
1659 				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1660 				 data->dpm_table.mclk_table.dpm_levels[j].value,
1661 				 &arb_regs.entries[i][j]);
1662 
1663 			if (0 != result)
1664 				break;
1665 		}
1666 	}
1667 
1668 	if (0 == result) {
1669 		result = ci_copy_bytes_to_smc(
1670 				hwmgr,
1671 				smu_data->arb_table_start,
1672 				(uint8_t *)&arb_regs,
1673 				sizeof(SMU7_Discrete_MCArbDramTimingTable),
1674 				SMC_RAM_END
1675 				);
1676 	}
1677 
1678 	return result;
1679 }
1680 
1681 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1682 			SMU7_Discrete_DpmTable *table)
1683 {
1684 	int result = 0;
1685 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1686 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1687 
1688 	table->GraphicsBootLevel = 0;
1689 	table->MemoryBootLevel = 0;
1690 
1691 	/* find boot level from dpm table*/
1692 	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1693 			data->vbios_boot_state.sclk_bootup_value,
1694 			(uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1695 
1696 	if (0 != result) {
1697 		smu_data->smc_state_table.GraphicsBootLevel = 0;
1698 		pr_err("Boot engine clock value from the VBIOS was not found in the dependency table. Using Graphics DPM level 0!\n");
1699 		result = 0;
1700 	}
1701 
1702 	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1703 		data->vbios_boot_state.mclk_bootup_value,
1704 		(uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1705 
1706 	if (0 != result) {
1707 		smu_data->smc_state_table.MemoryBootLevel = 0;
1708 		pr_err("Boot memory clock value from the VBIOS was not found in the dependency table. Using Memory DPM level 0!\n");
1709 		result = 0;
1710 	}
1711 
1712 	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1713 	table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1714 	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1715 
1716 	return result;
1717 }
1718 
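/*
 * Copy the s0/s1 MC register address pairs into the SMC copy of the
 * table, compacting out columns whose validflag bit is clear (i.e.
 * registers whose value never changes across memory clock levels).
 */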
1719 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1720 				 SMU7_Discrete_MCRegisters *mc_reg_table)
1721 {
1722 	const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1723 
1724 	uint32_t i, j;
1725 
1726 	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1727 		if (smu_data->mc_reg_table.validflag & (1 << j)) {
1728 			PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1729 				"Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1730 			mc_reg_table->address[i].s0 =
1731 				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1732 			mc_reg_table->address[i].s1 =
1733 				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1734 			i++;
1735 		}
1736 	}
1737 
1738 	mc_reg_table->last = (uint8_t)i;
1739 
1740 	return 0;
1741 }
1742 
1743 static void ci_convert_mc_registers(
1744 	const struct ci_mc_reg_entry *entry,
1745 	SMU7_Discrete_MCRegisterSet *data,
1746 	uint32_t num_entries, uint32_t valid_flag)
1747 {
1748 	uint32_t i, j;
1749 
1750 	for (i = 0, j = 0; j < num_entries; j++) {
1751 		if (valid_flag & (1 << j)) {
1752 			data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1753 			i++;
1754 		}
1755 	}
1756 }
1757 
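/*
 * Select the AC timing entry for a given memory clock: the first entry
 * whose mclk_max covers the requested clock, falling back to the last
 * entry when the clock lies above every range.
 */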
1758 static int ci_convert_mc_reg_table_entry_to_smc(
1759 		struct pp_hwmgr *hwmgr,
1760 		const uint32_t memory_clock,
1761 		SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1762 		)
1763 {
1764 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1765 	uint32_t i = 0;
1766 
1767 	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1768 		if (memory_clock <=
1769 			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1770 			break;
1771 		}
1772 	}
1773 
1774 	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1775 		--i;
1776 
1777 	ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1778 				mc_reg_table_data, smu_data->mc_reg_table.last,
1779 				smu_data->mc_reg_table.validflag);
1780 
1781 	return 0;
1782 }
1783 
1784 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1785 		SMU7_Discrete_MCRegisters *mc_regs)
1786 {
1787 	int result = 0;
1788 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1789 	int res;
1790 	uint32_t i;
1791 
1792 	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1793 		res = ci_convert_mc_reg_table_entry_to_smc(
1794 				hwmgr,
1795 				data->dpm_table.mclk_table.dpm_levels[i].value,
1796 				&mc_regs->data[i]
1797 				);
1798 
1799 		if (0 != res)
1800 			result = res;
1801 	}
1802 
1803 	return result;
1804 }
1805 
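/*
 * Refresh and re-upload the per-mclk MC register values, but only when
 * an overdrive MCLK change has made the cached SMC copy stale.
 */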
1806 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1807 {
1808 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1809 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1810 	uint32_t address;
1811 	int32_t result;
1812 
1813 	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1814 		return 0;
1815 
1816 
1817 	memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1818 
1819 	result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1820 
1821 	if (result != 0)
1822 		return result;
1823 
1824 	address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1825 
1826 	return  ci_copy_bytes_to_smc(hwmgr, address,
1827 				 (uint8_t *)&smu_data->mc_regs.data[0],
1828 				sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1829 				SMC_RAM_END);
1830 }
1831 
1832 static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1833 {
1834 	int result;
1835 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1836 
1837 	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
1838 	result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1839 	PP_ASSERT_WITH_CODE(0 == result,
1840 		"Failed to initialize MCRegTable for the MC register addresses!", return result;);
1841 
1842 	result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1843 	PP_ASSERT_WITH_CODE(0 == result,
1844 		"Failed to initialize MCRegTable for driver state!", return result;);
1845 
1846 	return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
1847 			(uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
1848 }
1849 
1850 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1851 {
1852 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1853 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1854 	uint8_t count, level;
1855 
1856 	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1857 
1858 	for (level = 0; level < count; level++) {
1859 		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1860 			 >= data->vbios_boot_state.sclk_bootup_value) {
1861 			smu_data->smc_state_table.GraphicsBootLevel = level;
1862 			break;
1863 		}
1864 	}
1865 
1866 	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1867 
1868 	for (level = 0; level < count; level++) {
1869 		if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1870 			>= data->vbios_boot_state.mclk_bootup_value) {
1871 			smu_data->smc_state_table.MemoryBootLevel = level;
1872 			break;
1873 		}
1874 	}
1875 
1876 	return 0;
1877 }
1878 
1879 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1880 					    SMU7_Discrete_DpmTable *table)
1881 {
1882 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1883 
1884 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1885 		table->SVI2Enable = 1;
1886 	else
1887 		table->SVI2Enable = 0;
1888 	return 0;
1889 }
1890 
1891 static int ci_start_smc(struct pp_hwmgr *hwmgr)
1892 {
1893 	/* set the SMC instruction start point to 0x0 */
1894 	ci_program_jump_on_start(hwmgr);
1895 
1896 	/* enable smc clock */
1897 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
1898 
1899 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
1900 
1901 	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
1902 				 INTERRUPTS_ENABLED, 1);
1903 
1904 	return 0;
1905 }
1906 
1907 static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
1908 {
1909 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1910 	uint16_t config;
1911 
1912 	config = VR_SVI2_PLANE_1;
1913 	table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1914 
1915 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1916 		config = VR_SVI2_PLANE_2;
1917 		table->VRConfig |= config;
1918 	} else {
1919 		pr_info("VDDC should be on the SVI2 controller!\n");
1920 	}
1921 
1922 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1923 		config = VR_SVI2_PLANE_2;
1924 		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1925 	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1926 		config = VR_SMIO_PATTERN_1;
1927 		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1928 	}
1929 
1930 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1931 		config = VR_SMIO_PATTERN_2;
1932 		table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1933 	}
1934 
1935 	return 0;
1936 }
1937 
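/*
 * Master routine for building the discrete DPM table: populates the
 * voltage tables, the graphics/memory/link/ACPI/UVD/VCE/ACP levels,
 * the boot state and the BAPM/VR configuration, byte-swaps the
 * host-endian fields, and uploads the result (minus the trailing PID
 * controller blocks, as implied by the upload size) to SMC RAM before
 * starting the SMC.
 */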
1938 static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1939 {
1940 	int result;
1941 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1942 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1943 	SMU7_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
1944 	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1945 	u32 i;
1946 
1947 	ci_initialize_power_tune_defaults(hwmgr);
1948 	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1949 
1950 	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1951 		ci_populate_smc_voltage_tables(hwmgr, table);
1952 
1953 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1954 			PHM_PlatformCaps_AutomaticDCTransition))
1955 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1956 
1957 
1958 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1959 			PHM_PlatformCaps_StepVddc))
1960 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1961 
1962 	if (data->is_memory_gddr5)
1963 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1964 
1965 	if (data->ulv_supported) {
1966 		result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
1967 		PP_ASSERT_WITH_CODE(0 == result,
1968 			"Failed to initialize ULV state!", return result);
1969 
1970 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1971 			ixCG_ULV_PARAMETER, 0x40035);
1972 	}
1973 
1974 	result = ci_populate_all_graphic_levels(hwmgr);
1975 	PP_ASSERT_WITH_CODE(0 == result,
1976 		"Failed to initialize Graphics Level!", return result);
1977 
1978 	result = ci_populate_all_memory_levels(hwmgr);
1979 	PP_ASSERT_WITH_CODE(0 == result,
1980 		"Failed to initialize Memory Level!", return result);
1981 
1982 	result = ci_populate_smc_link_level(hwmgr, table);
1983 	PP_ASSERT_WITH_CODE(0 == result,
1984 		"Failed to initialize Link Level!", return result);
1985 
1986 	result = ci_populate_smc_acpi_level(hwmgr, table);
1987 	PP_ASSERT_WITH_CODE(0 == result,
1988 		"Failed to initialize ACPI Level!", return result);
1989 
1990 	result = ci_populate_smc_vce_level(hwmgr, table);
1991 	PP_ASSERT_WITH_CODE(0 == result,
1992 		"Failed to initialize VCE Level!", return result);
1993 
1994 	result = ci_populate_smc_acp_level(hwmgr, table);
1995 	PP_ASSERT_WITH_CODE(0 == result,
1996 		"Failed to initialize ACP Level!", return result);
1997 
1998 	/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state), */
1999 	/* we only need to populate the ARB settings for the initial state. */
2000 	result = ci_program_memory_timing_parameters(hwmgr);
2001 	PP_ASSERT_WITH_CODE(0 == result,
2002 		"Failed to Write ARB settings for the initial state.", return result);
2003 
2004 	result = ci_populate_smc_uvd_level(hwmgr, table);
2005 	PP_ASSERT_WITH_CODE(0 == result,
2006 		"Failed to initialize UVD Level!", return result);
2007 
2008 	table->UvdBootLevel  = 0;
2009 	table->VceBootLevel  = 0;
2010 	table->AcpBootLevel  = 0;
2011 	table->SamuBootLevel  = 0;
2012 
2013 	table->GraphicsBootLevel = 0;
2014 	table->MemoryBootLevel = 0;
2015 
2016 	result = ci_populate_smc_boot_level(hwmgr, table);
2017 	PP_ASSERT_WITH_CODE(0 == result,
2018 		"Failed to initialize Boot Level!", return result);
2019 
2020 	result = ci_populate_smc_initial_state(hwmgr);
2021 	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2022 
2023 	result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
2024 	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2025 
2026 	table->UVDInterval = 1;
2027 	table->VCEInterval = 1;
2028 	table->ACPInterval = 1;
2029 	table->SAMUInterval = 1;
2030 	table->GraphicsVoltageChangeEnable  = 1;
2031 	table->GraphicsThermThrottleEnable  = 1;
2032 	table->GraphicsInterval = 1;
2033 	table->VoltageInterval  = 1;
2034 	table->ThermalInterval  = 1;
2035 
2036 	table->TemperatureLimitHigh =
2037 		(data->thermal_temp_setting.temperature_high *
2038 		 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2039 	table->TemperatureLimitLow =
2040 		(data->thermal_temp_setting.temperature_low *
2041 		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2042 
2043 	table->MemoryVoltageChangeEnable  = 1;
2044 	table->MemoryInterval  = 1;
2045 	table->VoltageResponseTime  = 0;
2046 	table->VddcVddciDelta = 4000;
2047 	table->PhaseResponseTime  = 0;
2048 	table->MemoryThermThrottleEnable  = 1;
2049 
2050 	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2051 			"There must be at least one PCIe level defined in the PPTable.",
2052 			return -EINVAL);
2053 
2054 	table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2055 	table->PCIeGenInterval = 1;
2056 
2057 	result = ci_populate_vr_config(hwmgr, table);
2058 	PP_ASSERT_WITH_CODE(0 == result,
2059 			"Failed to populate VRConfig setting!", return result);
2060 	data->vr_config = table->VRConfig;
2061 
2062 	ci_populate_smc_svi2_config(hwmgr, table);
2063 
2064 	for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
2065 		CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
2066 
2067 	table->ThermGpio  = 17;
2068 	table->SclkStepSize = 0x4000;
2069 	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2070 		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2071 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2072 				PHM_PlatformCaps_RegulatorHot);
2073 	} else {
2074 		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2075 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2076 				PHM_PlatformCaps_RegulatorHot);
2077 	}
2078 
2079 	table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2080 
2081 	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2082 	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2083 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2084 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2085 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2086 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2087 	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2088 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2089 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2090 	table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
2091 	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2092 	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2093 
2094 	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2095 	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2096 	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2097 
2098 	/* Upload all DPM data (levels, level counts, etc.) to SMC memory */
2099 	result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
2100 					offsetof(SMU7_Discrete_DpmTable, SystemFlags),
2101 					(uint8_t *)&(table->SystemFlags),
2102 					sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
2103 					SMC_RAM_END);
2104 
2105 	PP_ASSERT_WITH_CODE(0 == result,
2106 		"Failed to upload dpm data to SMC memory!", return result;);
2107 
2108 	result = ci_populate_initial_mc_reg_table(hwmgr);
2109 	PP_ASSERT_WITH_CODE((0 == result),
2110 		"Failed to populate the initial MC register table!", return result);
2111 
2112 	result = ci_populate_pm_fuses(hwmgr);
2113 	PP_ASSERT_WITH_CODE(0 == result,
2114 			"Failed to populate PM fuses to SMC memory!", return result);
2115 
2116 	ci_start_smc(hwmgr);
2117 
2118 	return 0;
2119 }
2120 
2121 static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2122 {
2123 	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2124 	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2125 	uint32_t duty100;
2126 	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2127 	uint16_t fdo_min, slope1, slope2;
2128 	uint32_t reference_clock;
2129 	int res;
2130 	uint64_t tmp64;
2131 
2132 	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2133 		return 0;
2134 
2135 	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2136 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2137 			PHM_PlatformCaps_MicrocodeFanControl);
2138 		return 0;
2139 	}
2140 
2141 	if (0 == ci_data->fan_table_start) {
2142 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2143 		return 0;
2144 	}
2145 
2146 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2147 
2148 	if (0 == duty100) {
2149 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2150 		return 0;
2151 	}
2152 
2153 	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2154 	do_div(tmp64, 10000);
2155 	fdo_min = (uint16_t)tmp64;
2156 
2157 	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2158 	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2159 
2160 	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2161 	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2162 
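	/*
	 * The thermal controller parameters are in hundredths (0.01 degC
	 * for temperatures, 0.01% for PWM), hence the /100 and /10000
	 * scalings, with +50 added first to round to nearest (e.g.
	 * usTMin = 2300 stores TempMin = (50 + 2300) / 100 = 23 degC).
	 * The factor of 16 presumably puts the slopes into the
	 * fixed-point format the fan microcode expects.
	 */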
2163 	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2164 	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2165 
2166 	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2167 	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2168 	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2169 
2170 	fan_table.Slope1 = cpu_to_be16(slope1);
2171 	fan_table.Slope2 = cpu_to_be16(slope2);
2172 
2173 	fan_table.FdoMin = cpu_to_be16(fdo_min);
2174 
2175 	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2176 
2177 	fan_table.HystUp = cpu_to_be16(1);
2178 
2179 	fan_table.HystSlope = cpu_to_be16(1);
2180 
2181 	fan_table.TempRespLim = cpu_to_be16(5);
2182 
2183 	reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
2184 
2185 	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2186 
2187 	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2188 
2189 	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2190 
2191 	res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2192 
2193 	return res;
2194 }
2195 
2196 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2197 {
2198 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2199 
2200 	if (data->need_update_smu7_dpm_table &
2201 			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
2202 		return ci_program_memory_timing_parameters(hwmgr);
2203 
2204 	return 0;
2205 }
2206 
2207 static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2208 {
2209 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2210 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2211 
2212 	int result = 0;
2213 	uint32_t low_sclk_interrupt_threshold = 0;
2214 
2215 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2216 			PHM_PlatformCaps_SclkThrottleLowNotification)
2217 		&& (data->low_sclk_interrupt_threshold != 0)) {
2218 		low_sclk_interrupt_threshold =
2219 				data->low_sclk_interrupt_threshold;
2220 
2221 		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2222 
2223 		result = ci_copy_bytes_to_smc(
2224 				hwmgr,
2225 				smu_data->dpm_table_start +
2226 				offsetof(SMU7_Discrete_DpmTable,
2227 					LowSclkInterruptT),
2228 				(uint8_t *)&low_sclk_interrupt_threshold,
2229 				sizeof(uint32_t),
2230 				SMC_RAM_END);
2231 	}
2232 
2233 	result = ci_update_and_upload_mc_reg_table(hwmgr);
2234 
2235 	PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2236 
2237 	result = ci_program_mem_timing_parameters(hwmgr);
2238 	PP_ASSERT_WITH_CODE((result == 0),
2239 			"Failed to program memory timing parameters!",
2240 			);
2241 
2242 	return result;
2243 }
2244 
2245 static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2246 {
2247 	switch (type) {
2248 	case SMU_SoftRegisters:
2249 		switch (member) {
2250 		case HandshakeDisables:
2251 			return offsetof(SMU7_SoftRegisters, HandshakeDisables);
2252 		case VoltageChangeTimeout:
2253 			return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
2254 		case AverageGraphicsActivity:
2255 			return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
2256 		case PreVBlankGap:
2257 			return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2258 		case VBlankTimeout:
2259 			return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2260 		case DRAM_LOG_ADDR_H:
2261 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
2262 		case DRAM_LOG_ADDR_L:
2263 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
2264 		case DRAM_LOG_PHY_ADDR_H:
2265 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2266 		case DRAM_LOG_PHY_ADDR_L:
2267 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2268 		case DRAM_LOG_BUFF_SIZE:
2269 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2270 		}
		break;
2271 	case SMU_Discrete_DpmTable:
2272 		switch (member) {
2273 		case LowSclkInterruptThreshold:
2274 			return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
2275 		}
2276 	}
2277 	pr_debug("can't get the offset of type %x member %x\n", type, member);
2278 	return 0;
2279 }
2280 
2281 static uint32_t ci_get_mac_definition(uint32_t value)
2282 {
2283 	switch (value) {
2284 	case SMU_MAX_LEVELS_GRAPHICS:
2285 		return SMU7_MAX_LEVELS_GRAPHICS;
2286 	case SMU_MAX_LEVELS_MEMORY:
2287 		return SMU7_MAX_LEVELS_MEMORY;
2288 	case SMU_MAX_LEVELS_LINK:
2289 		return SMU7_MAX_LEVELS_LINK;
2290 	case SMU_MAX_ENTRIES_SMIO:
2291 		return SMU7_MAX_ENTRIES_SMIO;
2292 	case SMU_MAX_LEVELS_VDDC:
2293 		return SMU7_MAX_LEVELS_VDDC;
2294 	case SMU_MAX_LEVELS_VDDCI:
2295 		return SMU7_MAX_LEVELS_VDDCI;
2296 	case SMU_MAX_LEVELS_MVDD:
2297 		return SMU7_MAX_LEVELS_MVDD;
2298 	}
2299 
2300 	pr_debug("can't get the MAC definition for %x\n", value);
2301 	return 0;
2302 }
2303 
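/*
 * Stream the SMU firmware image into SMC RAM through the indirect
 * index/data register pair.  With AUTO_INCREMENT_IND_0 set, every
 * write to SMC_IND_DATA_0 advances the address by 4, so the image is
 * pushed one big-endian dword at a time.
 */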
2304 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2305 {
2306 	uint32_t byte_count, start_addr;
2307 	uint8_t *src;
2308 	uint32_t data;
2309 
2310 	struct cgs_firmware_info info = {0};
2311 
2312 	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2313 
2314 	hwmgr->is_kicker = info.is_kicker;
2315 	hwmgr->smu_version = info.version;
2316 	byte_count = info.image_size;
2317 	src = (uint8_t *)info.kptr;
2318 	start_addr = info.ucode_start_address;
2319 
2320 	if  (byte_count > SMC_RAM_END) {
2321 		pr_err("SMC address is beyond the SMC RAM area.\n");
2322 		return -EINVAL;
2323 	}
2324 
2325 	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2326 	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2327 
2328 	for (; byte_count >= 4; byte_count -= 4) {
2329 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2330 		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2331 		src += 4;
2332 	}
2333 	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2334 
2335 	if (0 != byte_count) {
2336 		pr_err("SMC size must be divisible by 4\n");
2337 		return -EINVAL;
2338 	}
2339 
2340 	return 0;
2341 }
2342 
2343 static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
2344 {
2345 	if (ci_is_smc_ram_running(hwmgr)) {
2346 		pr_info("smc is running, no need to load smc firmware\n");
2347 		return 0;
2348 	}
2349 	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
2350 			boot_seq_done, 1);
2351 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
2352 			pre_fetcher_en, 1);
2353 
2354 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
2355 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
2356 	return ci_load_smc_ucode(hwmgr);
2357 }
2358 
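/*
 * Upload the firmware if the SMC is not already running, then pull the
 * SMC RAM offsets of the DPM table, soft registers, MC register table,
 * fan table and ARB timing table out of the firmware header and cache
 * them for all later table uploads.
 */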
2359 static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
2360 {
2361 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2362 	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2363 
2364 	uint32_t tmp = 0;
2365 	int result;
2366 	bool error = false;
2367 
2368 	if (ci_upload_firmware(hwmgr))
2369 		return -EINVAL;
2370 
2371 	result = ci_read_smc_sram_dword(hwmgr,
2372 				SMU7_FIRMWARE_HEADER_LOCATION +
2373 				offsetof(SMU7_Firmware_Header, DpmTable),
2374 				&tmp, SMC_RAM_END);
2375 
2376 	if (0 == result)
2377 		ci_data->dpm_table_start = tmp;
2378 
2379 	error |= (0 != result);
2380 
2381 	result = ci_read_smc_sram_dword(hwmgr,
2382 				SMU7_FIRMWARE_HEADER_LOCATION +
2383 				offsetof(SMU7_Firmware_Header, SoftRegisters),
2384 				&tmp, SMC_RAM_END);
2385 
2386 	if (0 == result) {
2387 		data->soft_regs_start = tmp;
2388 		ci_data->soft_regs_start = tmp;
2389 	}
2390 
2391 	error |= (0 != result);
2392 
2393 	result = ci_read_smc_sram_dword(hwmgr,
2394 				SMU7_FIRMWARE_HEADER_LOCATION +
2395 				offsetof(SMU7_Firmware_Header, mcRegisterTable),
2396 				&tmp, SMC_RAM_END);
2397 
2398 	if (0 == result)
2399 		ci_data->mc_reg_table_start = tmp;
2400 
2401 	result = ci_read_smc_sram_dword(hwmgr,
2402 				SMU7_FIRMWARE_HEADER_LOCATION +
2403 				offsetof(SMU7_Firmware_Header, FanTable),
2404 				&tmp, SMC_RAM_END);
2405 
2406 	if (0 == result)
2407 		ci_data->fan_table_start = tmp;
2408 
2409 	error |= (0 != result);
2410 
2411 	result = ci_read_smc_sram_dword(hwmgr,
2412 				SMU7_FIRMWARE_HEADER_LOCATION +
2413 				offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
2414 				&tmp, SMC_RAM_END);
2415 
2416 	if (0 == result)
2417 		ci_data->arb_table_start = tmp;
2418 
2419 	error |= (0 != result);
2420 
2421 	result = ci_read_smc_sram_dword(hwmgr,
2422 				SMU7_FIRMWARE_HEADER_LOCATION +
2423 				offsetof(SMU7_Firmware_Header, Version),
2424 				&tmp, SMC_RAM_END);
2425 
2426 	if (0 == result)
2427 		hwmgr->microcode_version_info.SMC = tmp;
2428 
2429 	error |= (0 != result);
2430 
2431 	return error ? 1 : 0;
2432 }
2433 
2434 static uint8_t ci_get_memory_module_index(struct pp_hwmgr *hwmgr)
2435 {
2436 	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2437 }
2438 
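/*
 * Map an MC sequencer register onto its low-power (_LP) shadow, which
 * is what the s0 column of the MC register table is expected to hold.
 * Returns false when no shadow exists, in which case the caller keeps
 * the s1 address for s0 as well.
 */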
2439 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2440 {
2441 	bool result = true;
2442 
2443 	switch (in_reg) {
2444 	case  mmMC_SEQ_RAS_TIMING:
2445 		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
2446 		break;
2447 
2448 	case  mmMC_SEQ_DLL_STBY:
2449 		*out_reg = mmMC_SEQ_DLL_STBY_LP;
2450 		break;
2451 
2452 	case  mmMC_SEQ_G5PDX_CMD0:
2453 		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2454 		break;
2455 
2456 	case  mmMC_SEQ_G5PDX_CMD1:
2457 		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2458 		break;
2459 
2460 	case  mmMC_SEQ_G5PDX_CTRL:
2461 		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2462 		break;
2463 
2464 	case mmMC_SEQ_CAS_TIMING:
2465 		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
2466 		break;
2467 
2468 	case mmMC_SEQ_MISC_TIMING:
2469 		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
2470 		break;
2471 
2472 	case mmMC_SEQ_MISC_TIMING2:
2473 		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2474 		break;
2475 
2476 	case mmMC_SEQ_PMG_DVS_CMD:
2477 		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2478 		break;
2479 
2480 	case mmMC_SEQ_PMG_DVS_CTL:
2481 		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2482 		break;
2483 
2484 	case mmMC_SEQ_RD_CTL_D0:
2485 		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2486 		break;
2487 
2488 	case mmMC_SEQ_RD_CTL_D1:
2489 		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2490 		break;
2491 
2492 	case mmMC_SEQ_WR_CTL_D0:
2493 		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2494 		break;
2495 
2496 	case mmMC_SEQ_WR_CTL_D1:
2497 		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2498 		break;
2499 
2500 	case mmMC_PMG_CMD_EMRS:
2501 		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2502 		break;
2503 
2504 	case mmMC_PMG_CMD_MRS:
2505 		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2506 		break;
2507 
2508 	case mmMC_PMG_CMD_MRS1:
2509 		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2510 		break;
2511 
2512 	case mmMC_SEQ_PMG_TIMING:
2513 		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
2514 		break;
2515 
2516 	case mmMC_PMG_CMD_MRS2:
2517 		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2518 		break;
2519 
2520 	case mmMC_SEQ_WR_CTL_2:
2521 		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
2522 		break;
2523 
2524 	default:
2525 		result = false;
2526 		break;
2527 	}
2528 
2529 	return result;
2530 }
2531 
2532 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2533 {
2534 	uint32_t i;
2535 	uint16_t address;
2536 
2537 	for (i = 0; i < table->last; i++) {
2538 		table->mc_reg_address[i].s0 =
2539 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2540 			? address : table->mc_reg_address[i].s1;
2541 	}
2542 	return 0;
2543 }
2544 
2545 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2546 					struct ci_mc_reg_table *ni_table)
2547 {
2548 	uint8_t i, j;
2549 
2550 	PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2551 		"Invalid VramInfo table.", return -EINVAL);
2552 	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2553 		"Invalid VramInfo table.", return -EINVAL);
2554 
2555 	for (i = 0; i < table->last; i++)
2556 		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2557 
2558 	ni_table->last = table->last;
2559 
2560 	for (i = 0; i < table->num_entries; i++) {
2561 		ni_table->mc_reg_table_entry[i].mclk_max =
2562 			table->mc_reg_table_entry[i].mclk_max;
2563 		for (j = 0; j < table->last; j++) {
2564 			ni_table->mc_reg_table_entry[i].mc_data[j] =
2565 				table->mc_reg_table_entry[i].mc_data[j];
2566 		}
2567 	}
2568 
2569 	ni_table->num_entries = table->num_entries;
2570 
2571 	return 0;
2572 }
2573 
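/*
 * Append derived columns to the MC register table: fields captured in
 * MC_SEQ_MISC1 and MC_SEQ_RESERVE_M are mirrored into the EMRS/MRS/
 * MRS1 command registers (plus an extra auto-command column for
 * non-GDDR5 memory), synthesized from the current register contents
 * and the per-mclk data already captured.
 */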
2574 static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2575 					struct ci_mc_reg_table *table)
2576 {
2577 	uint8_t i, j, k;
2578 	uint32_t temp_reg;
2579 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2580 
2581 	for (i = 0, j = table->last; i < table->last; i++) {
2582 		PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2583 			"Invalid VramInfo table.", return -EINVAL);
2584 
2585 		switch (table->mc_reg_address[i].s1) {
2586 
2587 		case mmMC_SEQ_MISC1:
2588 			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2589 			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2590 			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2591 			for (k = 0; k < table->num_entries; k++) {
2592 				table->mc_reg_table_entry[k].mc_data[j] =
2593 					((temp_reg & 0xffff0000)) |
2594 					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2595 			}
2596 			j++;
2597 
2598 			PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2599 				"Invalid VramInfo table.", return -EINVAL);
2600 			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2601 			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2602 			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2603 			for (k = 0; k < table->num_entries; k++) {
2604 				table->mc_reg_table_entry[k].mc_data[j] =
2605 					(temp_reg & 0xffff0000) |
2606 					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2607 
2608 				if (!data->is_memory_gddr5)
2609 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2610 			}
2611 			j++;
2612 
2613 			if (!data->is_memory_gddr5) {
2614 				PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2615 					"Invalid VramInfo table.", return -EINVAL);
2616 				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2617 				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2618 				for (k = 0; k < table->num_entries; k++) {
2619 					table->mc_reg_table_entry[k].mc_data[j] =
2620 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2621 				}
2622 				j++;
2623 			}
2624 
2625 			break;
2626 
2627 		case mmMC_SEQ_RESERVE_M:
2628 			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2629 			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2630 			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2631 			for (k = 0; k < table->num_entries; k++) {
2632 				table->mc_reg_table_entry[k].mc_data[j] =
2633 					(temp_reg & 0xffff0000) |
2634 					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2635 			}
2636 			j++;
2637 			break;
2638 
2639 		default:
2640 			break;
2641 		}
2642 
2643 	}
2644 
2645 	table->last = j;
2646 
2647 	return 0;
2648 }
2649 
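/*
 * Mark which register columns actually vary across AC timing entries:
 * bit i of validflag is set when column i differs between any two
 * adjacent entries, so constant columns can be skipped when the table
 * is packed for the SMC.
 */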
2650 static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2651 {
2652 	uint8_t i, j;
2653 
2654 	for (i = 0; i < table->last; i++) {
2655 		for (j = 1; j < table->num_entries; j++) {
2656 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2657 				table->mc_reg_table_entry[j].mc_data[i]) {
2658 				table->validflag |= (1 << i);
2659 				break;
2660 			}
2661 		}
2662 	}
2663 
2664 	return 0;
2665 }
2666 
2667 static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2668 {
2669 	int result;
2670 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2671 	pp_atomctrl_mc_reg_table *table;
2672 	struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2673 	uint8_t module_index = ci_get_memory_module_index(hwmgr);
2674 
2675 	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2676 
2677 	if (NULL == table)
2678 		return -ENOMEM;
2679 
2680 	/* Program additional LP registers that are no longer programmed by VBIOS */
2681 	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2682 	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2683 	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2684 	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2685 	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2686 	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2687 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2688 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2689 	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2690 	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2691 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2692 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2693 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2694 	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2695 	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2696 	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2697 	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2698 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2699 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2700 	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2701 
2702 	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
2703 
2704 	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2705 
2706 	if (0 == result)
2707 		result = ci_copy_vbios_smc_reg_table(table, ni_table);
2708 
2709 	if (0 == result) {
2710 		ci_set_s0_mc_reg_index(ni_table);
2711 		result = ci_set_mc_special_registers(hwmgr, ni_table);
2712 	}
2713 
2714 	if (0 == result)
2715 		ci_set_valid_flag(ni_table);
2716 
2717 	kfree(table);
2718 
2719 	return result;
2720 }
2721 
2722 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2723 {
2724 	return ci_is_smc_ram_running(hwmgr);
2725 }
2726 
2727 static int ci_smu_init(struct pp_hwmgr *hwmgr)
2728 {
2729 	struct ci_smumgr *ci_priv = NULL;
2730 
2731 	ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
2732 
2733 	if (ci_priv == NULL)
2734 		return -ENOMEM;
2735 
2736 	hwmgr->smu_backend = ci_priv;
2737 
2738 	return 0;
2739 }
2740 
2741 static int ci_smu_fini(struct pp_hwmgr *hwmgr)
2742 {
2743 	kfree(hwmgr->smu_backend);
2744 	hwmgr->smu_backend = NULL;
2745 	return 0;
2746 }
2747 
2748 static int ci_start_smu(struct pp_hwmgr *hwmgr)
2749 {
2750 	return 0;
2751 }
2752 
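/*
 * Push updated activity targets and up/down hysteresis from a profile
 * mode setting into the SMC copies of the graphics and memory DPM
 * levels.  DPM is frozen around the edits, and because the fields are
 * narrower than the SMC's dword access granularity, each one is
 * patched with a read-modify-write of its containing 32-bit word.
 */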
2753 static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
2754 				void *profile_setting)
2755 {
2756 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2757 	struct ci_smumgr *smu_data = (struct ci_smumgr *)
2758 			(hwmgr->smu_backend);
2759 	struct profile_mode_setting *setting;
2760 	struct SMU7_Discrete_GraphicsLevel *levels =
2761 			smu_data->smc_state_table.GraphicsLevel;
2762 	uint32_t array = smu_data->dpm_table_start +
2763 			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2764 
2765 	uint32_t mclk_array = smu_data->dpm_table_start +
2766 			offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2767 	struct SMU7_Discrete_MemoryLevel *mclk_levels =
2768 			smu_data->smc_state_table.MemoryLevel;
2769 	uint32_t i;
2770 	uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2771 
2772 	if (profile_setting == NULL)
2773 		return -EINVAL;
2774 
2775 	setting = (struct profile_mode_setting *)profile_setting;
2776 
2777 	if (setting->bupdate_sclk) {
2778 		if (!data->sclk_dpm_key_disabled)
2779 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
2780 		for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2781 			if (levels[i].ActivityLevel !=
2782 				cpu_to_be16(setting->sclk_activity)) {
2783 				levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2784 
2785 				clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2786 						+ offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
2787 				offset = clk_activity_offset & ~0x3;
2788 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2789 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2790 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2791 
2792 			}
2793 			if (levels[i].UpH != setting->sclk_up_hyst ||
2794 				levels[i].DownH != setting->sclk_down_hyst) {
2795 				levels[i].UpH = setting->sclk_up_hyst;
2796 				levels[i].DownH = setting->sclk_down_hyst;
2797 				up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2798 						+ offsetof(SMU7_Discrete_GraphicsLevel, UpH);
2799 				down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2800 						+ offsetof(SMU7_Discrete_GraphicsLevel, DownH);
2801 				offset = up_hyst_offset & ~0x3;
2802 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2803 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
2804 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
2805 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2806 			}
2807 		}
2808 		if (!data->sclk_dpm_key_disabled)
2809 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
2810 	}
2811 
2812 	if (setting->bupdate_mclk) {
2813 		if (!data->mclk_dpm_key_disabled)
2814 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
2815 		for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2816 			if (mclk_levels[i].ActivityLevel !=
2817 				cpu_to_be16(setting->mclk_activity)) {
2818 				mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2819 
2820 				clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2821 						+ offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
2822 				offset = clk_activity_offset & ~0x3;
2823 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2824 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2825 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2826 
2827 			}
2828 			if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
2829 				mclk_levels[i].DownH != setting->mclk_down_hyst) {
2830 				mclk_levels[i].UpH = setting->mclk_up_hyst;
2831 				mclk_levels[i].DownH = setting->mclk_down_hyst;
2832 				up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2833 						+ offsetof(SMU7_Discrete_MemoryLevel, UpH);
2834 				down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2835 						+ offsetof(SMU7_Discrete_MemoryLevel, DownH);
2836 				offset = up_hyst_offset & ~0x3;
2837 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2838 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
2839 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
2840 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2841 			}
2842 		}
2843 		if (!data->mclk_dpm_key_disabled)
2844 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
2845 	}
2846 	return 0;
2847 }
2848 
2849 static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2850 {
2851 	struct amdgpu_device *adev = hwmgr->adev;
2852 	struct smu7_hwmgr *data = hwmgr->backend;
2853 	struct ci_smumgr *smu_data = hwmgr->smu_backend;
2854 	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
2855 			hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
2856 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2857 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2858 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2859 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2860 	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
2861 						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
2862 	int32_t i;
2863 
2864 	if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
2865 		smu_data->smc_state_table.UvdBootLevel = 0;
2866 	else
2867 		smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
2868 
2869 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
2870 				UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
2871 
2872 	data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
2873 
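	/*
	 * Walk the table from the highest level down, enabling each level
	 * whose voltage requirement fits max_vddc; in a profiling mode, or
	 * when UVD DPM is not supported, stop after the top level.
	 */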
	for (i = uvd_table->count - 1; i >= 0; i--) {
		if (uvd_table->entries[i].v <= max_vddc)
			data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
			break;
	}
	ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
				data->dpm_level_enable_mask.uvd_dpm_enable_mask);

	return 0;
}

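/*
 * Same scheme as the UVD update: the boot level is pinned to 0 for now
 * (VCE can raise the minimum evclk itself) and the enable mask covers
 * the levels whose voltage fits the current AC/DC limit.
 */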
static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	struct smu7_hwmgr *data = hwmgr->backend;
	struct phm_vce_clock_voltage_dependency_table *vce_table =
			hwmgr->dyn_state.vce_clock_voltage_dependency_table;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
	int32_t i;

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
				VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk */

	data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;

	for (i = vce_table->count - 1; i >= 0; i--) {
		if (vce_table->entries[i].v <= max_vddc)
			data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
			break;
	}
	ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
				data->dpm_level_enable_mask.vce_dpm_enable_mask);

	return 0;
}

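/* Dispatch SMC table refreshes by block type; other types are no-ops. */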
static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
	switch (type) {
	case SMU_UVD_TABLE:
		ci_update_uvd_smc_table(hwmgr);
		break;
	case SMU_VCE_TABLE:
		ci_update_vce_smc_table(hwmgr);
		break;
	default:
		break;
	}
	return 0;
}

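/*
 * SMU manager callbacks for CI parts (Bonaire/Hawaii); hooks this SMU
 * does not implement are left NULL.
 */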
const struct pp_smumgr_func ci_smu_funcs = {
	.smu_init = ci_smu_init,
	.smu_fini = ci_smu_fini,
	.start_smu = ci_start_smu,
	.check_fw_load_finish = NULL,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = ci_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.get_offsetof = ci_get_offsetof,
	.process_firmware_header = ci_process_firmware_header,
	.init_smc_table = ci_init_smc_table,
	.update_sclk_threshold = ci_update_sclk_threshold,
	.thermal_setup_fan_table = ci_thermal_setup_fan_table,
	.populate_all_graphic_levels = ci_populate_all_graphic_levels,
	.populate_all_memory_levels = ci_populate_all_memory_levels,
	.get_mac_definition = ci_get_mac_definition,
	.initialize_mc_reg_table = ci_initialize_mc_reg_table,
	.is_dpm_running = ci_is_dpm_running,
	.update_dpm_settings = ci_update_dpm_settings,
	.update_smc_table = ci_update_smc_table,
};