/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "smumgr.h"
#include "vega20_inc.h"
#include "soc15_common.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "smu11_driver_if.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"

/* MP Apertures */
#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

/* address block */
#define smnMP1_FIRMWARE_FLAGS		0x3010024
#define smnMP0_FW_INTF			0x30101c0
#define smnMP1_PUB_CTRL			0x3010b14

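/*
 * Check whether the MP1 (SMC) firmware is up. The firmware-flags SMN
 * register is read through the PCIe indirect interface (RREG32_PCIE) by
 * OR'ing the MP1 aperture base with the register offset; the
 * INTERRUPTS_ENABLED flag serves as the "SMC is running" indication.
 */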
bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return true;

	return false;
}

/*
 * Wait for the SMC to respond to the previous message.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   the response value the SMC wrote back.
 */
static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Send a message to the SMC without waiting for its response.
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg    the message to send.
 * @return   Always returns 0.
 */
static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

/*
 * Send a message to the SMC and wait for its response.
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg    the message to send.
 * @return   0 on success, -EIO if the SMC did not acknowledge the message.
 */
static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

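	/* Make sure any response to a previous message has been consumed */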
	vega20_wait_for_response(hwmgr);

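	/* Clear the response register so the new ack can be detected */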
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

/*
 * Send a message with a parameter to the SMC and wait for its response.
 * @param    hwmgr:  the address of the powerplay hardware manager.
 * @param    msg: the message to send.
 * @param    parameter: the parameter to send.
 * @return   0 on success, -EIO if the SMC did not acknowledge the message.
 */
static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

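	/* The message argument is passed to the SMC through C2PMSG_82 */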
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

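/*
 * Read back the 32-bit value the SMC leaves in C2PMSG_82; query
 * messages such as PPSMC_MSG_GetEnabledSmuFeaturesLow return their
 * result this way (see vega20_get_enabled_smc_features() below).
 */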
static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/*
 * Copy a table from the SMC into the driver FB copy.
 * @param   hwmgr       the address of the HW manager
 * @param   table       the driver-side buffer to copy into
 * @param   table_id    the SMU table ID to copy from
 */
static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
				      uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* flush hdp cache */
	adev->nbio_funcs->hdp_flush(adev, NULL);

	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

/*
 * Copy a table from the driver FB into the SMC.
 * @param   hwmgr       the address of the HW manager
 * @param   table       the driver-side buffer to copy from
 * @param   table_id    the SMU table ID to update
 */
static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
				    uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

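/*
 * Push new activity-monitor coefficients to the SMC: the table ID goes
 * in the low 16 bits of the transfer-message parameter and the
 * workload type in the high 16 bits.
 */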
int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
			"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

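/*
 * Fetch the activity-monitor coefficients for the given workload type
 * from the SMC, using the same table-ID/workload-type encoding as the
 * setter above.
 */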
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
			"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* flush hdp cache */
	adev->nbio_funcs->hdp_flush(adev, NULL);

	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	return 0;
}

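/*
 * Enable or disable SMC features: the 64-bit mask is split into 32-bit
 * low/high halves, each carried by its own enable/disable message.
 */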
int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
		bool enable, uint64_t feature_mask)
{
	uint32_t smu_features_low, smu_features_high;
	int ret = 0;

	smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
	smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);

	if (enable) {
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
				return ret);
	} else {
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
				return ret);
	}

	return 0;
}

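/*
 * Query the 64-bit mask of currently enabled SMC features, reassembled
 * from the two 32-bit halves the SMC reports.
 */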
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
		uint64_t *features_enabled)
{
	uint32_t smc_features_low, smc_features_high;
	int ret = 0;

	if (features_enabled == NULL)
		return -EINVAL;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
			return ret);
	smc_features_low = vega20_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
			return ret);
	smc_features_high = vega20_get_argument(hwmgr);

	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));

	return 0;
}

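/*
 * Register the pmstatuslog buffer with the SMC as its tools DRAM
 * region, if one was allocated.
 */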
static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
		ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetToolsDramAddrHigh,
				upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
		if (!ret)
			ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetToolsDramAddrLow,
					lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
	}

	return ret;
}

int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr Low Failed!",
			return ret);

	return ret;
}

static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv;
	unsigned long tools_size = 0x19000;
	int ret = 0;

	struct cgs_firmware_info info = {0};

	ret = cgs_get_firmware_info(hwmgr->device,
				smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
				&info);
	if (ret || !info.kptr)
		return -EINVAL;

	priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for pptable */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(PPTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
	if (ret)
		goto free_backend;

	priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
	priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);

	/* allocate space for watermarks table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
	if (ret)
		goto err0;

	priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
	priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);

	/* allocate space for pmstatuslog table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			tools_size,
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
	if (ret)
		goto err1;

	priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
	priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;

	/* allocate space for OverDrive table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(OverDriveTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
	if (ret)
		goto err2;

	priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
	priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);

	/* allocate space for SmuMetrics table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(SmuMetrics_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
	if (ret)
		goto err3;

	priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
	priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);

	/* allocate space for ActivityMonitor table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmActivityMonitorCoeffInt_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
	if (ret)
		goto err4;

	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);

	return 0;

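/* Unwind the buffer allocations in reverse order */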
err4:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
err3:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
err2:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
err0:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
free_backend:
	kfree(hwmgr->smu_backend);
	/* clear the dangling pointer so a later smu_fini doesn't double-free */
	hwmgr->smu_backend = NULL;

	return -EINVAL;
}

static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
				&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
				&priv->smu_tables.entry[TABLE_PPTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
				&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
				&priv->smu_tables.entry[TABLE_WATERMARKS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}
	return 0;
}

static int vega20_start_smu(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = vega20_is_smc_ram_running(hwmgr);
	PP_ASSERT_WITH_CODE(ret,
			"[Vega20StartSmu] SMC is not running!",
			return -EINVAL);

	ret = vega20_set_tools_address(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[Vega20StartSmu] Failed to set tools address!",
			return ret);

	return 0;
}

static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	uint64_t features_enabled = 0;

	vega20_get_enabled_smc_features(hwmgr, &features_enabled);

	return (features_enabled & SMC_DPM_FEATURES) != 0;
}

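/*
 * Single entry point for table traffic: rw == true reads the table back
 * from the SMC, rw == false writes it out to the SMC.
 */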
static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
				    uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = vega20_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}

const struct pp_smumgr_func vega20_smu_funcs = {
	.name = "vega20_smu",
	.smu_init = &vega20_smu_init,
	.smu_fini = &vega20_smu_fini,
	.start_smu = &vega20_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &vega20_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.is_dpm_running = vega20_is_dpm_running,
	.get_argument = vega20_get_argument,
	.smc_table_manager = vega20_smc_table_manager,
};