1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34 
35 
36 static const struct amd_pm_funcs pp_dpm_funcs;
37 
amd_powerplay_create(struct amdgpu_device * adev)38 static int amd_powerplay_create(struct amdgpu_device *adev)
39 {
40 	struct pp_hwmgr *hwmgr;
41 
42 	if (adev == NULL)
43 		return -EINVAL;
44 
45 	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46 	if (hwmgr == NULL)
47 		return -ENOMEM;
48 
49 	hwmgr->adev = adev;
50 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51 	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
52 	hwmgr->device = amdgpu_cgs_create_device(adev);
53 	mutex_init(&hwmgr->smu_lock);
54 	hwmgr->chip_family = adev->family;
55 	hwmgr->chip_id = adev->asic_type;
56 	hwmgr->feature_mask = adev->pm.pp_feature;
57 	hwmgr->display_config = &adev->pm.pm_display_cfg;
58 	adev->powerplay.pp_handle = hwmgr;
59 	adev->powerplay.pp_funcs = &pp_dpm_funcs;
60 	return 0;
61 }
62 
63 
amd_powerplay_destroy(struct amdgpu_device * adev)64 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 {
66 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67 
68 	kfree(hwmgr->hardcode_pp_table);
69 	hwmgr->hardcode_pp_table = NULL;
70 
71 	kfree(hwmgr);
72 	hwmgr = NULL;
73 }
74 
pp_early_init(void * handle)75 static int pp_early_init(void *handle)
76 {
77 	int ret;
78 	struct amdgpu_device *adev = handle;
79 
80 	ret = amd_powerplay_create(adev);
81 
82 	if (ret != 0)
83 		return ret;
84 
85 	ret = hwmgr_early_init(adev->powerplay.pp_handle);
86 	if (ret)
87 		return -EINVAL;
88 
89 	return 0;
90 }
91 
pp_sw_init(void * handle)92 static int pp_sw_init(void *handle)
93 {
94 	struct amdgpu_device *adev = handle;
95 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
96 	int ret = 0;
97 
98 	ret = hwmgr_sw_init(hwmgr);
99 
100 	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
101 
102 	return ret;
103 }
104 
pp_sw_fini(void * handle)105 static int pp_sw_fini(void *handle)
106 {
107 	struct amdgpu_device *adev = handle;
108 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
109 
110 	hwmgr_sw_fini(hwmgr);
111 
112 	release_firmware(adev->pm.fw);
113 	adev->pm.fw = NULL;
114 
115 	return 0;
116 }
117 
pp_hw_init(void * handle)118 static int pp_hw_init(void *handle)
119 {
120 	int ret = 0;
121 	struct amdgpu_device *adev = handle;
122 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
123 
124 	ret = hwmgr_hw_init(hwmgr);
125 
126 	if (ret)
127 		pr_err("powerplay hw init failed\n");
128 
129 	return ret;
130 }
131 
pp_hw_fini(void * handle)132 static int pp_hw_fini(void *handle)
133 {
134 	struct amdgpu_device *adev = handle;
135 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
136 
137 	hwmgr_hw_fini(hwmgr);
138 
139 	return 0;
140 }
141 
/*
 * Allocate the SMU private buffer in GTT and hand its CPU/GPU addresses
 * to the SMU through the hwmgr notify_cac_buffer_info callback.  Errors
 * are logged (and the buffer freed again), not returned to the caller.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	/* page-aligned GTT buffer of the size requested in adev->pm */
	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* pass both addresses split into 32-bit halves, if supported */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	/* r stays -EINVAL when the callback is absent: release the buffer */
	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
172 
/*
 * IP-block late-init hook: fire the COMPLETE_INIT task under smu_lock
 * and, when a private buffer size was requested, reserve it for the SMU.
 */
static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en) {
		mutex_lock(&hwmgr->smu_lock);
		hwmgr_handle_task(hwmgr,
					AMD_PP_TASK_COMPLETE_INIT, NULL);
		mutex_unlock(&hwmgr->smu_lock);
	}
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}
189 
pp_late_fini(void * handle)190 static void pp_late_fini(void *handle)
191 {
192 	struct amdgpu_device *adev = handle;
193 
194 	if (adev->pm.smu_prv_buffer)
195 		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
196 	amd_powerplay_destroy(adev);
197 }
198 
199 
/* Powerplay exposes no idle state of its own; always report not idle. */
static bool pp_is_idle(void *handle)
{
	return false;
}
204 
/* Nothing to wait for - required IP-block stub, always succeeds. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
209 
/* Soft reset is a no-op for powerplay - required IP-block stub. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
214 
/* Powergating is driven through the dpm interfaces instead - no-op stub. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
220 
pp_suspend(void * handle)221 static int pp_suspend(void *handle)
222 {
223 	struct amdgpu_device *adev = handle;
224 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
225 
226 	return hwmgr_suspend(hwmgr);
227 }
228 
pp_resume(void * handle)229 static int pp_resume(void *handle)
230 {
231 	struct amdgpu_device *adev = handle;
232 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
233 
234 	return hwmgr_resume(hwmgr);
235 }
236 
/* Clockgating is driven through pp_set_clockgating_by_smu - no-op stub. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
242 
/* IP-block callbacks wiring powerplay into the amdgpu init/fini flow. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
260 
/* SMC IP-block descriptor (v1.0) registered with the amdgpu core. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
269 
270 /* This interface only be supported On Vi,
271  * because only smu7/8 can help to load gfx/sdma fw,
272  * smu need to be enabled before load other ip's fw.
273  * so call start smu to load smu7 fw and other ip's fw
274  */
pp_dpm_load_fw(void * handle)275 static int pp_dpm_load_fw(void *handle)
276 {
277 	struct pp_hwmgr *hwmgr = handle;
278 
279 	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
280 		return -EINVAL;
281 
282 	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
283 		pr_err("fw load failed\n");
284 		return -EINVAL;
285 	}
286 
287 	return 0;
288 }
289 
/* Firmware-loading-complete notification - nothing to do here. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
294 
/* Forward a clockgating message id to the asic update_clock_gatings hook. */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *mgr = handle;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return mgr->hwmgr_func->update_clock_gatings(mgr, &msg_id);
}
309 
/*
 * Track entry into / exit from the UMD stable-pstate profile levels.
 *
 * Entering (current level is not a profile level, requested one is):
 * remember the current level in saved_dpm_level and ungate GFX CG/PG so
 * profiling is not disturbed by gating.  Exiting: restore the saved
 * level when PROFILE_EXIT was requested and re-enable GFX CG/PG.
 * @level may be rewritten in place.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}
345 
pp_dpm_force_performance_level(void * handle,enum amd_dpm_forced_level level)346 static int pp_dpm_force_performance_level(void *handle,
347 					enum amd_dpm_forced_level level)
348 {
349 	struct pp_hwmgr *hwmgr = handle;
350 
351 	if (!hwmgr || !hwmgr->pm_en)
352 		return -EINVAL;
353 
354 	if (level == hwmgr->dpm_level)
355 		return 0;
356 
357 	mutex_lock(&hwmgr->smu_lock);
358 	pp_dpm_en_umd_pstate(hwmgr, &level);
359 	hwmgr->request_dpm_level = level;
360 	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
361 	mutex_unlock(&hwmgr->smu_lock);
362 
363 	return 0;
364 }
365 
pp_dpm_get_performance_level(void * handle)366 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
367 								void *handle)
368 {
369 	struct pp_hwmgr *hwmgr = handle;
370 	enum amd_dpm_forced_level level;
371 
372 	if (!hwmgr || !hwmgr->pm_en)
373 		return -EINVAL;
374 
375 	mutex_lock(&hwmgr->smu_lock);
376 	level = hwmgr->dpm_level;
377 	mutex_unlock(&hwmgr->smu_lock);
378 	return level;
379 }
380 
pp_dpm_get_sclk(void * handle,bool low)381 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
382 {
383 	struct pp_hwmgr *hwmgr = handle;
384 	uint32_t clk = 0;
385 
386 	if (!hwmgr || !hwmgr->pm_en)
387 		return 0;
388 
389 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
390 		pr_info_ratelimited("%s was not implemented.\n", __func__);
391 		return 0;
392 	}
393 	mutex_lock(&hwmgr->smu_lock);
394 	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
395 	mutex_unlock(&hwmgr->smu_lock);
396 	return clk;
397 }
398 
pp_dpm_get_mclk(void * handle,bool low)399 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
400 {
401 	struct pp_hwmgr *hwmgr = handle;
402 	uint32_t clk = 0;
403 
404 	if (!hwmgr || !hwmgr->pm_en)
405 		return 0;
406 
407 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
408 		pr_info_ratelimited("%s was not implemented.\n", __func__);
409 		return 0;
410 	}
411 	mutex_lock(&hwmgr->smu_lock);
412 	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
413 	mutex_unlock(&hwmgr->smu_lock);
414 	return clk;
415 }
416 
/* Gate or ungate VCE power via the asic hook, under smu_lock. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *mgr = handle;

	if (mgr == NULL || !mgr->pm_en)
		return;

	if (mgr->hwmgr_func->powergate_vce == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&mgr->smu_lock);
	mgr->hwmgr_func->powergate_vce(mgr, gate);
	mutex_unlock(&mgr->smu_lock);
}
432 
/* Gate or ungate UVD power via the asic hook, under smu_lock. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *mgr = handle;

	if (mgr == NULL || !mgr->pm_en)
		return;

	if (mgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&mgr->smu_lock);
	mgr->hwmgr_func->powergate_uvd(mgr, gate);
	mutex_unlock(&mgr->smu_lock);
}
448 
pp_dpm_dispatch_tasks(void * handle,enum amd_pp_task task_id,enum amd_pm_state_type * user_state)449 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
450 		enum amd_pm_state_type *user_state)
451 {
452 	int ret = 0;
453 	struct pp_hwmgr *hwmgr = handle;
454 
455 	if (!hwmgr || !hwmgr->pm_en)
456 		return -EINVAL;
457 
458 	mutex_lock(&hwmgr->smu_lock);
459 	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
460 	mutex_unlock(&hwmgr->smu_lock);
461 
462 	return ret;
463 }
464 
/*
 * Map the ui_label classification of the currently active power state
 * onto the generic amd_pm_state_type values.  Returns -EINVAL (cast to
 * the enum) when powerplay is disabled or no state is active yet.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		/* unlabeled states: distinguish the boot state from the rest */
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}
499 
/* Switch the fan control mode via the asic hook, under smu_lock. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *mgr = handle;

	if (mgr == NULL || !mgr->pm_en)
		return;

	if (mgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&mgr->smu_lock);
	mgr->hwmgr_func->set_fan_control_mode(mgr, mode);
	mutex_unlock(&mgr->smu_lock);
}
515 
pp_dpm_get_fan_control_mode(void * handle)516 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
517 {
518 	struct pp_hwmgr *hwmgr = handle;
519 	uint32_t mode = 0;
520 
521 	if (!hwmgr || !hwmgr->pm_en)
522 		return 0;
523 
524 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
525 		pr_info_ratelimited("%s was not implemented.\n", __func__);
526 		return 0;
527 	}
528 	mutex_lock(&hwmgr->smu_lock);
529 	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
530 	mutex_unlock(&hwmgr->smu_lock);
531 	return mode;
532 }
533 
/* Program the fan speed as a percentage via the asic hook. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->set_fan_speed_percent(mgr, percent);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
551 
/* Read the fan speed as a percentage into @speed via the asic hook. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->get_fan_speed_percent(mgr, speed);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
570 
/* Read the fan speed in RPM into @rpm; -EINVAL when not supported. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	/* unlike the percent variant this one fails silently */
	if (mgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->get_fan_speed_rpm(mgr, rpm);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
587 
/* Program the fan speed in RPM via the asic hook, under smu_lock. */
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->set_fan_speed_rpm == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->set_fan_speed_rpm(mgr, rpm);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
605 
/*
 * Fill @data with the number of power states and, per state, the
 * ui_label mapped onto a generic power-state type.  The state array is
 * laid out as hwmgr->num_ps entries with a runtime stride of ps_size
 * bytes, hence the manual pointer arithmetic below.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* i-th state: base + i * ps_size (states are size-variable) */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			/* unlabeled: boot state vs. plain default */
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
644 
pp_dpm_get_pp_table(void * handle,char ** table)645 static int pp_dpm_get_pp_table(void *handle, char **table)
646 {
647 	struct pp_hwmgr *hwmgr = handle;
648 	int size = 0;
649 
650 	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
651 		return -EINVAL;
652 
653 	mutex_lock(&hwmgr->smu_lock);
654 	*table = (char *)hwmgr->soft_pp_table;
655 	size = hwmgr->soft_pp_table_size;
656 	mutex_unlock(&hwmgr->smu_lock);
657 	return size;
658 }
659 
/*
 * Full powerplay reset: tear the hw state down, bring it back up and
 * replay the COMPLETE_INIT task so the dpm state machine is re-armed.
 * Callers hold smu_lock where needed (see pp_dpm_set_pp_table).
 */
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
675 
/*
 * Install a user-supplied pp table.  The original soft table is copied
 * once (lazily) into hardcode_pp_table, @buf is copied over it, the
 * soft-table pointer is redirected to the copy and a full powerplay
 * reset makes the new table take effect.  AVFS is disabled afterwards
 * when the asic exposes avfs_control.
 *
 * NOTE(review): @size is trusted to be <= soft_pp_table_size here -
 * confirm the bound is enforced at the sysfs entry point.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		/* first use: snapshot the current soft table as the base */
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			goto err;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		goto err;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			goto err;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
712 
/* Restrict a clock domain to the levels in @mask (manual dpm mode only). */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->force_clock_level == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (mgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->force_clock_level(mgr, type, mask);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
737 
pp_dpm_print_clock_levels(void * handle,enum pp_clock_type type,char * buf)738 static int pp_dpm_print_clock_levels(void *handle,
739 		enum pp_clock_type type, char *buf)
740 {
741 	struct pp_hwmgr *hwmgr = handle;
742 	int ret = 0;
743 
744 	if (!hwmgr || !hwmgr->pm_en)
745 		return -EINVAL;
746 
747 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
748 		pr_info_ratelimited("%s was not implemented.\n", __func__);
749 		return 0;
750 	}
751 	mutex_lock(&hwmgr->smu_lock);
752 	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
753 	mutex_unlock(&hwmgr->smu_lock);
754 	return ret;
755 }
756 
pp_dpm_get_sclk_od(void * handle)757 static int pp_dpm_get_sclk_od(void *handle)
758 {
759 	struct pp_hwmgr *hwmgr = handle;
760 	int ret = 0;
761 
762 	if (!hwmgr || !hwmgr->pm_en)
763 		return -EINVAL;
764 
765 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
766 		pr_info_ratelimited("%s was not implemented.\n", __func__);
767 		return 0;
768 	}
769 	mutex_lock(&hwmgr->smu_lock);
770 	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
771 	mutex_unlock(&hwmgr->smu_lock);
772 	return ret;
773 }
774 
/* Program a new engine-clock overdrive value via the asic hook. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->set_sclk_od(mgr, value);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
793 
pp_dpm_get_mclk_od(void * handle)794 static int pp_dpm_get_mclk_od(void *handle)
795 {
796 	struct pp_hwmgr *hwmgr = handle;
797 	int ret = 0;
798 
799 	if (!hwmgr || !hwmgr->pm_en)
800 		return -EINVAL;
801 
802 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
803 		pr_info_ratelimited("%s was not implemented.\n", __func__);
804 		return 0;
805 	}
806 	mutex_lock(&hwmgr->smu_lock);
807 	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
808 	mutex_unlock(&hwmgr->smu_lock);
809 	return ret;
810 }
811 
/* Program a new memory-clock overdrive value via the asic hook. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->set_mclk_od(mgr, value);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
829 
/*
 * Read a sensor value into @value (@size in/out is its byte length).
 * A few sensors are served directly from cached hwmgr state; everything
 * else is forwarded to the asic read_sensor hook under smu_lock.
 * NOTE(review): the read_sensor pointer is not NULL-checked here -
 * presumably every asic implements it; confirm before relying on it.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	/* these four are answered from cached state, no hw access */
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}
}
859 
860 static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void * handle,unsigned idx)861 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
862 {
863 	struct pp_hwmgr *hwmgr = handle;
864 
865 	if (!hwmgr || !hwmgr->pm_en)
866 		return NULL;
867 
868 	if (idx < hwmgr->num_vce_state_tables)
869 		return &hwmgr->vce_states[idx];
870 	return NULL;
871 }
872 
pp_get_power_profile_mode(void * handle,char * buf)873 static int pp_get_power_profile_mode(void *handle, char *buf)
874 {
875 	struct pp_hwmgr *hwmgr = handle;
876 
877 	if (!hwmgr || !hwmgr->pm_en || !buf)
878 		return -EINVAL;
879 
880 	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
881 		pr_info_ratelimited("%s was not implemented.\n", __func__);
882 		return snprintf(buf, PAGE_SIZE, "\n");
883 	}
884 
885 	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
886 }
887 
/* Program a power-profile mode; only permitted in manual dpm mode. */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *mgr = handle;
	int err;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (mgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&mgr->smu_lock);
	err = mgr->hwmgr_func->set_power_profile_mode(mgr, input, size);
	mutex_unlock(&mgr->smu_lock);

	return err;
}
911 
/* Forward an overdrive dpm-table edit request to the asic hook. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *mgr = handle;

	if (mgr == NULL || !mgr->pm_en)
		return -EINVAL;

	if (mgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return mgr->hwmgr_func->odn_edit_dpm_table(mgr, type, input, size);
}
926 
pp_dpm_set_mp1_state(void * handle,enum pp_mp1_state mp1_state)927 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
928 {
929 	struct pp_hwmgr *hwmgr = handle;
930 
931 	if (!hwmgr || !hwmgr->pm_en)
932 		return -EINVAL;
933 
934 	if (hwmgr->hwmgr_func->set_mp1_state)
935 		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
936 
937 	return 0;
938 }
939 
/*
 * Enable (@en) or disable one workload profile bit.  workload_mask
 * collects active profiles by their priority (workload_prority is the
 * actual field name); the highest set bit (fls) selects the
 * workload_setting entry that is programmed.  The new mode is only
 * pushed to the hw when dpm is not in manual mode.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		/* drop the bit; mask may become 0, hence the index > 0 check */
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		/* set the bit; fls is then guaranteed >= 1 */
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
978 
/*
 * Program a new power limit in the SMU.  A @limit of 0 selects the
 * board default.  The accepted maximum is the default limit, scaled up
 * by the overdrive TDP percentage when OD is enabled; larger requests
 * are rejected with -EINVAL.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		/* headroom: default * (100 + TDPODLimit) / 100 */
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1010 
/*
 * Report the current power limit, or with @default_limit the board
 * default (scaled by the overdrive TDP percentage when OD is enabled).
 */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *mgr = handle;

	if (mgr == NULL || !mgr->pm_en || limit == NULL)
		return -EINVAL;

	mutex_lock(&mgr->smu_lock);

	if (!default_limit) {
		*limit = mgr->power_limit;
	} else {
		*limit = mgr->default_power_limit;
		if (mgr->od_enabled) {
			*limit *= (100 + mgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
	}

	mutex_unlock(&mgr->smu_lock);

	return 0;
}
1034 
pp_display_configuration_change(void * handle,const struct amd_pp_display_configuration * display_config)1035 static int pp_display_configuration_change(void *handle,
1036 	const struct amd_pp_display_configuration *display_config)
1037 {
1038 	struct pp_hwmgr *hwmgr = handle;
1039 
1040 	if (!hwmgr || !hwmgr->pm_en)
1041 		return -EINVAL;
1042 
1043 	mutex_lock(&hwmgr->smu_lock);
1044 	phm_store_dal_configuration_data(hwmgr, display_config);
1045 	mutex_unlock(&hwmgr->smu_lock);
1046 	return 0;
1047 }
1048 
pp_get_display_power_level(void * handle,struct amd_pp_simple_clock_info * output)1049 static int pp_get_display_power_level(void *handle,
1050 		struct amd_pp_simple_clock_info *output)
1051 {
1052 	struct pp_hwmgr *hwmgr = handle;
1053 	int ret = 0;
1054 
1055 	if (!hwmgr || !hwmgr->pm_en ||!output)
1056 		return -EINVAL;
1057 
1058 	mutex_lock(&hwmgr->smu_lock);
1059 	ret = phm_get_dal_power_level(hwmgr, output);
1060 	mutex_unlock(&hwmgr->smu_lock);
1061 	return ret;
1062 }
1063 
/*
 * Fill @clocks with the engine/memory clock ranges and bus bandwidth of the
 * current power state, plus the shallow-sleep clock range when available.
 * Returns 0 on success, -EINVAL on bad handle or phm_get_clock_info failure.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	/* Best effort: failure leaves simple_clocks.level at 0, handled below. */
	phm_get_dal_power_level(hwmgr, &simple_clocks);

	/* Pick the performance-level designation matching the platform caps. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	/* Default the shallow-sleep range to the full range ... */
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* level == 0 means the DAL query above gave nothing usable. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	/* ... then narrow it if dedicated shallow-sleep clocks are reported. */
	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1115 
pp_get_clock_by_type(void * handle,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1116 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1117 {
1118 	struct pp_hwmgr *hwmgr = handle;
1119 	int ret = 0;
1120 
1121 	if (!hwmgr || !hwmgr->pm_en)
1122 		return -EINVAL;
1123 
1124 	if (clocks == NULL)
1125 		return -EINVAL;
1126 
1127 	mutex_lock(&hwmgr->smu_lock);
1128 	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1129 	mutex_unlock(&hwmgr->smu_lock);
1130 	return ret;
1131 }
1132 
pp_get_clock_by_type_with_latency(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1133 static int pp_get_clock_by_type_with_latency(void *handle,
1134 		enum amd_pp_clock_type type,
1135 		struct pp_clock_levels_with_latency *clocks)
1136 {
1137 	struct pp_hwmgr *hwmgr = handle;
1138 	int ret = 0;
1139 
1140 	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1141 		return -EINVAL;
1142 
1143 	mutex_lock(&hwmgr->smu_lock);
1144 	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1145 	mutex_unlock(&hwmgr->smu_lock);
1146 	return ret;
1147 }
1148 
pp_get_clock_by_type_with_voltage(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)1149 static int pp_get_clock_by_type_with_voltage(void *handle,
1150 		enum amd_pp_clock_type type,
1151 		struct pp_clock_levels_with_voltage *clocks)
1152 {
1153 	struct pp_hwmgr *hwmgr = handle;
1154 	int ret = 0;
1155 
1156 	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1157 		return -EINVAL;
1158 
1159 	mutex_lock(&hwmgr->smu_lock);
1160 
1161 	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1162 
1163 	mutex_unlock(&hwmgr->smu_lock);
1164 	return ret;
1165 }
1166 
pp_set_watermarks_for_clocks_ranges(void * handle,void * clock_ranges)1167 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1168 		void *clock_ranges)
1169 {
1170 	struct pp_hwmgr *hwmgr = handle;
1171 	int ret = 0;
1172 
1173 	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1174 		return -EINVAL;
1175 
1176 	mutex_lock(&hwmgr->smu_lock);
1177 	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1178 			clock_ranges);
1179 	mutex_unlock(&hwmgr->smu_lock);
1180 
1181 	return ret;
1182 }
1183 
pp_display_clock_voltage_request(void * handle,struct pp_display_clock_request * clock)1184 static int pp_display_clock_voltage_request(void *handle,
1185 		struct pp_display_clock_request *clock)
1186 {
1187 	struct pp_hwmgr *hwmgr = handle;
1188 	int ret = 0;
1189 
1190 	if (!hwmgr || !hwmgr->pm_en ||!clock)
1191 		return -EINVAL;
1192 
1193 	mutex_lock(&hwmgr->smu_lock);
1194 	ret = phm_display_clock_voltage_request(hwmgr, clock);
1195 	mutex_unlock(&hwmgr->smu_lock);
1196 
1197 	return ret;
1198 }
1199 
pp_get_display_mode_validation_clocks(void * handle,struct amd_pp_simple_clock_info * clocks)1200 static int pp_get_display_mode_validation_clocks(void *handle,
1201 		struct amd_pp_simple_clock_info *clocks)
1202 {
1203 	struct pp_hwmgr *hwmgr = handle;
1204 	int ret = 0;
1205 
1206 	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1207 		return -EINVAL;
1208 
1209 	clocks->level = PP_DAL_POWERLEVEL_7;
1210 
1211 	mutex_lock(&hwmgr->smu_lock);
1212 
1213 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1214 		ret = phm_get_max_high_clocks(hwmgr, clocks);
1215 
1216 	mutex_unlock(&hwmgr->smu_lock);
1217 	return ret;
1218 }
1219 
pp_dpm_powergate_mmhub(void * handle)1220 static int pp_dpm_powergate_mmhub(void *handle)
1221 {
1222 	struct pp_hwmgr *hwmgr = handle;
1223 
1224 	if (!hwmgr || !hwmgr->pm_en)
1225 		return -EINVAL;
1226 
1227 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1228 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1229 		return 0;
1230 	}
1231 
1232 	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1233 }
1234 
/*
 * Gate or ungate the GFX block.  Silently succeeds when power management
 * is disabled or the backend has no hook.
 */
static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return 0;

	if (!hwmgr->hwmgr_func->powergate_gfx) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}
1249 
/* Gate or ungate the ACP (audio co-processor) block, if supported. */
static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_acp) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}
1264 
/*
 * Gate or ungate the SDMA engines, if supported.  Note: unlike its
 * siblings this path intentionally does not require pm_en.
 */
static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL)
		return;

	if (!hwmgr->hwmgr_func->powergate_sdma) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}
1279 
/*
 * Route an IP-block powergating request to the matching per-block helper.
 * Only the GFX path propagates a status; unknown blocks are ignored and
 * all other paths report success.
 */
static int pp_set_powergating_by_smu(void *handle,
				uint32_t block_type, bool gate)
{
	int ret = 0;

	if (block_type == AMD_IP_BLOCK_TYPE_UVD ||
	    block_type == AMD_IP_BLOCK_TYPE_VCN)
		pp_dpm_powergate_uvd(handle, gate);
	else if (block_type == AMD_IP_BLOCK_TYPE_VCE)
		pp_dpm_powergate_vce(handle, gate);
	else if (block_type == AMD_IP_BLOCK_TYPE_GMC)
		pp_dpm_powergate_mmhub(handle);
	else if (block_type == AMD_IP_BLOCK_TYPE_GFX)
		ret = pp_dpm_powergate_gfx(handle, gate);
	else if (block_type == AMD_IP_BLOCK_TYPE_ACP)
		pp_dpm_powergate_acp(handle, gate);
	else if (block_type == AMD_IP_BLOCK_TYPE_SDMA)
		pp_dpm_powergate_sdma(handle, gate);

	return ret;
}
1310 
pp_notify_smu_enable_pwe(void * handle)1311 static int pp_notify_smu_enable_pwe(void *handle)
1312 {
1313 	struct pp_hwmgr *hwmgr = handle;
1314 
1315 	if (!hwmgr || !hwmgr->pm_en)
1316 		return -EINVAL;
1317 
1318 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1319 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1320 		return -EINVAL;
1321 	}
1322 
1323 	mutex_lock(&hwmgr->smu_lock);
1324 	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1325 	mutex_unlock(&hwmgr->smu_lock);
1326 
1327 	return 0;
1328 }
1329 
pp_enable_mgpu_fan_boost(void * handle)1330 static int pp_enable_mgpu_fan_boost(void *handle)
1331 {
1332 	struct pp_hwmgr *hwmgr = handle;
1333 
1334 	if (!hwmgr)
1335 		return -EINVAL;
1336 
1337 	if (!hwmgr->pm_en ||
1338 	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1339 		return 0;
1340 
1341 	mutex_lock(&hwmgr->smu_lock);
1342 	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1343 	mutex_unlock(&hwmgr->smu_lock);
1344 
1345 	return 0;
1346 }
1347 
/* Set the minimum DCEF clock allowed while in deep sleep. */
static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1366 
/* Set a hard minimum on the DCEF clock frequency. */
static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1385 
/* Set a hard minimum on the fabric clock frequency. */
static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1404 
/* Inform the phm layer how many displays are currently active. */
static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_set_active_display_count(hwmgr, count);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
1419 
/*
 * Report through @cap whether the ASIC supports BACO.
 *
 * Returns 0 (with *cap untouched) when power management is disabled or the
 * backend does not implement the query; -EINVAL on a bad handle or a NULL
 * out-pointer.
 */
static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
	struct pp_hwmgr *hwmgr = handle;

	/*
	 * Validate the out-pointer as the other getters in this file do
	 * (e.g. pp_get_power_limit checks !limit); the backend callback
	 * would otherwise be handed a NULL destination.
	 */
	if (!hwmgr || !cap)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1436 
pp_get_asic_baco_state(void * handle,int * state)1437 static int pp_get_asic_baco_state(void *handle, int *state)
1438 {
1439 	struct pp_hwmgr *hwmgr = handle;
1440 
1441 	if (!hwmgr)
1442 		return -EINVAL;
1443 
1444 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1445 		return 0;
1446 
1447 	mutex_lock(&hwmgr->smu_lock);
1448 	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1449 	mutex_unlock(&hwmgr->smu_lock);
1450 
1451 	return 0;
1452 }
1453 
pp_set_asic_baco_state(void * handle,int state)1454 static int pp_set_asic_baco_state(void *handle, int state)
1455 {
1456 	struct pp_hwmgr *hwmgr = handle;
1457 
1458 	if (!hwmgr)
1459 		return -EINVAL;
1460 
1461 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
1462 		return 0;
1463 
1464 	mutex_lock(&hwmgr->smu_lock);
1465 	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1466 	mutex_unlock(&hwmgr->smu_lock);
1467 
1468 	return 0;
1469 }
1470 
pp_get_ppfeature_status(void * handle,char * buf)1471 static int pp_get_ppfeature_status(void *handle, char *buf)
1472 {
1473 	struct pp_hwmgr *hwmgr = handle;
1474 	int ret = 0;
1475 
1476 	if (!hwmgr || !hwmgr->pm_en || !buf)
1477 		return -EINVAL;
1478 
1479 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1480 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1481 		return -EINVAL;
1482 	}
1483 
1484 	mutex_lock(&hwmgr->smu_lock);
1485 	ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1486 	mutex_unlock(&hwmgr->smu_lock);
1487 
1488 	return ret;
1489 }
1490 
/* Apply a new powerplay feature enable mask (sysfs helper). */
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_ppfeature_status) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
1510 
pp_asic_reset_mode_2(void * handle)1511 static int pp_asic_reset_mode_2(void *handle)
1512 {
1513 	struct pp_hwmgr *hwmgr = handle;
1514 		int ret = 0;
1515 
1516 	if (!hwmgr || !hwmgr->pm_en)
1517 		return -EINVAL;
1518 
1519 	if (hwmgr->hwmgr_func->asic_reset == NULL) {
1520 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1521 		return -EINVAL;
1522 	}
1523 
1524 	mutex_lock(&hwmgr->smu_lock);
1525 	ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1526 	mutex_unlock(&hwmgr->smu_lock);
1527 
1528 	return ret;
1529 }
1530 
/* Acquire or release the SMU-arbitrated I2C bus for driver access. */
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->smu_i2c_bus_access) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
1550 
/*
 * Dispatch table handed to amdgpu: maps the generic amd_pm_funcs callback
 * interface onto the powerplay implementations in this file.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
};
1610