/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"

extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vegam_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;

extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);

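/*
 * Seed the default mapping from power profile to relative priority, plus
 * the ordered list of profiles used when workload settings are switched.
 */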
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;

	hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
	hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
	hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
}

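/*
 * Early, chip-independent setup: apply default timeouts, caps and DPM
 * levels, then select the SMU manager and hwmgr callbacks for the ASIC
 * family and mask off features that family does not support.
 */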
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr)
		return -EINVAL;

	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->pp_table_version = PP_TABLE_V1;
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr->reload_fw = 1;
	hwmgr_init_workload_prority(hwmgr);

	switch (hwmgr->chip_family) {
	case AMDGPU_FAMILY_CI:
		hwmgr->smumgr_funcs = &ci_smu_funcs;
		ci_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
					 PP_ENABLE_GFX_CG_THRU_SMU |
					 PP_GFXOFF_MASK);
		hwmgr->pp_table_version = PP_TABLE_V0;
		hwmgr->od_enabled = false;
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_CZ:
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		switch (hwmgr->chip_id) {
		case CHIP_TOPAZ:
			hwmgr->smumgr_funcs = &iceland_smu_funcs;
			topaz_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						 PP_ENABLE_GFX_CG_THRU_SMU);
			hwmgr->pp_table_version = PP_TABLE_V0;
			hwmgr->od_enabled = false;
			break;
		case CHIP_TONGA:
			hwmgr->smumgr_funcs = &tonga_smu_funcs;
			tonga_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
			break;
		case CHIP_FIJI:
			hwmgr->smumgr_funcs = &fiji_smu_funcs;
			fiji_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						 PP_ENABLE_GFX_CG_THRU_SMU);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12:
			hwmgr->smumgr_funcs = &polaris10_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		case CHIP_VEGAM:
			hwmgr->smumgr_funcs = &vegam_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		default:
			return -EINVAL;
		}
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_AI:
		switch (hwmgr->chip_id) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega10_smu_funcs;
			vega10_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA12:
			hwmgr->smumgr_funcs = &vega12_smu_funcs;
			vega12_hwmgr_init(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_FAMILY_RV:
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

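/* Register thermal interrupt handlers and allocate SMU software state. */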
int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
		return -EINVAL;

	phm_register_irq_handlers(hwmgr);

	return hwmgr->smumgr_funcs->smu_init(hwmgr);
}

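/* Release the SMU manager's software state, if any was allocated. */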
int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
		hwmgr->smumgr_funcs->smu_fini(hwmgr);

	return 0;
}

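/*
 * Bring up power management: start the SMU, parse the powerplay table,
 * initialize the ASIC-specific backend, build the power state table and
 * enable dynamic state management plus the thermal controller. On any
 * failure after table init, unwind whatever was already set up.
 */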
int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->smumgr_funcs)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu) {
		ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
		if (ret) {
			pr_err("smc start failed\n");
			return -EINVAL;
		}
	}

	if (!hwmgr->pm_en)
		return 0;

	if (!hwmgr->pptable_func ||
	    !hwmgr->pptable_func->pptable_init ||
	    !hwmgr->hwmgr_func->backend_init) {
		hwmgr->pm_en = false;
		pr_info("dpm not supported\n");
		return 0;
	}

	ret = hwmgr->pptable_func->pptable_init(hwmgr);
	if (ret)
		goto err;

	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
				hwmgr->thermal_controller.fanInfo.bNoFan;

	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
	if (ret)
		goto err1;
	/* make sure dc limits are valid */
	if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
		hwmgr->dyn_state.max_clock_voltage_on_dc =
				hwmgr->dyn_state.max_clock_voltage_on_ac;

	ret = psm_init_power_state_table(hwmgr);
	if (ret)
		goto err2;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		goto err2;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		goto err2;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		goto err2;

	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;

	return 0;
err2:
	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
err1:
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
err:
	return ret;
}

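/*
 * Tear down power management in roughly the reverse order of
 * hwmgr_hw_init(): stop the thermal controller, fall back to the boot
 * power state, disable dynamic state management and clock/power gating,
 * then free the backend and powerplay table state.
 */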
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, false, NULL);
	phm_disable_dynamic_state_management(hwmgr);
	phm_disable_clock_power_gatings(hwmgr);

	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
	return psm_fini_power_state_table(hwmgr);
}

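/*
 * Prepare for suspend: disable SMC firmware CTF handling, drop back to
 * the boot power state and power down the ASIC's PM blocks.
 */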
int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	phm_disable_smc_firmware_ctf(hwmgr);
	ret = psm_set_boot_states(hwmgr);
	if (ret)
		return ret;
	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
	if (ret)
		return ret;
	ret = phm_power_down_asic(hwmgr);

	return ret;
}

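/*
 * Resume from suspend: restart the SMU and, if power management is
 * enabled, redo ASIC setup, dynamic state management, the thermal
 * controller and the current performance state.
 */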
int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr)
		return -EINVAL;

	if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
			pr_err("smc start failed\n");
			return -EINVAL;
		}
	}

	if (!hwmgr->pm_en)
		return 0;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		return ret;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		return ret;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		return ret;

	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);

	return ret;
}

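/* Map a generic amd_pm_state_type onto the powerplay UI state label. */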
static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
{
	switch (state) {
	case POWER_STATE_TYPE_BATTERY:
		return PP_StateUILabel_Battery;
	case POWER_STATE_TYPE_BALANCED:
		return PP_StateUILabel_Balanced;
	case POWER_STATE_TYPE_PERFORMANCE:
		return PP_StateUILabel_Performance;
	default:
		return PP_StateUILabel_None;
	}
}

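/*
 * Dispatch a power-management task: react to a display configuration
 * change, switch to a user-requested power state, or simply re-evaluate
 * the current power state.
 */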
int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = phm_set_cpu_power_state(hwmgr);
		if (ret)
			return ret;
		ret = psm_set_performance_states(hwmgr);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	case AMD_PP_TASK_ENABLE_USER_STATE:
	{
		enum PP_StateUILabel requested_ui_label;
		struct pp_power_state *requested_ps = NULL;

		if (user_state == NULL) {
			ret = -EINVAL;
			break;
		}

		requested_ui_label = power_state_convert(*user_state);
		ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps);
		break;
	}
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	default:
		break;
	}
	return ret;
}

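/*
 * Set the platform capability bits common to all supported ASICs; the
 * family/chip specific helpers adjust them afterwards.
 */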
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

#if defined(CONFIG_ACPI)
	if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
#endif

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DynamicPatchPowerState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EnableSMU7ThermalManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_SMC);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_FanSpeedInTableIsRPM);
	return;
}

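/*
 * Honour the user-specified feature mask (hwmgr->feature_mask): translate
 * the deep-sleep, power-containment and overdrive bits into platform caps.
 */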
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_SclkDeepSleep);
	else
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_SclkDeepSleep);

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_CAC);
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_PowerContainment);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_CAC);
	}

	if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
		hwmgr->od_enabled = true;

	return 0;
}

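/*
 * Polaris-family quirks: enable EVV, SQ ramping, regulator-hot and
 * automatic AC/DC transitions, with SPLL shutdown and the DB/TD/TCP
 * ramping caps gated on the specific chip id.
 */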
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EVV);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_SQRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_RegulatorHot);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_AutomaticDCTransition);

	if (hwmgr->chip_id != CHIP_POLARIS10)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_SPLLShutdownSupport);

	if (hwmgr->chip_id != CHIP_POLARIS11) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_DBRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_TDRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_TCPRamping);
	}
	return 0;
}

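/* Fiji quirks: enable EVV and disable the SQ/DB/TD/TCP ramping caps. */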
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TCPRamping);
	return 0;
}

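/*
 * Tonga quirks: enable EVV, disable the SQ/DB/TD/TCP ramping caps and
 * keep UVD/VCE power gating off.
 */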
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TCPRamping);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_VCEPowerGating);
	return 0;
}

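/* Topaz (Iceland) quirks: enable EVV and disable the SQ/DB/TD/TCP ramping caps. */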
int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TCPRamping);
	return 0;
}

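/*
 * CI-family quirks: disable the SQ/DB/TD/TCP ramping caps and enable
 * engine and memory spread spectrum support.
 */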
int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_TCPRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		    PHM_PlatformCaps_EngineSpreadSpectrumSupport);
	return 0;
}