/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"

extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vegam_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;

extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega20_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);

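/*
 * Default priority of each SMC power profile, and the profile selected
 * at each priority slot.
 */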
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;

	hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}

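/*
 * Early init: choose the SMU manager and hwmgr backend for the ASIC
 * family/chip and apply the per-ASIC feature-mask and cap adjustments.
 */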
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr)
		return -EINVAL;

	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->pp_table_version = PP_TABLE_V1;
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr_init_workload_prority(hwmgr);
	hwmgr->gfxoff_state_changed_by_workload = false;

	switch (hwmgr->chip_family) {
	case AMDGPU_FAMILY_CI:
		hwmgr->smumgr_funcs = &ci_smu_funcs;
		ci_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
					 PP_ENABLE_GFX_CG_THRU_SMU |
					 PP_GFXOFF_MASK);
		hwmgr->pp_table_version = PP_TABLE_V0;
		hwmgr->od_enabled = false;
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_CZ:
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		switch (hwmgr->chip_id) {
		case CHIP_TOPAZ:
			hwmgr->smumgr_funcs = &iceland_smu_funcs;
			topaz_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						 PP_ENABLE_GFX_CG_THRU_SMU);
			hwmgr->pp_table_version = PP_TABLE_V0;
			hwmgr->od_enabled = false;
			break;
		case CHIP_TONGA:
			hwmgr->smumgr_funcs = &tonga_smu_funcs;
			tonga_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
			break;
		case CHIP_FIJI:
			hwmgr->smumgr_funcs = &fiji_smu_funcs;
			fiji_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						 PP_ENABLE_GFX_CG_THRU_SMU);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12:
			hwmgr->smumgr_funcs = &polaris10_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		case CHIP_VEGAM:
			hwmgr->smumgr_funcs = &vegam_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		default:
			return -EINVAL;
		}
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_AI:
		switch (hwmgr->chip_id) {
		case CHIP_VEGA10:
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega10_smu_funcs;
			vega10_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA12:
			hwmgr->smumgr_funcs = &vega12_smu_funcs;
			vega12_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA20:
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega20_smu_funcs;
			vega20_hwmgr_init(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_FAMILY_RV:
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

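/* Register PM interrupt handlers and let the SMU manager allocate its SW structures */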
int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
		return -EINVAL;

	phm_register_irq_handlers(hwmgr);
	pr_info("hwmgr_sw_init smu backend is %s\n", hwmgr->smumgr_funcs->name);

	return hwmgr->smumgr_funcs->smu_init(hwmgr);
}

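/* Release whatever the SMU manager allocated in hwmgr_sw_init() */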
int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
		hwmgr->smumgr_funcs->smu_fini(hwmgr);

	return 0;
}

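/*
 * Hardware init: parse the powerplay table, initialize the hwmgr backend,
 * build the power state table, then enable dynamic power management and
 * the thermal controller. Unwinds on failure.
 */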
int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr->pm_en)
		return 0;

	if (!hwmgr->pptable_func ||
	    !hwmgr->pptable_func->pptable_init ||
	    !hwmgr->hwmgr_func->backend_init) {
		hwmgr->pm_en = false;
		pr_info("dpm not supported\n");
		return 0;
	}

	ret = hwmgr->pptable_func->pptable_init(hwmgr);
	if (ret)
		goto err;

	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
				hwmgr->thermal_controller.fanInfo.bNoFan;

	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
	if (ret)
		goto err1;
	/* make sure dc limits are valid */
	if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
		hwmgr->dyn_state.max_clock_voltage_on_dc =
				hwmgr->dyn_state.max_clock_voltage_on_ac;

	ret = psm_init_power_state_table(hwmgr);
	if (ret)
		goto err2;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		goto err2;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		goto err2;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		goto err2;

	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;

	return 0;
err2:
	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
err1:
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
err:
	return ret;
}

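/* Return to the boot power state and tear down DPM, thermal control and the backends */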
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	phm_disable_dynamic_state_management(hwmgr);
	phm_disable_clock_power_gatings(hwmgr);

	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
	return psm_fini_power_state_table(hwmgr);
}

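/* Suspend: disable SMC firmware CTF, fall back to the boot state and power down the ASIC */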
int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	phm_disable_smc_firmware_ctf(hwmgr);
	ret = psm_set_boot_states(hwmgr);
	if (ret)
		return ret;
	ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	if (ret)
		return ret;
	ret = phm_power_down_asic(hwmgr);

	return ret;
}

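/* Resume: redo ASIC setup and re-enable dynamic state management and thermal control */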
int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en)
		return 0;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		return ret;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		return ret;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		return ret;

	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);

	return ret;
}

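/* Map the generic amd_pm_state_type onto the powerplay UI state label */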
static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
{
	switch (state) {
	case POWER_STATE_TYPE_BATTERY:
		return PP_StateUILabel_Battery;
	case POWER_STATE_TYPE_BALANCED:
		return PP_StateUILabel_Balanced;
	case POWER_STATE_TYPE_PERFORMANCE:
		return PP_StateUILabel_Performance;
	default:
		return PP_StateUILabel_None;
	}
}

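/*
 * Dispatch a power-management task: handle a display configuration
 * change, switch to a user-requested power state, or simply readjust
 * the current one.
 */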
int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = phm_pre_display_configuration_changed(hwmgr);
		if (ret)
			return ret;
		ret = phm_set_cpu_power_state(hwmgr);
		if (ret)
			return ret;
		ret = psm_set_performance_states(hwmgr);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	case AMD_PP_TASK_ENABLE_USER_STATE:
	{
		enum PP_StateUILabel requested_ui_label;
		struct pp_power_state *requested_ps = NULL;

		if (user_state == NULL) {
			ret = -EINVAL;
			break;
		}

		requested_ui_label = power_state_convert(*user_state);
		ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, true, requested_ps);
		break;
	}
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
		break;
	default:
		break;
	}
	return ret;
}

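/* Caps common to all ASICs; per-ASIC quirks are added in the *_set_asic_special_caps() helpers */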
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

#if defined(CONFIG_ACPI)
	if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
#endif

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicPatchPowerState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_EnableSMU7ThermalManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_SMC);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_FanSpeedInTableIsRPM);
	return;
}

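/* Translate user-controllable feature_mask bits into platform caps and overdrive enablement */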
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);
	else
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	}

	if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
		hwmgr->od_enabled = true;

	return 0;
}

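/*
 * Polaris10/11/12 and VegaM: SPLL shutdown support on everything but
 * Polaris10, DB/TD/TCP ramping on everything but Polaris11.
 */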
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	if (hwmgr->chip_id != CHIP_POLARIS10)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SPLLShutdownSupport);

	if (hwmgr->chip_id != CHIP_POLARIS11) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DBRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_TDRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_TCPRamping);
	}
	return 0;
}

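/* Fiji: EVV enabled, SQ/DB/TD/TCP ramping disabled */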
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

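/* Tonga: EVV enabled, SQ/DB/TD/TCP ramping and UVD/VCE power gating disabled */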
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEPowerGating);
	return 0;
}

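/* Topaz: EVV enabled, SQ/DB/TD/TCP ramping disabled */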
int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

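/* CI: SQ/DB/TD/TCP ramping disabled, memory and engine spread spectrum supported */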
int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
	return 0;
}