/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include <linux/delay.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/types.h>
29 #include <linux/pci.h>
30 #include <drm/amdgpu_drm.h>
31 #include "power_state.h"
33 #include "pppcielanes.h"
34 #include "ppatomctrl.h"
/* SMU-manager function tables, defined in the per-ASIC smumgr files. */
extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func cz_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func rv_smu_funcs;

extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);

/* Per-ASIC capability setup helpers defined later in this file. */
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
57 uint8_t convert_to_vid(uint16_t vddc)
59 return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
62 uint16_t convert_to_vddc(uint8_t vid)
64 return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
67 uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
72 shift = (offset % 4) << 3;
73 if (size == sizeof(uint8_t))
75 else if (size == sizeof(uint16_t))
76 mask = 0xFFFF << shift;
78 original_data &= ~mask;
79 original_data |= (field << shift);
83 static int phm_thermal_l2h_irq(void *private_data,
84 unsigned src_id, const uint32_t *iv_entry)
86 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
87 struct amdgpu_device *adev = hwmgr->adev;
89 pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
90 PCI_BUS_NUM(adev->pdev->devfn),
91 PCI_SLOT(adev->pdev->devfn),
92 PCI_FUNC(adev->pdev->devfn));
96 static int phm_thermal_h2l_irq(void *private_data,
97 unsigned src_id, const uint32_t *iv_entry)
99 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
100 struct amdgpu_device *adev = hwmgr->adev;
102 pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
103 PCI_BUS_NUM(adev->pdev->devfn),
104 PCI_SLOT(adev->pdev->devfn),
105 PCI_FUNC(adev->pdev->devfn));
109 static int phm_ctf_irq(void *private_data,
110 unsigned src_id, const uint32_t *iv_entry)
112 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
113 struct amdgpu_device *adev = hwmgr->adev;
115 pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
116 PCI_BUS_NUM(adev->pdev->devfn),
117 PCI_SLOT(adev->pdev->devfn),
118 PCI_FUNC(adev->pdev->devfn));
122 static const struct cgs_irq_src_funcs thermal_irq_src[3] = {
123 { .handler = phm_thermal_l2h_irq },
124 { .handler = phm_thermal_h2l_irq },
125 { .handler = phm_ctf_irq }
128 static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
130 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
131 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
132 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
133 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
134 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
136 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
137 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
138 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
139 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
140 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
143 int hwmgr_early_init(struct pp_instance *handle)
145 struct pp_hwmgr *hwmgr;
150 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
154 handle->hwmgr = hwmgr;
155 hwmgr->adev = handle->parent;
156 hwmgr->device = handle->device;
157 hwmgr->chip_family = ((struct amdgpu_device *)handle->parent)->family;
158 hwmgr->chip_id = ((struct amdgpu_device *)handle->parent)->asic_type;
159 hwmgr->feature_mask = amdgpu_pp_feature_mask;
160 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
161 hwmgr->power_source = PP_PowerSource_AC;
162 hwmgr->pp_table_version = PP_TABLE_V1;
163 hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
164 hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
165 hwmgr_init_default_caps(hwmgr);
166 hwmgr_set_user_specify_caps(hwmgr);
167 hwmgr->fan_ctrl_is_in_default_mode = true;
168 hwmgr->reload_fw = 1;
169 hwmgr_init_workload_prority(hwmgr);
171 switch (hwmgr->chip_family) {
172 case AMDGPU_FAMILY_CI:
173 hwmgr->smumgr_funcs = &ci_smu_funcs;
174 ci_set_asic_special_caps(hwmgr);
175 hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
176 PP_ENABLE_GFX_CG_THRU_SMU);
177 hwmgr->pp_table_version = PP_TABLE_V0;
178 hwmgr->od_enabled = false;
179 smu7_init_function_pointers(hwmgr);
181 case AMDGPU_FAMILY_CZ:
182 hwmgr->od_enabled = false;
183 hwmgr->smumgr_funcs = &cz_smu_funcs;
184 cz_init_function_pointers(hwmgr);
186 case AMDGPU_FAMILY_VI:
187 switch (hwmgr->chip_id) {
189 hwmgr->smumgr_funcs = &iceland_smu_funcs;
190 topaz_set_asic_special_caps(hwmgr);
191 hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
192 PP_ENABLE_GFX_CG_THRU_SMU);
193 hwmgr->pp_table_version = PP_TABLE_V0;
194 hwmgr->od_enabled = false;
197 hwmgr->smumgr_funcs = &tonga_smu_funcs;
198 tonga_set_asic_special_caps(hwmgr);
199 hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
202 hwmgr->smumgr_funcs = &fiji_smu_funcs;
203 fiji_set_asic_special_caps(hwmgr);
204 hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
205 PP_ENABLE_GFX_CG_THRU_SMU);
210 hwmgr->smumgr_funcs = &polaris10_smu_funcs;
211 polaris_set_asic_special_caps(hwmgr);
212 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
217 smu7_init_function_pointers(hwmgr);
219 case AMDGPU_FAMILY_AI:
220 switch (hwmgr->chip_id) {
222 hwmgr->smumgr_funcs = &vega10_smu_funcs;
223 vega10_hwmgr_init(hwmgr);
229 case AMDGPU_FAMILY_RV:
230 switch (hwmgr->chip_id) {
232 hwmgr->od_enabled = false;
233 hwmgr->smumgr_funcs = &rv_smu_funcs;
234 rv_init_function_pointers(hwmgr);
247 int hwmgr_hw_init(struct pp_instance *handle)
249 struct pp_hwmgr *hwmgr;
255 hwmgr = handle->hwmgr;
257 if (hwmgr->pptable_func == NULL ||
258 hwmgr->pptable_func->pptable_init == NULL ||
259 hwmgr->hwmgr_func->backend_init == NULL)
262 ret = hwmgr->pptable_func->pptable_init(hwmgr);
266 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
270 ret = psm_init_power_state_table(hwmgr);
274 ret = phm_setup_asic(hwmgr);
278 ret = phm_enable_dynamic_state_management(hwmgr);
281 ret = phm_start_thermal_controller(hwmgr);
282 ret |= psm_set_performance_states(hwmgr);
286 ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src);
292 if (hwmgr->hwmgr_func->backend_fini)
293 hwmgr->hwmgr_func->backend_fini(hwmgr);
295 if (hwmgr->pptable_func->pptable_fini)
296 hwmgr->pptable_func->pptable_fini(hwmgr);
298 pr_err("amdgpu: powerplay initialization failed\n");
302 int hwmgr_hw_fini(struct pp_instance *handle)
304 struct pp_hwmgr *hwmgr;
306 if (handle == NULL || handle->hwmgr == NULL)
309 hwmgr = handle->hwmgr;
311 phm_stop_thermal_controller(hwmgr);
312 psm_set_boot_states(hwmgr);
313 psm_adjust_power_state_dynamic(hwmgr, false, NULL);
314 phm_disable_dynamic_state_management(hwmgr);
315 phm_disable_clock_power_gatings(hwmgr);
317 if (hwmgr->hwmgr_func->backend_fini)
318 hwmgr->hwmgr_func->backend_fini(hwmgr);
319 if (hwmgr->pptable_func->pptable_fini)
320 hwmgr->pptable_func->pptable_fini(hwmgr);
321 return psm_fini_power_state_table(hwmgr);
324 int hwmgr_hw_suspend(struct pp_instance *handle)
326 struct pp_hwmgr *hwmgr;
329 if (handle == NULL || handle->hwmgr == NULL)
332 hwmgr = handle->hwmgr;
333 phm_disable_smc_firmware_ctf(hwmgr);
334 ret = psm_set_boot_states(hwmgr);
337 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
340 ret = phm_power_down_asic(hwmgr);
345 int hwmgr_hw_resume(struct pp_instance *handle)
347 struct pp_hwmgr *hwmgr;
350 if (handle == NULL || handle->hwmgr == NULL)
353 hwmgr = handle->hwmgr;
354 ret = phm_setup_asic(hwmgr);
358 ret = phm_enable_dynamic_state_management(hwmgr);
361 ret = phm_start_thermal_controller(hwmgr);
365 ret |= psm_set_performance_states(hwmgr);
369 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
374 static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
377 case POWER_STATE_TYPE_BATTERY:
378 return PP_StateUILabel_Battery;
379 case POWER_STATE_TYPE_BALANCED:
380 return PP_StateUILabel_Balanced;
381 case POWER_STATE_TYPE_PERFORMANCE:
382 return PP_StateUILabel_Performance;
384 return PP_StateUILabel_None;
388 int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
389 enum amd_pm_state_type *user_state)
392 struct pp_hwmgr *hwmgr;
394 if (handle == NULL || handle->hwmgr == NULL)
397 hwmgr = handle->hwmgr;
400 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
401 ret = phm_set_cpu_power_state(hwmgr);
404 ret = psm_set_performance_states(hwmgr);
407 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
409 case AMD_PP_TASK_ENABLE_USER_STATE:
411 enum PP_StateUILabel requested_ui_label;
412 struct pp_power_state *requested_ps = NULL;
414 if (user_state == NULL) {
419 requested_ui_label = power_state_convert(*user_state);
420 ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
423 ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps);
426 case AMD_PP_TASK_COMPLETE_INIT:
427 case AMD_PP_TASK_READJUST_POWER_STATE:
428 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
436 * Returns once the part of the register indicated by the mask has
437 * reached the given value.
439 int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
440 uint32_t value, uint32_t mask)
445 if (hwmgr == NULL || hwmgr->device == NULL) {
446 pr_err("Invalid Hardware Manager!");
450 for (i = 0; i < hwmgr->usec_timeout; i++) {
451 cur_value = cgs_read_register(hwmgr->device, index);
452 if ((cur_value & mask) == (value & mask))
457 /* timeout means wrong logic*/
458 if (i == hwmgr->usec_timeout)
465 * Returns once the part of the register indicated by the mask has
466 * reached the given value.The indirect space is described by giving
467 * the memory-mapped index of the indirect index register.
469 int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
470 uint32_t indirect_port,
475 if (hwmgr == NULL || hwmgr->device == NULL) {
476 pr_err("Invalid Hardware Manager!");
480 cgs_write_register(hwmgr->device, indirect_port, index);
481 return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
484 int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
486 uint32_t value, uint32_t mask)
491 if (hwmgr == NULL || hwmgr->device == NULL)
494 for (i = 0; i < hwmgr->usec_timeout; i++) {
495 cur_value = cgs_read_register(hwmgr->device,
497 if ((cur_value & mask) != (value & mask))
502 /* timeout means wrong logic */
503 if (i == hwmgr->usec_timeout)
508 int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
509 uint32_t indirect_port,
514 if (hwmgr == NULL || hwmgr->device == NULL)
517 cgs_write_register(hwmgr->device, indirect_port, index);
518 return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
522 bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
524 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
527 bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
529 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
533 int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
538 struct pp_atomctrl_voltage_table *table;
540 PP_ASSERT_WITH_CODE((NULL != vol_table),
541 "Voltage Table empty.", return -EINVAL);
543 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
549 table->mask_low = vol_table->mask_low;
550 table->phase_delay = vol_table->phase_delay;
552 for (i = 0; i < vol_table->count; i++) {
553 vvalue = vol_table->entries[i].value;
556 for (j = 0; j < table->count; j++) {
557 if (vvalue == table->entries[j].value) {
564 table->entries[table->count].value = vvalue;
565 table->entries[table->count].smio_low =
566 vol_table->entries[i].smio_low;
571 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
577 int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
578 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
583 PP_ASSERT_WITH_CODE((0 != dep_table->count),
584 "Voltage Dependency Table empty.", return -EINVAL);
586 PP_ASSERT_WITH_CODE((NULL != vol_table),
587 "vol_table empty.", return -EINVAL);
589 vol_table->mask_low = 0;
590 vol_table->phase_delay = 0;
591 vol_table->count = dep_table->count;
593 for (i = 0; i < dep_table->count; i++) {
594 vol_table->entries[i].value = dep_table->entries[i].mvdd;
595 vol_table->entries[i].smio_low = 0;
598 result = phm_trim_voltage_table(vol_table);
599 PP_ASSERT_WITH_CODE((0 == result),
600 "Failed to trim MVDD table.", return result);
605 int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
606 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
611 PP_ASSERT_WITH_CODE((0 != dep_table->count),
612 "Voltage Dependency Table empty.", return -EINVAL);
614 PP_ASSERT_WITH_CODE((NULL != vol_table),
615 "vol_table empty.", return -EINVAL);
617 vol_table->mask_low = 0;
618 vol_table->phase_delay = 0;
619 vol_table->count = dep_table->count;
621 for (i = 0; i < dep_table->count; i++) {
622 vol_table->entries[i].value = dep_table->entries[i].vddci;
623 vol_table->entries[i].smio_low = 0;
626 result = phm_trim_voltage_table(vol_table);
627 PP_ASSERT_WITH_CODE((0 == result),
628 "Failed to trim VDDCI table.", return result);
633 int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
634 phm_ppt_v1_voltage_lookup_table *lookup_table)
638 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
639 "Voltage Lookup Table empty.", return -EINVAL);
641 PP_ASSERT_WITH_CODE((NULL != vol_table),
642 "vol_table empty.", return -EINVAL);
644 vol_table->mask_low = 0;
645 vol_table->phase_delay = 0;
647 vol_table->count = lookup_table->count;
649 for (i = 0; i < vol_table->count; i++) {
650 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
651 vol_table->entries[i].smio_low = 0;
657 void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
658 struct pp_atomctrl_voltage_table *vol_table)
660 unsigned int i, diff;
662 if (vol_table->count <= max_vol_steps)
665 diff = vol_table->count - max_vol_steps;
667 for (i = 0; i < max_vol_steps; i++)
668 vol_table->entries[i] = vol_table->entries[i + diff];
670 vol_table->count = max_vol_steps;
675 int phm_reset_single_dpm_table(void *table,
676 uint32_t count, int max)
680 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
682 dpm_table->count = count > max ? max : count;
684 for (i = 0; i < dpm_table->count; i++)
685 dpm_table->dpm_level[i].enabled = false;
690 void phm_setup_pcie_table_entry(
692 uint32_t index, uint32_t pcie_gen,
695 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
696 dpm_table->dpm_level[index].value = pcie_gen;
697 dpm_table->dpm_level[index].param1 = pcie_lanes;
698 dpm_table->dpm_level[index].enabled = 1;
701 int32_t phm_get_dpm_level_enable_mask_value(void *table)
705 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
707 for (i = dpm_table->count; i > 0; i--) {
709 if (dpm_table->dpm_level[i - 1].enabled)
718 uint8_t phm_get_voltage_index(
719 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
721 uint8_t count = (uint8_t) (lookup_table->count);
724 PP_ASSERT_WITH_CODE((NULL != lookup_table),
725 "Lookup Table empty.", return 0);
726 PP_ASSERT_WITH_CODE((0 != count),
727 "Lookup Table empty.", return 0);
729 for (i = 0; i < lookup_table->count; i++) {
730 /* find first voltage equal or bigger than requested */
731 if (lookup_table->entries[i].us_vdd >= voltage)
734 /* voltage is bigger than max voltage in the table */
738 uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
741 uint8_t count = (uint8_t) (voltage_table->count);
744 PP_ASSERT_WITH_CODE((NULL != voltage_table),
745 "Voltage Table empty.", return 0;);
746 PP_ASSERT_WITH_CODE((0 != count),
747 "Voltage Table empty.", return 0;);
749 for (i = 0; i < count; i++) {
750 /* find first voltage bigger than requested */
751 if (voltage_table->entries[i].value >= voltage)
755 /* voltage is bigger than max voltage in the table */
759 uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
763 for (i = 0; i < vddci_table->count; i++) {
764 if (vddci_table->entries[i].value >= vddci)
765 return vddci_table->entries[i].value;
768 pr_debug("vddci is larger than max value in vddci_table\n");
769 return vddci_table->entries[i-1].value;
772 int phm_find_boot_level(void *table,
773 uint32_t value, uint32_t *boot_level)
775 int result = -EINVAL;
777 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
779 for (i = 0; i < dpm_table->count; i++) {
780 if (value == dpm_table->dpm_level[i].value) {
789 int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
790 phm_ppt_v1_voltage_lookup_table *lookup_table,
791 uint16_t virtual_voltage_id, int32_t *sclk)
795 struct phm_ppt_v1_information *table_info =
796 (struct phm_ppt_v1_information *)(hwmgr->pptable);
798 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
800 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
801 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
802 voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
803 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
807 if (entry_id >= table_info->vdd_dep_on_sclk->count) {
808 pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
812 *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
818 * Initialize Dynamic State Adjustment Rule Settings
820 * @param hwmgr the address of the powerplay hardware manager.
822 int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
825 struct phm_clock_voltage_dependency_table *table_clk_vlt;
826 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
828 /* initialize vddc_dep_on_dal_pwrl table */
829 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
830 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
832 if (NULL == table_clk_vlt) {
833 pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
836 table_clk_vlt->count = 4;
837 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
838 table_clk_vlt->entries[0].v = 0;
839 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
840 table_clk_vlt->entries[1].v = 720;
841 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
842 table_clk_vlt->entries[2].v = 810;
843 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
844 table_clk_vlt->entries[3].v = 900;
845 if (pptable_info != NULL)
846 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
847 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
/*
 * Index of the lowest set bit in @mask (the lowest enabled DPM level).
 * Returns 0 for an empty mask.
 */
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	/* BUGFIX: a zero mask previously spun forever and shifted past
	 * bit 31 (undefined behavior). */
	if (mask == 0)
		return 0;

	while (0 == (mask & (1 << level)))
		level++;

	return level;
}
863 void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
865 struct phm_ppt_v1_information *table_info =
866 (struct phm_ppt_v1_information *)hwmgr->pptable;
867 struct phm_clock_voltage_dependency_table *table =
868 table_info->vddc_dep_on_dal_pwrl;
869 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
870 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
871 uint32_t req_vddc = 0, req_volt, i;
873 if (!table || table->count <= 0
874 || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
875 || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
878 for (i = 0; i < table->count; i++) {
879 if (dal_power_level == table->entries[i].clk) {
880 req_vddc = table->entries[i].v;
885 vddc_table = table_info->vdd_dep_on_sclk;
886 for (i = 0; i < vddc_table->count; i++) {
887 if (req_vddc <= vddc_table->entries[i].vddc) {
888 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
889 smum_send_msg_to_smc_with_parameter(hwmgr,
890 PPSMC_MSG_VddC_Request, req_volt);
894 pr_err("DAL requested level can not"
895 " found a available voltage in VDDC DPM Table \n");
898 void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
900 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
902 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
903 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
905 #if defined(CONFIG_ACPI)
906 if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
907 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
910 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
911 PHM_PlatformCaps_DynamicPatchPowerState);
913 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
914 PHM_PlatformCaps_EnableSMU7ThermalManagement);
916 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
917 PHM_PlatformCaps_DynamicPowerManagement);
919 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
920 PHM_PlatformCaps_SMC);
922 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
923 PHM_PlatformCaps_DynamicUVDState);
925 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
926 PHM_PlatformCaps_FanSpeedInTableIsRPM);
930 int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
932 if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
933 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
934 PHM_PlatformCaps_SclkDeepSleep);
936 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
937 PHM_PlatformCaps_SclkDeepSleep);
939 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
940 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
941 PHM_PlatformCaps_PowerContainment);
942 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
943 PHM_PlatformCaps_CAC);
945 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
946 PHM_PlatformCaps_PowerContainment);
947 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
948 PHM_PlatformCaps_CAC);
951 if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
952 hwmgr->od_enabled = true;
957 int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
958 uint32_t sclk, uint16_t id, uint16_t *voltage)
963 if (hwmgr->chip_id < CHIP_TONGA) {
964 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
965 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
966 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
967 if (*voltage >= 2000 || *voltage == 0)
970 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
971 *voltage = (uint16_t)(vol/100);
976 int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
978 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
979 PHM_PlatformCaps_EVV);
980 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
981 PHM_PlatformCaps_SQRamping);
982 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
983 PHM_PlatformCaps_RegulatorHot);
985 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
986 PHM_PlatformCaps_AutomaticDCTransition);
988 if (hwmgr->chip_id != CHIP_POLARIS10)
989 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
990 PHM_PlatformCaps_SPLLShutdownSupport);
992 if (hwmgr->chip_id != CHIP_POLARIS11) {
993 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
994 PHM_PlatformCaps_DBRamping);
995 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
996 PHM_PlatformCaps_TDRamping);
997 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
998 PHM_PlatformCaps_TCPRamping);
1003 int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
1005 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1006 PHM_PlatformCaps_EVV);
1007 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1008 PHM_PlatformCaps_SQRamping);
1009 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1010 PHM_PlatformCaps_DBRamping);
1011 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1012 PHM_PlatformCaps_TDRamping);
1013 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1014 PHM_PlatformCaps_TCPRamping);
1018 int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
1020 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1021 PHM_PlatformCaps_EVV);
1022 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1023 PHM_PlatformCaps_SQRamping);
1024 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1025 PHM_PlatformCaps_DBRamping);
1026 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1027 PHM_PlatformCaps_TDRamping);
1028 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1029 PHM_PlatformCaps_TCPRamping);
1031 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1032 PHM_PlatformCaps_UVDPowerGating);
1033 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1034 PHM_PlatformCaps_VCEPowerGating);
1038 int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
1040 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1041 PHM_PlatformCaps_EVV);
1042 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1043 PHM_PlatformCaps_SQRamping);
1044 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1045 PHM_PlatformCaps_DBRamping);
1046 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1047 PHM_PlatformCaps_TDRamping);
1048 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1049 PHM_PlatformCaps_TCPRamping);
1053 int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
1055 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1056 PHM_PlatformCaps_SQRamping);
1057 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1058 PHM_PlatformCaps_DBRamping);
1059 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1060 PHM_PlatformCaps_TDRamping);
1061 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1062 PHM_PlatformCaps_TCPRamping);
1063 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1064 PHM_PlatformCaps_MemorySpreadSpectrumSupport);
1065 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1066 PHM_PlatformCaps_EngineSpreadSpectrumSupport);