/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include <linux/delay.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/types.h>
29 #include <linux/pci.h>
30 #include <drm/amdgpu_drm.h>
31 #include "power_state.h"
33 #include "pppcielanes.h"
34 #include "ppatomctrl.h"
40 extern const struct pp_smumgr_func ci_smu_funcs;
41 extern const struct pp_smumgr_func cz_smu_funcs;
42 extern const struct pp_smumgr_func iceland_smu_funcs;
43 extern const struct pp_smumgr_func tonga_smu_funcs;
44 extern const struct pp_smumgr_func fiji_smu_funcs;
45 extern const struct pp_smumgr_func polaris10_smu_funcs;
46 extern const struct pp_smumgr_func vega10_smu_funcs;
47 extern const struct pp_smumgr_func rv_smu_funcs;
49 extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
50 static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
51 static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
52 static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
53 static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
54 static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
55 static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
56 static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
58 uint8_t convert_to_vid(uint16_t vddc)
60 return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
63 static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr,
64 struct cgs_system_info *sys_info)
66 sys_info->size = sizeof(struct cgs_system_info);
67 sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN;
69 return cgs_query_system_info(hwmgr->device, sys_info);
72 static int phm_thermal_l2h_irq(void *private_data,
73 unsigned src_id, const uint32_t *iv_entry)
75 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
76 struct cgs_system_info sys_info = {0};
79 result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
83 pr_warn("GPU over temperature range detected on PCIe %lld:%lld.%lld!\n",
84 PCI_BUS_NUM(sys_info.value),
85 PCI_SLOT(sys_info.value),
86 PCI_FUNC(sys_info.value));
90 static int phm_thermal_h2l_irq(void *private_data,
91 unsigned src_id, const uint32_t *iv_entry)
93 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
94 struct cgs_system_info sys_info = {0};
97 result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
101 pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n",
102 PCI_BUS_NUM(sys_info.value),
103 PCI_SLOT(sys_info.value),
104 PCI_FUNC(sys_info.value));
108 static int phm_ctf_irq(void *private_data,
109 unsigned src_id, const uint32_t *iv_entry)
111 struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
112 struct cgs_system_info sys_info = {0};
115 result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
119 pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n",
120 PCI_BUS_NUM(sys_info.value),
121 PCI_SLOT(sys_info.value),
122 PCI_FUNC(sys_info.value));
126 static const struct cgs_irq_src_funcs thermal_irq_src[3] = {
127 { .handler = phm_thermal_l2h_irq },
128 { .handler = phm_thermal_h2l_irq },
129 { .handler = phm_ctf_irq }
132 int hwmgr_early_init(struct pp_instance *handle)
134 struct pp_hwmgr *hwmgr;
139 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
143 handle->hwmgr = hwmgr;
144 hwmgr->device = handle->device;
145 hwmgr->chip_family = handle->chip_family;
146 hwmgr->chip_id = handle->chip_id;
147 hwmgr->feature_mask = handle->feature_mask;
148 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
149 hwmgr->power_source = PP_PowerSource_AC;
150 hwmgr->pp_table_version = PP_TABLE_V1;
151 hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
152 hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
153 hwmgr_init_default_caps(hwmgr);
154 hwmgr_set_user_specify_caps(hwmgr);
155 hwmgr->fan_ctrl_is_in_default_mode = true;
156 hwmgr->reload_fw = 1;
158 switch (hwmgr->chip_family) {
159 case AMDGPU_FAMILY_CI:
160 hwmgr->smumgr_funcs = &ci_smu_funcs;
161 ci_set_asic_special_caps(hwmgr);
162 hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
163 PP_ENABLE_GFX_CG_THRU_SMU);
164 hwmgr->pp_table_version = PP_TABLE_V0;
165 smu7_init_function_pointers(hwmgr);
167 case AMDGPU_FAMILY_CZ:
168 hwmgr->smumgr_funcs = &cz_smu_funcs;
169 cz_init_function_pointers(hwmgr);
171 case AMDGPU_FAMILY_VI:
172 switch (hwmgr->chip_id) {
174 hwmgr->smumgr_funcs = &iceland_smu_funcs;
175 topaz_set_asic_special_caps(hwmgr);
176 hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
177 PP_ENABLE_GFX_CG_THRU_SMU);
178 hwmgr->pp_table_version = PP_TABLE_V0;
181 hwmgr->smumgr_funcs = &tonga_smu_funcs;
182 tonga_set_asic_special_caps(hwmgr);
183 hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
186 hwmgr->smumgr_funcs = &fiji_smu_funcs;
187 fiji_set_asic_special_caps(hwmgr);
188 hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
189 PP_ENABLE_GFX_CG_THRU_SMU);
194 hwmgr->smumgr_funcs = &polaris10_smu_funcs;
195 polaris_set_asic_special_caps(hwmgr);
196 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
201 smu7_init_function_pointers(hwmgr);
203 case AMDGPU_FAMILY_AI:
204 switch (hwmgr->chip_id) {
206 hwmgr->smumgr_funcs = &vega10_smu_funcs;
207 vega10_hwmgr_init(hwmgr);
213 case AMDGPU_FAMILY_RV:
214 switch (hwmgr->chip_id) {
216 hwmgr->smumgr_funcs = &rv_smu_funcs;
217 rv_init_function_pointers(hwmgr);
230 int hwmgr_hw_init(struct pp_instance *handle)
232 struct pp_hwmgr *hwmgr;
238 hwmgr = handle->hwmgr;
240 if (hwmgr->pptable_func == NULL ||
241 hwmgr->pptable_func->pptable_init == NULL ||
242 hwmgr->hwmgr_func->backend_init == NULL)
245 ret = hwmgr->pptable_func->pptable_init(hwmgr);
249 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
253 ret = psm_init_power_state_table(hwmgr);
257 ret = phm_setup_asic(hwmgr);
261 ret = phm_enable_dynamic_state_management(hwmgr);
264 ret = phm_start_thermal_controller(hwmgr, NULL);
265 ret |= psm_set_performance_states(hwmgr);
269 ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src);
275 if (hwmgr->hwmgr_func->backend_fini)
276 hwmgr->hwmgr_func->backend_fini(hwmgr);
278 if (hwmgr->pptable_func->pptable_fini)
279 hwmgr->pptable_func->pptable_fini(hwmgr);
281 pr_err("amdgpu: powerplay initialization failed\n");
285 int hwmgr_hw_fini(struct pp_instance *handle)
287 struct pp_hwmgr *hwmgr;
289 if (handle == NULL || handle->hwmgr == NULL)
292 hwmgr = handle->hwmgr;
294 phm_stop_thermal_controller(hwmgr);
295 psm_set_boot_states(hwmgr);
296 psm_adjust_power_state_dynamic(hwmgr, false, NULL);
297 phm_disable_dynamic_state_management(hwmgr);
298 phm_disable_clock_power_gatings(hwmgr);
300 if (hwmgr->hwmgr_func->backend_fini)
301 hwmgr->hwmgr_func->backend_fini(hwmgr);
302 if (hwmgr->pptable_func->pptable_fini)
303 hwmgr->pptable_func->pptable_fini(hwmgr);
304 return psm_fini_power_state_table(hwmgr);
307 int hwmgr_hw_suspend(struct pp_instance *handle)
309 struct pp_hwmgr *hwmgr;
312 if (handle == NULL || handle->hwmgr == NULL)
315 hwmgr = handle->hwmgr;
316 phm_disable_smc_firmware_ctf(hwmgr);
317 ret = psm_set_boot_states(hwmgr);
320 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
323 ret = phm_power_down_asic(hwmgr);
328 int hwmgr_hw_resume(struct pp_instance *handle)
330 struct pp_hwmgr *hwmgr;
333 if (handle == NULL || handle->hwmgr == NULL)
336 hwmgr = handle->hwmgr;
337 ret = phm_setup_asic(hwmgr);
341 ret = phm_enable_dynamic_state_management(hwmgr);
344 ret = phm_start_thermal_controller(hwmgr, NULL);
348 ret |= psm_set_performance_states(hwmgr);
352 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
357 static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
360 case POWER_STATE_TYPE_BATTERY:
361 return PP_StateUILabel_Battery;
362 case POWER_STATE_TYPE_BALANCED:
363 return PP_StateUILabel_Balanced;
364 case POWER_STATE_TYPE_PERFORMANCE:
365 return PP_StateUILabel_Performance;
367 return PP_StateUILabel_None;
371 int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
372 void *input, void *output)
375 struct pp_hwmgr *hwmgr;
377 if (handle == NULL || handle->hwmgr == NULL)
380 hwmgr = handle->hwmgr;
383 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
384 ret = phm_set_cpu_power_state(hwmgr);
387 ret = psm_set_performance_states(hwmgr);
390 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
392 case AMD_PP_TASK_ENABLE_USER_STATE:
394 enum amd_pm_state_type ps;
395 enum PP_StateUILabel requested_ui_label;
396 struct pp_power_state *requested_ps = NULL;
402 ps = *(unsigned long *)input;
404 requested_ui_label = power_state_convert(ps);
405 ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
408 ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps);
411 case AMD_PP_TASK_COMPLETE_INIT:
412 case AMD_PP_TASK_READJUST_POWER_STATE:
413 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
421 * Returns once the part of the register indicated by the mask has
422 * reached the given value.
424 int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
425 uint32_t value, uint32_t mask)
430 if (hwmgr == NULL || hwmgr->device == NULL) {
431 pr_err("Invalid Hardware Manager!");
435 for (i = 0; i < hwmgr->usec_timeout; i++) {
436 cur_value = cgs_read_register(hwmgr->device, index);
437 if ((cur_value & mask) == (value & mask))
442 /* timeout means wrong logic*/
443 if (i == hwmgr->usec_timeout)
450 * Returns once the part of the register indicated by the mask has
451 * reached the given value.The indirect space is described by giving
452 * the memory-mapped index of the indirect index register.
454 int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
455 uint32_t indirect_port,
460 if (hwmgr == NULL || hwmgr->device == NULL) {
461 pr_err("Invalid Hardware Manager!");
465 cgs_write_register(hwmgr->device, indirect_port, index);
466 return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
469 int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
471 uint32_t value, uint32_t mask)
476 if (hwmgr == NULL || hwmgr->device == NULL)
479 for (i = 0; i < hwmgr->usec_timeout; i++) {
480 cur_value = cgs_read_register(hwmgr->device,
482 if ((cur_value & mask) != (value & mask))
487 /* timeout means wrong logic */
488 if (i == hwmgr->usec_timeout)
493 int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
494 uint32_t indirect_port,
499 if (hwmgr == NULL || hwmgr->device == NULL)
502 cgs_write_register(hwmgr->device, indirect_port, index);
503 return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
507 bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
509 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
512 bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
514 return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
518 int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
523 struct pp_atomctrl_voltage_table *table;
525 PP_ASSERT_WITH_CODE((NULL != vol_table),
526 "Voltage Table empty.", return -EINVAL);
528 table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
534 table->mask_low = vol_table->mask_low;
535 table->phase_delay = vol_table->phase_delay;
537 for (i = 0; i < vol_table->count; i++) {
538 vvalue = vol_table->entries[i].value;
541 for (j = 0; j < table->count; j++) {
542 if (vvalue == table->entries[j].value) {
549 table->entries[table->count].value = vvalue;
550 table->entries[table->count].smio_low =
551 vol_table->entries[i].smio_low;
556 memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
562 int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
563 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
568 PP_ASSERT_WITH_CODE((0 != dep_table->count),
569 "Voltage Dependency Table empty.", return -EINVAL);
571 PP_ASSERT_WITH_CODE((NULL != vol_table),
572 "vol_table empty.", return -EINVAL);
574 vol_table->mask_low = 0;
575 vol_table->phase_delay = 0;
576 vol_table->count = dep_table->count;
578 for (i = 0; i < dep_table->count; i++) {
579 vol_table->entries[i].value = dep_table->entries[i].mvdd;
580 vol_table->entries[i].smio_low = 0;
583 result = phm_trim_voltage_table(vol_table);
584 PP_ASSERT_WITH_CODE((0 == result),
585 "Failed to trim MVDD table.", return result);
590 int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
591 phm_ppt_v1_clock_voltage_dependency_table *dep_table)
596 PP_ASSERT_WITH_CODE((0 != dep_table->count),
597 "Voltage Dependency Table empty.", return -EINVAL);
599 PP_ASSERT_WITH_CODE((NULL != vol_table),
600 "vol_table empty.", return -EINVAL);
602 vol_table->mask_low = 0;
603 vol_table->phase_delay = 0;
604 vol_table->count = dep_table->count;
606 for (i = 0; i < dep_table->count; i++) {
607 vol_table->entries[i].value = dep_table->entries[i].vddci;
608 vol_table->entries[i].smio_low = 0;
611 result = phm_trim_voltage_table(vol_table);
612 PP_ASSERT_WITH_CODE((0 == result),
613 "Failed to trim VDDCI table.", return result);
618 int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
619 phm_ppt_v1_voltage_lookup_table *lookup_table)
623 PP_ASSERT_WITH_CODE((0 != lookup_table->count),
624 "Voltage Lookup Table empty.", return -EINVAL);
626 PP_ASSERT_WITH_CODE((NULL != vol_table),
627 "vol_table empty.", return -EINVAL);
629 vol_table->mask_low = 0;
630 vol_table->phase_delay = 0;
632 vol_table->count = lookup_table->count;
634 for (i = 0; i < vol_table->count; i++) {
635 vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
636 vol_table->entries[i].smio_low = 0;
642 void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
643 struct pp_atomctrl_voltage_table *vol_table)
645 unsigned int i, diff;
647 if (vol_table->count <= max_vol_steps)
650 diff = vol_table->count - max_vol_steps;
652 for (i = 0; i < max_vol_steps; i++)
653 vol_table->entries[i] = vol_table->entries[i + diff];
655 vol_table->count = max_vol_steps;
660 int phm_reset_single_dpm_table(void *table,
661 uint32_t count, int max)
665 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
667 dpm_table->count = count > max ? max : count;
669 for (i = 0; i < dpm_table->count; i++)
670 dpm_table->dpm_level[i].enabled = false;
675 void phm_setup_pcie_table_entry(
677 uint32_t index, uint32_t pcie_gen,
680 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
681 dpm_table->dpm_level[index].value = pcie_gen;
682 dpm_table->dpm_level[index].param1 = pcie_lanes;
683 dpm_table->dpm_level[index].enabled = 1;
686 int32_t phm_get_dpm_level_enable_mask_value(void *table)
690 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
692 for (i = dpm_table->count; i > 0; i--) {
694 if (dpm_table->dpm_level[i - 1].enabled)
703 uint8_t phm_get_voltage_index(
704 struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
706 uint8_t count = (uint8_t) (lookup_table->count);
709 PP_ASSERT_WITH_CODE((NULL != lookup_table),
710 "Lookup Table empty.", return 0);
711 PP_ASSERT_WITH_CODE((0 != count),
712 "Lookup Table empty.", return 0);
714 for (i = 0; i < lookup_table->count; i++) {
715 /* find first voltage equal or bigger than requested */
716 if (lookup_table->entries[i].us_vdd >= voltage)
719 /* voltage is bigger than max voltage in the table */
723 uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
726 uint8_t count = (uint8_t) (voltage_table->count);
729 PP_ASSERT_WITH_CODE((NULL != voltage_table),
730 "Voltage Table empty.", return 0;);
731 PP_ASSERT_WITH_CODE((0 != count),
732 "Voltage Table empty.", return 0;);
734 for (i = 0; i < count; i++) {
735 /* find first voltage bigger than requested */
736 if (voltage_table->entries[i].value >= voltage)
740 /* voltage is bigger than max voltage in the table */
744 uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
748 for (i = 0; i < vddci_table->count; i++) {
749 if (vddci_table->entries[i].value >= vddci)
750 return vddci_table->entries[i].value;
753 pr_debug("vddci is larger than max value in vddci_table\n");
754 return vddci_table->entries[i-1].value;
757 int phm_find_boot_level(void *table,
758 uint32_t value, uint32_t *boot_level)
760 int result = -EINVAL;
762 struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
764 for (i = 0; i < dpm_table->count; i++) {
765 if (value == dpm_table->dpm_level[i].value) {
774 int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
775 phm_ppt_v1_voltage_lookup_table *lookup_table,
776 uint16_t virtual_voltage_id, int32_t *sclk)
780 struct phm_ppt_v1_information *table_info =
781 (struct phm_ppt_v1_information *)(hwmgr->pptable);
783 PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
785 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
786 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
787 voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
788 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
792 if (entry_id >= table_info->vdd_dep_on_sclk->count) {
793 pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
797 *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
803 * Initialize Dynamic State Adjustment Rule Settings
805 * @param hwmgr the address of the powerplay hardware manager.
807 int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
810 struct phm_clock_voltage_dependency_table *table_clk_vlt;
811 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
813 /* initialize vddc_dep_on_dal_pwrl table */
814 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
815 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
817 if (NULL == table_clk_vlt) {
818 pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
821 table_clk_vlt->count = 4;
822 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
823 table_clk_vlt->entries[0].v = 0;
824 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
825 table_clk_vlt->entries[1].v = 720;
826 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
827 table_clk_vlt->entries[2].v = 810;
828 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
829 table_clk_vlt->entries[3].v = 900;
830 if (pptable_info != NULL)
831 pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
832 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
838 uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
842 while (0 == (mask & (1 << level)))
848 void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
850 struct phm_ppt_v1_information *table_info =
851 (struct phm_ppt_v1_information *)hwmgr->pptable;
852 struct phm_clock_voltage_dependency_table *table =
853 table_info->vddc_dep_on_dal_pwrl;
854 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
855 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
856 uint32_t req_vddc = 0, req_volt, i;
858 if (!table || table->count <= 0
859 || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
860 || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
863 for (i = 0; i < table->count; i++) {
864 if (dal_power_level == table->entries[i].clk) {
865 req_vddc = table->entries[i].v;
870 vddc_table = table_info->vdd_dep_on_sclk;
871 for (i = 0; i < vddc_table->count; i++) {
872 if (req_vddc <= vddc_table->entries[i].vddc) {
873 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
874 smum_send_msg_to_smc_with_parameter(hwmgr,
875 PPSMC_MSG_VddC_Request, req_volt);
879 pr_err("DAL requested level can not"
880 " found a available voltage in VDDC DPM Table \n");
883 void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
885 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
887 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
888 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
890 if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
891 acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
892 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
894 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
895 PHM_PlatformCaps_DynamicPatchPowerState);
897 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
898 PHM_PlatformCaps_EnableSMU7ThermalManagement);
900 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
901 PHM_PlatformCaps_DynamicPowerManagement);
903 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
904 PHM_PlatformCaps_SMC);
906 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
907 PHM_PlatformCaps_DynamicUVDState);
909 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
910 PHM_PlatformCaps_FanSpeedInTableIsRPM);
914 int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
916 if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
917 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
918 PHM_PlatformCaps_SclkDeepSleep);
920 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
921 PHM_PlatformCaps_SclkDeepSleep);
923 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
924 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
925 PHM_PlatformCaps_PowerContainment);
926 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
927 PHM_PlatformCaps_CAC);
929 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
930 PHM_PlatformCaps_PowerContainment);
931 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
932 PHM_PlatformCaps_CAC);
938 int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
939 uint32_t sclk, uint16_t id, uint16_t *voltage)
944 if (hwmgr->chip_id < CHIP_TONGA) {
945 ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
946 } else if (hwmgr->chip_id < CHIP_POLARIS10) {
947 ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
948 if (*voltage >= 2000 || *voltage == 0)
951 ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
952 *voltage = (uint16_t)(vol/100);
957 int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
959 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
960 PHM_PlatformCaps_EVV);
961 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
962 PHM_PlatformCaps_SQRamping);
963 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
964 PHM_PlatformCaps_RegulatorHot);
966 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
967 PHM_PlatformCaps_AutomaticDCTransition);
969 if (hwmgr->chip_id != CHIP_POLARIS10)
970 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
971 PHM_PlatformCaps_SPLLShutdownSupport);
973 if (hwmgr->chip_id != CHIP_POLARIS11) {
974 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
975 PHM_PlatformCaps_DBRamping);
976 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
977 PHM_PlatformCaps_TDRamping);
978 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
979 PHM_PlatformCaps_TCPRamping);
984 int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
986 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
987 PHM_PlatformCaps_EVV);
988 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
989 PHM_PlatformCaps_SQRamping);
990 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
991 PHM_PlatformCaps_DBRamping);
992 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
993 PHM_PlatformCaps_TDRamping);
994 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
995 PHM_PlatformCaps_TCPRamping);
999 int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
1001 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1002 PHM_PlatformCaps_EVV);
1003 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1004 PHM_PlatformCaps_SQRamping);
1005 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1006 PHM_PlatformCaps_DBRamping);
1007 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1008 PHM_PlatformCaps_TDRamping);
1009 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1010 PHM_PlatformCaps_TCPRamping);
1012 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1013 PHM_PlatformCaps_UVDPowerGating);
1014 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1015 PHM_PlatformCaps_VCEPowerGating);
1019 int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
1021 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1022 PHM_PlatformCaps_EVV);
1023 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1024 PHM_PlatformCaps_SQRamping);
1025 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1026 PHM_PlatformCaps_DBRamping);
1027 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1028 PHM_PlatformCaps_TDRamping);
1029 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1030 PHM_PlatformCaps_TCPRamping);
1034 int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
1036 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1037 PHM_PlatformCaps_SQRamping);
1038 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1039 PHM_PlatformCaps_DBRamping);
1040 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1041 PHM_PlatformCaps_TDRamping);
1042 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1043 PHM_PlatformCaps_TCPRamping);
1044 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1045 PHM_PlatformCaps_MemorySpreadSpectrumSupport);
1046 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1047 PHM_PlatformCaps_EngineSpreadSpectrumSupport);