2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include "amd_shared.h"
29 #include "amd_powerplay.h"
30 #include "power_state.h"
34 #define PP_DPM_DISABLED 0xCCCC
36 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
37 enum amd_pm_state_type *user_state);
39 static const struct amd_pm_funcs pp_dpm_funcs;
/*
 * pp_check - validate a powerplay handle before dispatching to it.
 * Rejects a NULL hwmgr or one without SMU manager callbacks; reports
 * PP_DPM_DISABLED when power management is off or no hwmgr callbacks exist.
 * NOTE(review): this excerpt is missing lines (braces, the error return for
 * the first check, the final success return) — confirm against the full file.
 */
41 static inline int pp_check(struct pp_hwmgr *hwmgr)
43 	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
46 	if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
47 		return PP_DPM_DISABLED;
/*
 * amd_powerplay_create - allocate and seed the pp_hwmgr instance for @adev.
 * Records chip identity and feature mask, creates the CGS device wrapper,
 * and publishes the handle/function table into adev->powerplay.
 * NOTE(review): excerpt is missing lines (allocation-failure check and
 * returns are not visible here).
 */
52 static int amd_powerplay_create(struct amdgpu_device *adev)
54 	struct pp_hwmgr *hwmgr;
59 	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	/* DPM is enabled only when the module parameter allows it and we are
	 * not running as an SR-IOV virtual function. */
64 	hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
65 	hwmgr->device = amdgpu_cgs_create_device(adev);
66 	mutex_init(&hwmgr->smu_lock);
67 	hwmgr->chip_family = adev->family;
68 	hwmgr->chip_id = adev->asic_type;
69 	hwmgr->feature_mask = amdgpu_pp_feature_mask;
70 	adev->powerplay.pp_handle = hwmgr;
71 	adev->powerplay.pp_funcs = &pp_dpm_funcs;

/*
 * amd_powerplay_destroy - release resources owned by the powerplay handle.
 * Frees the hardcoded pp table override; NULLing the pointer guards against
 * double free. NOTE(review): freeing of hwmgr itself is not visible in this
 * excerpt — presumably done in the missing lines; verify.
 */
76 static int amd_powerplay_destroy(struct amdgpu_device *adev)
78 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
80 	kfree(hwmgr->hardcode_pp_table);
81 	hwmgr->hardcode_pp_table = NULL;
/*
 * pp_early_init - IP-block early init: create the powerplay handle then run
 * the hwmgr early-init stage. NOTE(review): error-path returns are among the
 * lines missing from this excerpt.
 */
89 static int pp_early_init(void *handle)
92 	struct amdgpu_device *adev = handle;
94 	ret = amd_powerplay_create(adev);
99 	ret = hwmgr_early_init(adev->powerplay.pp_handle);

/*
 * pp_sw_init - IP-block software init: validate the handle, run the SMU
 * manager's smu_init, and register powerplay interrupt handlers.
 */
106 static int pp_sw_init(void *handle)
108 	struct amdgpu_device *adev = handle;
109 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
112 	ret = pp_check(hwmgr);
115 	if (hwmgr->smumgr_funcs->smu_init == NULL)
118 	ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
120 	phm_register_irq_handlers(hwmgr);
122 	pr_debug("amdgpu: powerplay sw initialized\n");

/*
 * pp_sw_fini - IP-block software teardown: tear down the SMU manager and,
 * when firmware was loaded through the SMU, release the ucode buffer object.
 */
128 static int pp_sw_fini(void *handle)
130 	struct amdgpu_device *adev = handle;
131 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
134 	ret = pp_check(hwmgr);
136 	if (hwmgr->smumgr_funcs->smu_fini != NULL)
137 		hwmgr->smumgr_funcs->smu_fini(hwmgr);
140 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
141 		amdgpu_ucode_fini_bo(adev);
/*
 * pp_hw_init - IP-block hardware init: stage SMU firmware, start the SMC,
 * and run full hwmgr hardware init. When pp_check reported PP_DPM_DISABLED,
 * DPM setup is skipped and DAL is told DPM is off via
 * cgs_notify_dpm_enabled(..., false).
 * NOTE(review): several intermediate lines (error returns, labels) are
 * missing from this excerpt.
 */
146 static int pp_hw_init(void *handle)
149 	struct amdgpu_device *adev = handle;
150 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
152 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
153 		amdgpu_ucode_init_bo(adev);
155 	ret = pp_check(hwmgr);
158 	if (hwmgr->smumgr_funcs->start_smu == NULL)
161 	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
162 		pr_err("smc start failed\n");
		/* SMC failed to come up: unwind the SMU manager state. */
163 		hwmgr->smumgr_funcs->smu_fini(hwmgr);
166 	if (ret == PP_DPM_DISABLED)
168 	ret = hwmgr_hw_init(hwmgr);
175 	cgs_notify_dpm_enabled(hwmgr->device, false);

/*
 * pp_hw_fini - IP-block hardware teardown via hwmgr_hw_fini.
 */
180 static int pp_hw_fini(void *handle)
182 	struct amdgpu_device *adev = handle;
183 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
186 	ret = pp_check(hwmgr);
188 	hwmgr_hw_fini(hwmgr);

/*
 * pp_late_init - IP-block late init: kick the COMPLETE_INIT task so the
 * initial power state gets adjusted once everything else is up.
 */
193 static int pp_late_init(void *handle)
195 	struct amdgpu_device *adev = handle;
196 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
199 	ret = pp_check(hwmgr);
202 	pp_dpm_dispatch_tasks(hwmgr,
203 			AMD_PP_TASK_COMPLETE_INIT, NULL);
/* pp_late_fini - final teardown hook: destroy the powerplay handle. */
208 static void pp_late_fini(void *handle)
210 	struct amdgpu_device *adev = handle;
212 	amd_powerplay_destroy(adev);

/* Stub IP-block callbacks; bodies (constant returns) are not visible in
 * this excerpt. */
216 static bool pp_is_idle(void *handle)
221 static int pp_wait_for_idle(void *handle)
226 static int pp_sw_reset(void *handle)
/*
 * pp_set_powergating_state - forward GFX per-CU powergating requests to the
 * hwmgr backend; logs and bails when the backend does not implement it.
 */
231 static int pp_set_powergating_state(void *handle,
232 				    enum amd_powergating_state state)
234 	struct amdgpu_device *adev = handle;
235 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
238 	ret = pp_check(hwmgr);
243 	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
244 		pr_info("%s was not implemented.\n", __func__);
248 	/* Enable/disable GFX per cu powergating through SMU */
249 	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
250 			state == AMD_PG_STATE_GATE);

/* pp_suspend - suspend hook: quiesce the hardware manager. */
253 static int pp_suspend(void *handle)
255 	struct amdgpu_device *adev = handle;
256 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
259 	ret = pp_check(hwmgr);
261 	hwmgr_hw_suspend(hwmgr);

/*
 * pp_resume - resume hook: restart the SMC (mirrors pp_hw_init's start
 * sequence) and then resume hwmgr state. Skips DPM resume when pp_check
 * reported PP_DPM_DISABLED.
 * NOTE(review): error returns between the visible lines are missing from
 * this excerpt.
 */
265 static int pp_resume(void *handle)
267 	struct amdgpu_device *adev = handle;
268 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
271 	ret = pp_check(hwmgr);
276 	if (hwmgr->smumgr_funcs->start_smu == NULL)
279 	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
280 		pr_err("smc start failed\n");
281 		hwmgr->smumgr_funcs->smu_fini(hwmgr);
285 	if (ret == PP_DPM_DISABLED)
288 	return hwmgr_hw_resume(hwmgr);

/* pp_set_clockgating_state - stub; body not visible in this excerpt. */
291 static int pp_set_clockgating_state(void *handle,
292 				    enum amd_clockgating_state state)
/* IP-block callback table wiring powerplay into the amdgpu IP framework.
 * NOTE(review): the .name and .resume members usual for this table are not
 * visible in this excerpt. */
297 static const struct amd_ip_funcs pp_ip_funcs = {
299 	.early_init = pp_early_init,
300 	.late_init = pp_late_init,
301 	.sw_init = pp_sw_init,
302 	.sw_fini = pp_sw_fini,
303 	.hw_init = pp_hw_init,
304 	.hw_fini = pp_hw_fini,
305 	.late_fini = pp_late_fini,
306 	.suspend = pp_suspend,
308 	.is_idle = pp_is_idle,
309 	.wait_for_idle = pp_wait_for_idle,
310 	.soft_reset = pp_sw_reset,
311 	.set_clockgating_state = pp_set_clockgating_state,
312 	.set_powergating_state = pp_set_powergating_state,

/* SMC IP-block version descriptor exported to the amdgpu core. */
315 const struct amdgpu_ip_block_version pp_smu_ip_block =
317 	.type = AMD_IP_BLOCK_TYPE_SMC,
321 	.funcs = &pp_ip_funcs,
/* Firmware-load hooks; bodies (constant returns) not visible in this
 * excerpt. */
324 static int pp_dpm_load_fw(void *handle)
329 static int pp_dpm_fw_loading_complete(void *handle)

/*
 * pp_set_clockgating_by_smu - pass a clock-gating message id to the hwmgr
 * backend's update_clock_gatings, by address since the backend takes an
 * array of message ids.
 */
334 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
336 	struct pp_hwmgr *hwmgr = handle;
339 	ret = pp_check(hwmgr);
344 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
345 		pr_info("%s was not implemented.\n", __func__);
349 	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
/*
 * pp_dpm_en_umd_pstate - handle entry/exit of UMD stable-pstate profile
 * levels. On entry: remember the previous level and ungate GFX clock/power
 * gating (profiling needs stable clocks). On exit: restore the saved level
 * for PROFILE_EXIT and re-enable gating.
 * NOTE(review): the AMD_CG_STATE_GATE / AMD_PG_STATE_GATE arguments of the
 * exit-path calls are among the lines missing from this excerpt.
 */
352 static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
353 				 enum amd_dpm_forced_level *level)
355 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
356 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
357 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
358 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
360 	if (!(hwmgr->dpm_level & profile_mode_mask)) {
361 		/* enter umd pstate, save current level, disable gfx cg*/
362 		if (*level & profile_mode_mask) {
363 			hwmgr->saved_dpm_level = hwmgr->dpm_level;
364 			hwmgr->en_umd_pstate = true;
365 			cgs_set_clockgating_state(hwmgr->device,
366 						AMD_IP_BLOCK_TYPE_GFX,
367 						AMD_CG_STATE_UNGATE);
368 			cgs_set_powergating_state(hwmgr->device,
369 					AMD_IP_BLOCK_TYPE_GFX,
370 					AMD_PG_STATE_UNGATE);
373 		/* exit umd pstate, restore level, enable gfx cg*/
374 		if (!(*level & profile_mode_mask)) {
375 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
376 				*level = hwmgr->saved_dpm_level;
377 			hwmgr->en_umd_pstate = false;
378 			cgs_set_clockgating_state(hwmgr->device,
379 					AMD_IP_BLOCK_TYPE_GFX,
381 			cgs_set_powergating_state(hwmgr->device,
382 					AMD_IP_BLOCK_TYPE_GFX,
/*
 * pp_dpm_force_performance_level - set a forced DPM level. No-op when the
 * requested level already matches. Performs UMD-pstate entry/exit handling,
 * records the request, and triggers a power-state readjust, all under
 * smu_lock.
 */
388 static int pp_dpm_force_performance_level(void *handle,
389 					enum amd_dpm_forced_level level)
391 	struct pp_hwmgr *hwmgr = handle;
394 	ret = pp_check(hwmgr);
399 	if (level == hwmgr->dpm_level)
402 	mutex_lock(&hwmgr->smu_lock);
403 	pp_dpm_en_umd_pstate(hwmgr, &level);
404 	hwmgr->request_dpm_level = level;
405 	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
406 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_dpm_get_performance_level - read the current forced DPM level under
 * smu_lock.
 */
411 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
414 	struct pp_hwmgr *hwmgr = handle;
416 	enum amd_dpm_forced_level level;
418 	ret = pp_check(hwmgr);
423 	mutex_lock(&hwmgr->smu_lock);
424 	level = hwmgr->dpm_level;
425 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_dpm_get_sclk - query engine clock from the backend (@low selects the
 * lowest vs. highest level), serialized by smu_lock.
 */
429 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
431 	struct pp_hwmgr *hwmgr = handle;
435 	ret = pp_check(hwmgr);
440 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
441 		pr_info("%s was not implemented.\n", __func__);
444 	mutex_lock(&hwmgr->smu_lock);
445 	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
446 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_get_mclk - memory-clock counterpart of pp_dpm_get_sclk. */
450 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
452 	struct pp_hwmgr *hwmgr = handle;
456 	ret = pp_check(hwmgr);
461 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
462 		pr_info("%s was not implemented.\n", __func__);
465 	mutex_lock(&hwmgr->smu_lock);
466 	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
467 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_powergate_vce - gate/ungate the VCE block via the backend. */
471 static void pp_dpm_powergate_vce(void *handle, bool gate)
473 	struct pp_hwmgr *hwmgr = handle;
476 	ret = pp_check(hwmgr);
481 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
482 		pr_info("%s was not implemented.\n", __func__);
485 	mutex_lock(&hwmgr->smu_lock);
486 	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
487 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_powergate_uvd - gate/ungate the UVD block via the backend. */
490 static void pp_dpm_powergate_uvd(void *handle, bool gate)
492 	struct pp_hwmgr *hwmgr = handle;
495 	ret = pp_check(hwmgr);
500 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
501 		pr_info("%s was not implemented.\n", __func__);
504 	mutex_lock(&hwmgr->smu_lock);
505 	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
506 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_dpm_dispatch_tasks - run a powerplay task (e.g. COMPLETE_INIT,
 * READJUST_POWER_STATE) through hwmgr_handle_task under smu_lock.
 */
509 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
510 		enum amd_pm_state_type *user_state)
513 	struct pp_hwmgr *hwmgr = handle;
515 	ret = pp_check(hwmgr);
520 	mutex_lock(&hwmgr->smu_lock);
521 	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
522 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_dpm_get_current_power_state - map the active power state's UI label to
 * the generic amd_pm_state_type; boot-flagged states report INTERNAL_BOOT,
 * anything unlabelled falls back to DEFAULT.
 * NOTE(review): `break` statements between cases are missing from this
 * excerpt; in the full file each case breaks (no fallthrough).
 */
527 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
529 	struct pp_hwmgr *hwmgr = handle;
530 	struct pp_power_state *state;
532 	enum amd_pm_state_type pm_type;
534 	ret = pp_check(hwmgr);
539 	if (hwmgr->current_ps == NULL)
542 	mutex_lock(&hwmgr->smu_lock);
544 	state = hwmgr->current_ps;
546 	switch (state->classification.ui_label) {
547 	case PP_StateUILabel_Battery:
548 		pm_type = POWER_STATE_TYPE_BATTERY;
550 	case PP_StateUILabel_Balanced:
551 		pm_type = POWER_STATE_TYPE_BALANCED;
553 	case PP_StateUILabel_Performance:
554 		pm_type = POWER_STATE_TYPE_PERFORMANCE;
557 		if (state->classification.flags & PP_StateClassificationFlag_Boot)
558 			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
560 			pm_type = POWER_STATE_TYPE_DEFAULT;
563 	mutex_unlock(&hwmgr->smu_lock);
/* pp_dpm_set_fan_control_mode - set fan mode (manual/auto) via backend. */
568 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
570 	struct pp_hwmgr *hwmgr = handle;
573 	ret = pp_check(hwmgr);
578 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
579 		pr_info("%s was not implemented.\n", __func__);
582 	mutex_lock(&hwmgr->smu_lock);
583 	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
584 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_get_fan_control_mode - read the current fan control mode. */
587 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
589 	struct pp_hwmgr *hwmgr = handle;
593 	ret = pp_check(hwmgr);
598 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
599 		pr_info("%s was not implemented.\n", __func__);
602 	mutex_lock(&hwmgr->smu_lock);
603 	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
604 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_set_fan_speed_percent - set fan duty cycle (0-100%). */
608 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
610 	struct pp_hwmgr *hwmgr = handle;
613 	ret = pp_check(hwmgr);
618 	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
619 		pr_info("%s was not implemented.\n", __func__);
622 	mutex_lock(&hwmgr->smu_lock);
623 	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
624 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_get_fan_speed_percent - read fan duty cycle into *speed. */
628 static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
630 	struct pp_hwmgr *hwmgr = handle;
633 	ret = pp_check(hwmgr);
638 	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
639 		pr_info("%s was not implemented.\n", __func__);
643 	mutex_lock(&hwmgr->smu_lock);
644 	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
645 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_get_fan_speed_rpm - read fan tachometer RPM; silently bails (no
 * pr_info, unlike its siblings) when the backend lacks the callback. */
649 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
651 	struct pp_hwmgr *hwmgr = handle;
654 	ret = pp_check(hwmgr);
659 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
662 	mutex_lock(&hwmgr->smu_lock);
663 	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
664 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_dpm_get_pp_num_states - fill @data with the count and generic type of
 * every power state in the hwmgr table. States are laid out as a packed
 * array of ps_size-byte records, hence the manual pointer arithmetic.
 * NOTE(review): `break`s between switch cases are missing from this
 * excerpt; the full file breaks after each case.
 */
668 static int pp_dpm_get_pp_num_states(void *handle,
669 		struct pp_states_info *data)
671 	struct pp_hwmgr *hwmgr = handle;
675 	memset(data, 0, sizeof(*data));
677 	ret = pp_check(hwmgr);
682 	if (hwmgr->ps == NULL)
685 	mutex_lock(&hwmgr->smu_lock);
687 	data->nums = hwmgr->num_ps;
689 	for (i = 0; i < hwmgr->num_ps; i++) {
690 		struct pp_power_state *state = (struct pp_power_state *)
691 				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
692 		switch (state->classification.ui_label) {
693 		case PP_StateUILabel_Battery:
694 			data->states[i] = POWER_STATE_TYPE_BATTERY;
696 		case PP_StateUILabel_Balanced:
697 			data->states[i] = POWER_STATE_TYPE_BALANCED;
699 		case PP_StateUILabel_Performance:
700 			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
703 			if (state->classification.flags & PP_StateClassificationFlag_Boot)
704 				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
706 				data->states[i] = POWER_STATE_TYPE_DEFAULT;
709 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_dpm_get_pp_table - hand out a pointer to the live soft pp table and
 * its size. The caller receives a borrowed pointer; it must not free it.
 */
713 static int pp_dpm_get_pp_table(void *handle, char **table)
715 	struct pp_hwmgr *hwmgr = handle;
719 	ret = pp_check(hwmgr);
724 	if (!hwmgr->soft_pp_table)
727 	mutex_lock(&hwmgr->smu_lock);
728 	*table = (char *)hwmgr->soft_pp_table;
729 	size = hwmgr->soft_pp_table_size;
730 	mutex_unlock(&hwmgr->smu_lock);
/*
 * amd_powerplay_reset - full hw teardown + re-init, then re-run the
 * COMPLETE_INIT task; used after the pp table is replaced.
 */
734 static int amd_powerplay_reset(void *handle)
736 	struct pp_hwmgr *hwmgr = handle;
739 	ret = pp_check(hwmgr);
743 	ret = hwmgr_hw_fini(hwmgr);
747 	ret = hwmgr_hw_init(hwmgr);
751 	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);

/*
 * pp_dpm_set_pp_table - install a user-supplied pp table override. Lazily
 * duplicates the soft table into hardcode_pp_table on first use, copies the
 * caller's bytes over it, points soft_pp_table at the override, then resets
 * powerplay so the new table takes effect; finally disables AVFS if the
 * backend supports toggling it.
 * NOTE(review): no visible check that @size fits within the duplicated
 * table before the memcpy — confirm against the full file (potential
 * overflow if missing there too).
 */
754 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
756 	struct pp_hwmgr *hwmgr = handle;
759 	ret = pp_check(hwmgr);
764 	mutex_lock(&hwmgr->smu_lock);
765 	if (!hwmgr->hardcode_pp_table) {
766 		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
767 						   hwmgr->soft_pp_table_size,
769 		if (!hwmgr->hardcode_pp_table) {
770 			mutex_unlock(&hwmgr->smu_lock);
775 	memcpy(hwmgr->hardcode_pp_table, buf, size);
777 	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
778 	mutex_unlock(&hwmgr->smu_lock);
780 	ret = amd_powerplay_reset(handle);
784 	if (hwmgr->hwmgr_func->avfs_control) {
785 		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
/*
 * pp_dpm_force_clock_level - force specific clock levels (bit @mask) for
 * clock @type; only honored while the DPM level is MANUAL.
 */
793 static int pp_dpm_force_clock_level(void *handle,
794 		enum pp_clock_type type, uint32_t mask)
796 	struct pp_hwmgr *hwmgr = handle;
799 	ret = pp_check(hwmgr);
804 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
805 		pr_info("%s was not implemented.\n", __func__);
808 	mutex_lock(&hwmgr->smu_lock);
	/* Manual mode only; otherwise the request is rejected (error path in
	 * the lines missing from this excerpt). */
809 	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
810 		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
813 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_dpm_print_clock_levels - format the available levels for clock @type
 * into @buf (sysfs-style output) via the backend.
 */
817 static int pp_dpm_print_clock_levels(void *handle,
818 		enum pp_clock_type type, char *buf)
820 	struct pp_hwmgr *hwmgr = handle;
823 	ret = pp_check(hwmgr);
828 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
829 		pr_info("%s was not implemented.\n", __func__);
832 	mutex_lock(&hwmgr->smu_lock);
833 	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
834 	mutex_unlock(&hwmgr->smu_lock);
/* pp_dpm_get_sclk_od - read the engine-clock overdrive percentage. */
838 static int pp_dpm_get_sclk_od(void *handle)
840 	struct pp_hwmgr *hwmgr = handle;
843 	ret = pp_check(hwmgr);
848 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
849 		pr_info("%s was not implemented.\n", __func__);
852 	mutex_lock(&hwmgr->smu_lock);
853 	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
854 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_set_sclk_od - set the engine-clock overdrive percentage. */
858 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
860 	struct pp_hwmgr *hwmgr = handle;
863 	ret = pp_check(hwmgr);
868 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
869 		pr_info("%s was not implemented.\n", __func__);
873 	mutex_lock(&hwmgr->smu_lock);
874 	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
875 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_get_mclk_od - read the memory-clock overdrive percentage. */
879 static int pp_dpm_get_mclk_od(void *handle)
881 	struct pp_hwmgr *hwmgr = handle;
884 	ret = pp_check(hwmgr);
889 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
890 		pr_info("%s was not implemented.\n", __func__);
893 	mutex_lock(&hwmgr->smu_lock);
894 	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
895 	mutex_unlock(&hwmgr->smu_lock);

/* pp_dpm_set_mclk_od - set the memory-clock overdrive percentage. */
899 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
901 	struct pp_hwmgr *hwmgr = handle;
904 	ret = pp_check(hwmgr);
909 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
910 		pr_info("%s was not implemented.\n", __func__);
913 	mutex_lock(&hwmgr->smu_lock);
914 	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
915 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_dpm_read_sensor - read a sensor value into @value/@size. Stable-pstate
 * sclk/mclk are answered from cached hwmgr fields without touching the SMU;
 * everything else goes through the backend's read_sensor under smu_lock.
 * NOTE(review): the `switch (idx)` line and case `return 0;`s are among the
 * lines missing from this excerpt.
 */
919 static int pp_dpm_read_sensor(void *handle, int idx,
920 			      void *value, int *size)
922 	struct pp_hwmgr *hwmgr = handle;
925 	ret = pp_check(hwmgr);
933 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
934 		*((uint32_t *)value) = hwmgr->pstate_sclk;
936 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
937 		*((uint32_t *)value) = hwmgr->pstate_mclk;
940 		mutex_lock(&hwmgr->smu_lock);
941 		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
942 		mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_dpm_get_vce_clock_state - return a borrowed pointer to VCE clock state
 * table entry @idx, or (in lines missing from this excerpt) NULL when out
 * of range.
 */
947 static struct amd_vce_state*
948 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
950 	struct pp_hwmgr *hwmgr = handle;
953 	ret = pp_check(hwmgr);
958 	if (hwmgr && idx < hwmgr->num_vce_state_tables)
959 		return &hwmgr->vce_states[idx];
/*
 * pp_get_power_profile_mode - print the backend's power-profile table into
 * @buf; emits a bare newline when the backend has no implementation.
 * Note: checks pp_check inline rather than via the usual ret pattern.
 */
963 static int pp_get_power_profile_mode(void *handle, char *buf)
965 	struct pp_hwmgr *hwmgr = handle;
967 	if (!buf || pp_check(hwmgr))
970 	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
971 		pr_info("%s was not implemented.\n", __func__);
972 		return snprintf(buf, PAGE_SIZE, "\n");
975 	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);

/*
 * pp_set_power_profile_mode - apply a custom power-profile setting; only
 * honored while the DPM level is MANUAL.
 * NOTE(review): the pp_check guard for this function is among the lines
 * missing from this excerpt.
 */
978 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
980 	struct pp_hwmgr *hwmgr = handle;
986 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
987 		pr_info("%s was not implemented.\n", __func__);
990 	mutex_lock(&hwmgr->smu_lock);
991 	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
992 		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
993 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_odn_edit_dpm_table - forward an OverDriveN DPM-table edit to the
 * backend. No smu_lock here, unlike the neighbouring wrappers.
 */
997 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
999 	struct pp_hwmgr *hwmgr = handle;
1001 	if (pp_check(hwmgr))
1004 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
1005 		pr_info("%s was not implemented.\n", __func__);
1009 	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
/*
 * pp_dpm_switch_power_profile - enable/disable workload profile @type.
 * Maintains a priority bitmask of active workloads; the highest-priority
 * set bit (fls) selects which workload_setting is pushed to the backend.
 * The backend call is skipped in MANUAL mode. @type must be below
 * PP_SMC_POWER_PROFILE_CUSTOM.
 * NOTE(review): the `if (!en)`/`else` branch lines are missing from this
 * excerpt — the first index/workload group is the disable path, the second
 * the enable path.
 */
1012 static int pp_dpm_switch_power_profile(void *handle,
1013 		enum PP_SMC_POWER_PROFILE type, bool en)
1015 	struct pp_hwmgr *hwmgr = handle;
1019 	if (pp_check(hwmgr))
1022 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
1023 		pr_info("%s was not implemented.\n", __func__);
1027 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1030 	mutex_lock(&hwmgr->smu_lock);
1033 		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
1034 		index = fls(hwmgr->workload_mask);
1035 		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
1036 		workload = hwmgr->workload_setting[index];
1038 		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
1039 		index = fls(hwmgr->workload_mask);
1040 		index = index <= Workload_Policy_Max ? index - 1 : 0;
1041 		workload = hwmgr->workload_setting[index];
1044 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1045 		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
1046 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_dpm_notify_smu_memory_info - tell the SMU where the CAC buffer lives
 * (split 64-bit virtual and MC addresses) via the backend's
 * notify_cac_buffer_info. A size argument appears in the lines missing
 * from this excerpt.
 */
1051 static int pp_dpm_notify_smu_memory_info(void *handle,
1052 		uint32_t virtual_addr_low,
1053 		uint32_t virtual_addr_hi,
1054 		uint32_t mc_addr_low,
1055 		uint32_t mc_addr_hi,
1058 	struct pp_hwmgr *hwmgr = handle;
1061 	ret = pp_check(hwmgr);
1066 	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
1067 		pr_info("%s was not implemented.\n", __func__);
1071 	mutex_lock(&hwmgr->smu_lock);
1073 	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
1074 					virtual_addr_hi, mc_addr_low, mc_addr_hi,
1077 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_set_power_limit - set the power cap in watts. A limit of 0 (guard not
 * visible in this excerpt) resets to the default; values above the default
 * are rejected. Caches the applied limit in hwmgr->power_limit.
 */
1082 static int pp_set_power_limit(void *handle, uint32_t limit)
1084 	struct pp_hwmgr *hwmgr = handle;
1087 	ret = pp_check(hwmgr);
1092 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1093 		pr_info("%s was not implemented.\n", __func__);
1098 		limit = hwmgr->default_power_limit;
1100 	if (limit > hwmgr->default_power_limit)
1103 	mutex_lock(&hwmgr->smu_lock);
1104 	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1105 	hwmgr->power_limit = limit;
1106 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_get_power_limit - return either the board default or the currently
 * applied power cap, chosen by @default_limit (branch keywords are among
 * the missing lines in this excerpt).
 */
1110 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1112 	struct pp_hwmgr *hwmgr = handle;
1115 	ret = pp_check(hwmgr);
1123 	mutex_lock(&hwmgr->smu_lock);
1126 		*limit = hwmgr->default_power_limit;
1128 		*limit = hwmgr->power_limit;
1130 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_display_configuration_change - store the display configuration handed
 * down by DAL so DPM decisions account for it.
 */
1135 static int pp_display_configuration_change(void *handle,
1136 	const struct amd_pp_display_configuration *display_config)
1138 	struct pp_hwmgr *hwmgr = handle;
1141 	ret = pp_check(hwmgr);
1146 	mutex_lock(&hwmgr->smu_lock);
1147 	phm_store_dal_configuration_data(hwmgr, display_config);
1148 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_get_display_power_level - query the simple clock info DAL uses for
 * display power-level decisions.
 */
1152 static int pp_get_display_power_level(void *handle,
1153 		struct amd_pp_simple_clock_info *output)
1155 	struct pp_hwmgr *hwmgr = handle;
1158 	ret = pp_check(hwmgr);
1166 	mutex_lock(&hwmgr->smu_lock);
1167 	ret = phm_get_dal_power_level(hwmgr, output);
1168 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_get_current_clocks - report current engine/memory clock ranges and bus
 * bandwidth to DAL. Uses the PowerContainment performance-level designation
 * when that cap is enabled, otherwise Activity; shallow-sleep clocks refine
 * the *_in_sr fields when available.
 * NOTE(review): error-return lines after the phm_get_clock_info failure log
 * are missing from this excerpt.
 */
1172 static int pp_get_current_clocks(void *handle,
1173 		struct amd_pp_clock_info *clocks)
1175 	struct amd_pp_simple_clock_info simple_clocks;
1176 	struct pp_clock_info hw_clocks;
1177 	struct pp_hwmgr *hwmgr = handle;
1180 	ret = pp_check(hwmgr);
1185 	mutex_lock(&hwmgr->smu_lock);
1187 	phm_get_dal_power_level(hwmgr, &simple_clocks);
1189 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1190 					PHM_PlatformCaps_PowerContainment))
1191 		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1192 					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1194 		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1195 					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1198 		pr_info("Error in phm_get_clock_info \n");
1199 		mutex_unlock(&hwmgr->smu_lock);
1203 	clocks->min_engine_clock = hw_clocks.min_eng_clk;
1204 	clocks->max_engine_clock = hw_clocks.max_eng_clk;
1205 	clocks->min_memory_clock = hw_clocks.min_mem_clk;
1206 	clocks->max_memory_clock = hw_clocks.max_mem_clk;
1207 	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1208 	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1210 	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1211 	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1213 	clocks->max_clocks_state = simple_clocks.level;
	/* Override *_in_sr with true shallow-sleep clocks when the query
	 * succeeds. */
1215 	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1216 		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1217 		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1219 	mutex_unlock(&hwmgr->smu_lock);
/* pp_get_clock_by_type - list clocks of @type via phm, under smu_lock. */
1223 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1225 	struct pp_hwmgr *hwmgr = handle;
1228 	ret = pp_check(hwmgr);
1236 	mutex_lock(&hwmgr->smu_lock);
1237 	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1238 	mutex_unlock(&hwmgr->smu_lock);

/* pp_get_clock_by_type_with_latency - clock levels annotated with latency,
 * for DAL bandwidth calculations. */
1242 static int pp_get_clock_by_type_with_latency(void *handle,
1243 		enum amd_pp_clock_type type,
1244 		struct pp_clock_levels_with_latency *clocks)
1246 	struct pp_hwmgr *hwmgr = handle;
1249 	ret = pp_check(hwmgr);
1256 	mutex_lock(&hwmgr->smu_lock);
1257 	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1258 	mutex_unlock(&hwmgr->smu_lock);

/* pp_get_clock_by_type_with_voltage - clock levels annotated with voltage. */
1262 static int pp_get_clock_by_type_with_voltage(void *handle,
1263 		enum amd_pp_clock_type type,
1264 		struct pp_clock_levels_with_voltage *clocks)
1266 	struct pp_hwmgr *hwmgr = handle;
1269 	ret = pp_check(hwmgr);
1276 	mutex_lock(&hwmgr->smu_lock);
1278 	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1280 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_set_watermarks_for_clocks_ranges - push DAL's SOC15 watermark/clock
 * ranges down to the hardware manager; rejects a NULL ranges pointer.
 */
1284 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1285 		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
1287 	struct pp_hwmgr *hwmgr = handle;
1290 	ret = pp_check(hwmgr);
1294 	if (!wm_with_clock_ranges)
1297 	mutex_lock(&hwmgr->smu_lock);
1298 	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1299 			wm_with_clock_ranges);
1300 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_display_clock_voltage_request - forward a display clock/voltage
 * request from DAL to the hardware manager.
 */
1305 static int pp_display_clock_voltage_request(void *handle,
1306 		struct pp_display_clock_request *clock)
1308 	struct pp_hwmgr *hwmgr = handle;
1311 	ret = pp_check(hwmgr);
1318 	mutex_lock(&hwmgr->smu_lock);
1319 	ret = phm_display_clock_voltage_request(hwmgr, clock);
1320 	mutex_unlock(&hwmgr->smu_lock);

/*
 * pp_get_display_mode_validation_clocks - report max high clocks for DAL
 * mode validation, but only when DynamicPatchPowerState is enabled.
 */
1325 static int pp_get_display_mode_validation_clocks(void *handle,
1326 		struct amd_pp_simple_clock_info *clocks)
1328 	struct pp_hwmgr *hwmgr = handle;
1331 	ret = pp_check(hwmgr);
1339 	mutex_lock(&hwmgr->smu_lock);
1341 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1342 		ret = phm_get_max_high_clocks(hwmgr, clocks);
1344 	mutex_unlock(&hwmgr->smu_lock);
/*
 * pp_set_mmhub_powergating_by_smu - ask the backend to powergate the MMHUB
 * block through the SMU; informational no-op when unimplemented.
 */
1348 static int pp_set_mmhub_powergating_by_smu(void *handle)
1350 	struct pp_hwmgr *hwmgr = handle;
1353 	ret = pp_check(hwmgr);
1358 	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
1359 		pr_info("%s was not implemented.\n", __func__);
1363 	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
/* Public power-management dispatch table; installed on adev->powerplay in
 * amd_powerplay_create so the amdgpu core and DAL call powerplay through
 * it. The closing brace is outside this excerpt. */
1366 static const struct amd_pm_funcs pp_dpm_funcs = {
1367 	.load_firmware = pp_dpm_load_fw,
1368 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1369 	.force_performance_level = pp_dpm_force_performance_level,
1370 	.get_performance_level = pp_dpm_get_performance_level,
1371 	.get_current_power_state = pp_dpm_get_current_power_state,
1372 	.powergate_vce = pp_dpm_powergate_vce,
1373 	.powergate_uvd = pp_dpm_powergate_uvd,
1374 	.dispatch_tasks = pp_dpm_dispatch_tasks,
1375 	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
1376 	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
1377 	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
1378 	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
1379 	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1380 	.get_pp_num_states = pp_dpm_get_pp_num_states,
1381 	.get_pp_table = pp_dpm_get_pp_table,
1382 	.set_pp_table = pp_dpm_set_pp_table,
1383 	.force_clock_level = pp_dpm_force_clock_level,
1384 	.print_clock_levels = pp_dpm_print_clock_levels,
1385 	.get_sclk_od = pp_dpm_get_sclk_od,
1386 	.set_sclk_od = pp_dpm_set_sclk_od,
1387 	.get_mclk_od = pp_dpm_get_mclk_od,
1388 	.set_mclk_od = pp_dpm_set_mclk_od,
1389 	.read_sensor = pp_dpm_read_sensor,
1390 	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
1391 	.switch_power_profile = pp_dpm_switch_power_profile,
1392 	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
1393 	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
1394 	.get_power_profile_mode = pp_get_power_profile_mode,
1395 	.set_power_profile_mode = pp_set_power_profile_mode,
1396 	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
1397 	.set_power_limit = pp_set_power_limit,
1398 	.get_power_limit = pp_get_power_limit,
/* export to DC (display core) */
1400 	.get_sclk = pp_dpm_get_sclk,
1401 	.get_mclk = pp_dpm_get_mclk,
1402 	.display_configuration_change = pp_display_configuration_change,
1403 	.get_display_power_level = pp_get_display_power_level,
1404 	.get_current_clocks = pp_get_current_clocks,
1405 	.get_clock_by_type = pp_get_clock_by_type,
1406 	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1407 	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1408 	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1409 	.display_clock_voltage_request = pp_display_clock_voltage_request,
1410 	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1411 	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,