drivers/gpu/drm/amd/powerplay/amd_powerplay.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"

#define PP_DPM_DISABLED 0xCCCC

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
                enum amd_pm_state_type *user_state);

static const struct amd_pm_funcs pp_dpm_funcs;

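/*
 * Check that the powerplay core is usable before touching it.
 * Returns 0 when the hwmgr is fully set up, -EINVAL when the hwmgr or its
 * SMU manager callbacks are missing, and PP_DPM_DISABLED when power
 * management is present but not enabled for this device.
 */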
static inline int pp_check(struct pp_hwmgr *hwmgr)
{
        if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
                return -EINVAL;

        if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
                return PP_DPM_DISABLED;

        return 0;
}

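/*
 * Allocate the pp_hwmgr instance for this device, record the chip family,
 * chip id and feature mask, and hook the handle and the amd_pm_funcs table
 * into adev->powerplay so the rest of amdgpu can reach it.
 */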
static int amd_powerplay_create(struct amdgpu_device *adev)
{
        struct pp_hwmgr *hwmgr;

        if (adev == NULL)
                return -EINVAL;

        hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
        if (hwmgr == NULL)
                return -ENOMEM;

        hwmgr->adev = adev;
        hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
        hwmgr->device = amdgpu_cgs_create_device(adev);
        mutex_init(&hwmgr->smu_lock);
        hwmgr->chip_family = adev->family;
        hwmgr->chip_id = adev->asic_type;
        hwmgr->feature_mask = amdgpu_pp_feature_mask;
        adev->powerplay.pp_handle = hwmgr;
        adev->powerplay.pp_funcs = &pp_dpm_funcs;
        return 0;
}


static int amd_powerplay_destroy(struct amdgpu_device *adev)
{
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        kfree(hwmgr->hardcode_pp_table);
        hwmgr->hardcode_pp_table = NULL;

        kfree(hwmgr);
        hwmgr = NULL;

        return 0;
}

static int pp_early_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = handle;

        ret = amd_powerplay_create(adev);

        if (ret != 0)
                return ret;

        ret = hwmgr_early_init(adev->powerplay.pp_handle);
        if (ret)
                return -EINVAL;

        return 0;
}

static int pp_sw_init(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret >= 0) {
                if (hwmgr->smumgr_funcs->smu_init == NULL)
                        return -EINVAL;

                ret = hwmgr->smumgr_funcs->smu_init(hwmgr);

                phm_register_irq_handlers(hwmgr);

                pr_debug("amdgpu: powerplay sw initialized\n");
        }

        return ret;
}

static int pp_sw_fini(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret >= 0) {
                if (hwmgr->smumgr_funcs->smu_fini != NULL)
                        hwmgr->smumgr_funcs->smu_fini(hwmgr);
        }

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
                amdgpu_ucode_fini_bo(adev);

        return 0;
}

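/*
 * Bring up the SMC: set up the firmware buffer when the SMU loads microcode,
 * start the SMU, then run the full hwmgr hardware init.  If DPM is disabled
 * (pp_check() returned PP_DPM_DISABLED) or the hardware init fails, power
 * management is switched off and cgs_notify_dpm_enabled() reports it.
 */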
static int pp_hw_init(void *handle)
{
        int ret = 0;
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
                amdgpu_ucode_init_bo(adev);

        ret = pp_check(hwmgr);

        if (ret >= 0) {
                if (hwmgr->smumgr_funcs->start_smu == NULL)
                        return -EINVAL;

                if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
                        pr_err("smc start failed\n");
                        hwmgr->smumgr_funcs->smu_fini(hwmgr);
                        return -EINVAL;
                }
                if (ret == PP_DPM_DISABLED)
                        goto exit;
                ret = hwmgr_hw_init(hwmgr);
                if (ret)
                        goto exit;
        }
        return ret;
exit:
        hwmgr->pm_en = 0;
        cgs_notify_dpm_enabled(hwmgr->device, false);
        return 0;

}

static int pp_hw_fini(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret == 0)
                hwmgr_hw_fini(hwmgr);

        return 0;
}

static int pp_late_init(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret == 0)
                pp_dpm_dispatch_tasks(hwmgr,
                                        AMD_PP_TASK_COMPLETE_INIT, NULL);

        return 0;
}

static void pp_late_fini(void *handle)
{
        struct amdgpu_device *adev = handle;

        amd_powerplay_destroy(adev);
}


static bool pp_is_idle(void *handle)
{
        return false;
}

static int pp_wait_for_idle(void *handle)
{
        return 0;
}

static int pp_sw_reset(void *handle)
{
        return 0;
}

static int pp_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        /* Enable/disable GFX per cu powergating through SMU */
        return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
                        state == AMD_PG_STATE_GATE);
}

static int pp_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret == 0)
                hwmgr_hw_suspend(hwmgr);
        return 0;
}

static int pp_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret;

        ret = pp_check(hwmgr);

        if (ret < 0)
                return ret;

        if (hwmgr->smumgr_funcs->start_smu == NULL)
                return -EINVAL;

        if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
                pr_err("smc start failed\n");
                hwmgr->smumgr_funcs->smu_fini(hwmgr);
                return -EINVAL;
        }

        if (ret == PP_DPM_DISABLED)
                return 0;

        return hwmgr_hw_resume(hwmgr);
}

static int pp_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

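/*
 * amd_ip_funcs hooks that register powerplay with the amdgpu IP block
 * framework; pp_smu_ip_block below exports them as the SMC IP block v1.0.
 */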
static const struct amd_ip_funcs pp_ip_funcs = {
        .name = "powerplay",
        .early_init = pp_early_init,
        .late_init = pp_late_init,
        .sw_init = pp_sw_init,
        .sw_fini = pp_sw_fini,
        .hw_init = pp_hw_init,
        .hw_fini = pp_hw_fini,
        .late_fini = pp_late_fini,
        .suspend = pp_suspend,
        .resume = pp_resume,
        .is_idle = pp_is_idle,
        .wait_for_idle = pp_wait_for_idle,
        .soft_reset = pp_sw_reset,
        .set_clockgating_state = pp_set_clockgating_state,
        .set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &pp_ip_funcs,
};

static int pp_dpm_load_fw(void *handle)
{
        return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
        return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

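/*
 * Handle transitions in and out of the UMD stable-pstate profile levels.
 * Entering a profile level saves the current DPM level and ungates GFX
 * clock/power gating so measurements stay stable; leaving a profile level
 * (or PROFILE_EXIT) restores the saved level and re-enables gating.
 */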
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
                                                enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        if (!(hwmgr->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        hwmgr->saved_dpm_level = hwmgr->dpm_level;
                        hwmgr->en_umd_pstate = true;
                        cgs_set_clockgating_state(hwmgr->device,
                                                AMD_IP_BLOCK_TYPE_GFX,
                                                AMD_CG_STATE_UNGATE);
                        cgs_set_powergating_state(hwmgr->device,
                                        AMD_IP_BLOCK_TYPE_GFX,
                                        AMD_PG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = hwmgr->saved_dpm_level;
                        hwmgr->en_umd_pstate = false;
                        cgs_set_clockgating_state(hwmgr->device,
                                        AMD_IP_BLOCK_TYPE_GFX,
                                        AMD_CG_STATE_GATE);
                        cgs_set_powergating_state(hwmgr->device,
                                        AMD_IP_BLOCK_TYPE_GFX,
                                        AMD_PG_STATE_GATE);
                }
        }
}

static int pp_dpm_force_performance_level(void *handle,
                                        enum amd_dpm_forced_level level)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (level == hwmgr->dpm_level)
                return 0;

        mutex_lock(&hwmgr->smu_lock);
        pp_dpm_en_umd_pstate(hwmgr, &level);
        hwmgr->request_dpm_level = level;
        hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
                                                                void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
        enum amd_dpm_forced_level level;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        mutex_lock(&hwmgr->smu_lock);
        level = hwmgr->dpm_level;
        mutex_unlock(&hwmgr->smu_lock);
        return level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
        uint32_t clk = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->get_sclk == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
        mutex_unlock(&hwmgr->smu_lock);
        return clk;
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
        uint32_t clk = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->get_mclk == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
        mutex_unlock(&hwmgr->smu_lock);
        return clk;
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return;

        if (hwmgr->hwmgr_func->powergate_vce == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return;
        }
        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
        mutex_unlock(&hwmgr->smu_lock);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return;

        if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return;
        }
        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
        mutex_unlock(&hwmgr->smu_lock);
}

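/*
 * Forward an amd_pp_task (e.g. AMD_PP_TASK_READJUST_POWER_STATE or
 * AMD_PP_TASK_COMPLETE_INIT, both used in this file) to hwmgr_handle_task()
 * under smu_lock.
 */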
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
                enum amd_pm_state_type *user_state)
{
        int ret = 0;
        struct pp_hwmgr *hwmgr = handle;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr_handle_task(hwmgr, task_id, user_state);
        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        struct pp_power_state *state;
        int ret = 0;
        enum amd_pm_state_type pm_type;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->current_ps == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        state = hwmgr->current_ps;

        switch (state->classification.ui_label) {
        case PP_StateUILabel_Battery:
                pm_type = POWER_STATE_TYPE_BATTERY;
                break;
        case PP_StateUILabel_Balanced:
                pm_type = POWER_STATE_TYPE_BALANCED;
                break;
        case PP_StateUILabel_Performance:
                pm_type = POWER_STATE_TYPE_PERFORMANCE;
                break;
        default:
                if (state->classification.flags & PP_StateClassificationFlag_Boot)
                        pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
                else
                        pm_type = POWER_STATE_TYPE_DEFAULT;
                break;
        }
        mutex_unlock(&hwmgr->smu_lock);

        return pm_type;
}

static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return;

        if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return;
        }
        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
        mutex_unlock(&hwmgr->smu_lock);
}

static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
        uint32_t mode = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
        mutex_unlock(&hwmgr->smu_lock);
        return mode;
}

static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_pp_num_states(void *handle,
                struct pp_states_info *data)
{
        struct pp_hwmgr *hwmgr = handle;
        int i;
        int ret = 0;

        memset(data, 0, sizeof(*data));

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->ps == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        data->nums = hwmgr->num_ps;

        for (i = 0; i < hwmgr->num_ps; i++) {
                struct pp_power_state *state = (struct pp_power_state *)
                                ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
                switch (state->classification.ui_label) {
                case PP_StateUILabel_Battery:
                        data->states[i] = POWER_STATE_TYPE_BATTERY;
                        break;
                case PP_StateUILabel_Balanced:
                        data->states[i] = POWER_STATE_TYPE_BALANCED;
                        break;
                case PP_StateUILabel_Performance:
                        data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
                        break;
                default:
                        if (state->classification.flags & PP_StateClassificationFlag_Boot)
                                data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
                        else
                                data->states[i] = POWER_STATE_TYPE_DEFAULT;
                }
        }
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
        int size = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (!hwmgr->soft_pp_table)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        *table = (char *)hwmgr->soft_pp_table;
        size = hwmgr->soft_pp_table_size;
        mutex_unlock(&hwmgr->smu_lock);
        return size;
}

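/*
 * Tear down and reinitialize the hwmgr hardware state, then replay the
 * COMPLETE_INIT task.  Used by pp_dpm_set_pp_table() below so a newly
 * written soft powerplay table takes effect.
 */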
static int amd_powerplay_reset(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret;

        ret = pp_check(hwmgr);
        if (ret)
                return ret;

        ret = hwmgr_hw_fini(hwmgr);
        if (ret)
                return ret;

        ret = hwmgr_hw_init(hwmgr);
        if (ret)
                return ret;

        return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        mutex_lock(&hwmgr->smu_lock);
        if (!hwmgr->hardcode_pp_table) {
                hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
                                                   hwmgr->soft_pp_table_size,
                                                   GFP_KERNEL);
                if (!hwmgr->hardcode_pp_table) {
                        mutex_unlock(&hwmgr->smu_lock);
                        return -ENOMEM;
                }
        }

        memcpy(hwmgr->hardcode_pp_table, buf, size);

        hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
        mutex_unlock(&hwmgr->smu_lock);

        ret = amd_powerplay_reset(handle);
        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->avfs_control) {
                ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
                if (ret)
                        return ret;
        }

        return 0;
}

static int pp_dpm_force_clock_level(void *handle,
                enum pp_clock_type type, uint32_t mask)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->force_clock_level == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
                ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
        else
                ret = -EINVAL;
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_print_clock_levels(void *handle,
                enum pp_clock_type type, char *buf)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_sclk_od(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_mclk_od(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

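/*
 * Read a sensor value.  The stable-pstate sclk/mclk sensors are answered
 * from cached hwmgr fields without taking smu_lock; everything else is
 * forwarded to the ASIC-specific read_sensor() callback under the lock.
 */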
static int pp_dpm_read_sensor(void *handle, int idx,
                              void *value, int *size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret)
                return ret;

        if (value == NULL)
                return -EINVAL;

        switch (idx) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)value) = hwmgr->pstate_sclk;
                return 0;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)value) = hwmgr->pstate_mclk;
                return 0;
        default:
                mutex_lock(&hwmgr->smu_lock);
                ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
                mutex_unlock(&hwmgr->smu_lock);
                return ret;
        }
}

static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return NULL;

        if (hwmgr && idx < hwmgr->num_vce_state_tables)
                return &hwmgr->vce_states[idx];
        return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!buf || pp_check(hwmgr))
                return -EINVAL;

        if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return snprintf(buf, PAGE_SIZE, "\n");
        }

        return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = -EINVAL;

        if (pp_check(hwmgr))
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }
        mutex_lock(&hwmgr->smu_lock);
        if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
                ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
        struct pp_hwmgr *hwmgr = handle;

        if (pp_check(hwmgr))
                return -EINVAL;

        if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

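/*
 * Enable or disable one SMC power profile.  Active profiles are tracked as
 * a bitmask ordered by hwmgr->workload_prority[]; the highest set bit picks
 * the workload that is pushed to set_power_profile_mode() unless the user
 * has forced manual DPM control.
 */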
static int pp_dpm_switch_power_profile(void *handle,
                enum PP_SMC_POWER_PROFILE type, bool en)
{
        struct pp_hwmgr *hwmgr = handle;
        long workload;
        uint32_t index;

        if (pp_check(hwmgr))
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        if (!en) {
                hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
                index = fls(hwmgr->workload_mask);
                index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
                workload = hwmgr->workload_setting[index];
        } else {
                hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
                index = fls(hwmgr->workload_mask);
                index = index <= Workload_Policy_Max ? index - 1 : 0;
                workload = hwmgr->workload_setting[index];
        }

        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}

static int pp_dpm_notify_smu_memory_info(void *handle,
                                        uint32_t virtual_addr_low,
                                        uint32_t virtual_addr_hi,
                                        uint32_t mc_addr_low,
                                        uint32_t mc_addr_hi,
                                        uint32_t size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        mutex_lock(&hwmgr->smu_lock);

        ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
                                        virtual_addr_hi, mc_addr_low, mc_addr_hi,
                                        size);

        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

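/*
 * Set the power cap.  A value of 0 restores the default limit, requests
 * above the default limit are rejected, and the accepted value is cached
 * in hwmgr->power_limit for pp_get_power_limit() below.
 */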
static int pp_set_power_limit(void *handle, uint32_t limit)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->set_power_limit == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        if (limit == 0)
                limit = hwmgr->default_power_limit;

        if (limit > hwmgr->default_power_limit)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
        hwmgr->power_limit = limit;
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (limit == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        if (default_limit)
                *limit = hwmgr->default_power_limit;
        else
                *limit = hwmgr->power_limit;

        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

static int pp_display_configuration_change(void *handle,
        const struct amd_pp_display_configuration *display_config)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        mutex_lock(&hwmgr->smu_lock);
        phm_store_dal_configuration_data(hwmgr, display_config);
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}

static int pp_get_display_power_level(void *handle,
                struct amd_pp_simple_clock_info *output)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (output == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_get_dal_power_level(hwmgr, output);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

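/*
 * Report the clock ranges of the current power state to the display code.
 * The performance level designation depends on whether the PowerContainment
 * platform cap is enabled, and shallow-sleep clocks override the *_in_sr
 * fields when they can be queried.
 */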
static int pp_get_current_clocks(void *handle,
                struct amd_pp_clock_info *clocks)
{
        struct amd_pp_simple_clock_info simple_clocks;
        struct pp_clock_info hw_clocks;
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        mutex_lock(&hwmgr->smu_lock);

        phm_get_dal_power_level(hwmgr, &simple_clocks);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_PowerContainment))
                ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
                                        &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
        else
                ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
                                        &hw_clocks, PHM_PerformanceLevelDesignation_Activity);

        if (ret) {
                pr_info("Error in phm_get_clock_info\n");
                mutex_unlock(&hwmgr->smu_lock);
                return -EINVAL;
        }

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        clocks->max_clocks_state = simple_clocks.level;

        if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (clocks == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_get_clock_by_type(hwmgr, type, clocks);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_get_clock_by_type_with_latency(void *handle,
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_latency *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret)
                return ret;

        if (!clocks)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_get_clock_by_type_with_voltage(void *handle,
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_voltage *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret)
                return ret;

        if (!clocks)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
                struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret)
                return ret;

        if (!wm_with_clock_ranges)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
                        wm_with_clock_ranges);
        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

static int pp_display_clock_voltage_request(void *handle,
                struct pp_display_clock_request *clock)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);
        if (ret)
                return ret;

        if (!clock)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_display_clock_voltage_request(hwmgr, clock);
        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

static int pp_get_display_mode_validation_clocks(void *handle,
                struct amd_pp_simple_clock_info *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (clocks == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
                ret = phm_get_max_high_clocks(hwmgr, clocks);

        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_set_mmhub_powergating_by_smu(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        ret = pp_check(hwmgr);

        if (ret)
                return ret;

        if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}

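/*
 * amd_pm_funcs dispatch table exposed to the rest of amdgpu through
 * adev->powerplay.pp_funcs; the entries below the "export to DC" marker
 * are the ones consumed by the display core.
 */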
static const struct amd_pm_funcs pp_dpm_funcs = {
        .load_firmware = pp_dpm_load_fw,
        .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
        .force_performance_level = pp_dpm_force_performance_level,
        .get_performance_level = pp_dpm_get_performance_level,
        .get_current_power_state = pp_dpm_get_current_power_state,
        .powergate_vce = pp_dpm_powergate_vce,
        .powergate_uvd = pp_dpm_powergate_uvd,
        .dispatch_tasks = pp_dpm_dispatch_tasks,
        .set_fan_control_mode = pp_dpm_set_fan_control_mode,
        .get_fan_control_mode = pp_dpm_get_fan_control_mode,
        .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
        .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
        .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
        .get_pp_num_states = pp_dpm_get_pp_num_states,
        .get_pp_table = pp_dpm_get_pp_table,
        .set_pp_table = pp_dpm_set_pp_table,
        .force_clock_level = pp_dpm_force_clock_level,
        .print_clock_levels = pp_dpm_print_clock_levels,
        .get_sclk_od = pp_dpm_get_sclk_od,
        .set_sclk_od = pp_dpm_set_sclk_od,
        .get_mclk_od = pp_dpm_get_mclk_od,
        .set_mclk_od = pp_dpm_set_mclk_od,
        .read_sensor = pp_dpm_read_sensor,
        .get_vce_clock_state = pp_dpm_get_vce_clock_state,
        .switch_power_profile = pp_dpm_switch_power_profile,
        .set_clockgating_by_smu = pp_set_clockgating_by_smu,
        .notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
        .get_power_profile_mode = pp_get_power_profile_mode,
        .set_power_profile_mode = pp_set_power_profile_mode,
        .odn_edit_dpm_table = pp_odn_edit_dpm_table,
        .set_power_limit = pp_set_power_limit,
        .get_power_limit = pp_get_power_limit,
/* export to DC */
        .get_sclk = pp_dpm_get_sclk,
        .get_mclk = pp_dpm_get_mclk,
        .display_configuration_change = pp_display_configuration_change,
        .get_display_power_level = pp_get_display_power_level,
        .get_current_clocks = pp_get_current_clocks,
        .get_clock_by_type = pp_get_clock_by_type,
        .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
        .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
        .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
        .display_clock_voltage_request = pp_display_clock_voltage_request,
        .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
        .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};