1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/delay.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27
28 #include "hwmgr.h"
29 #include "amd_powerplay.h"
30 #include "vega12_smumgr.h"
31 #include "hardwaremanager.h"
32 #include "ppatomfwctrl.h"
33 #include "atomfirmware.h"
34 #include "cgs_common.h"
35 #include "vega12_inc.h"
36 #include "pppcielanes.h"
37 #include "vega12_hwmgr.h"
38 #include "vega12_processpptables.h"
39 #include "vega12_pptable.h"
40 #include "vega12_thermal.h"
41 #include "vega12_ppsmc.h"
42 #include "pp_debug.h"
43 #include "amd_pcie_helpers.h"
44 #include "ppinterrupt.h"
45 #include "pp_overdriver.h"
46 #include "pp_thermal.h"
47 #include "vega12_baco.h"
48
49 #define smnPCIE_LC_SPEED_CNTL                   0x11140290
50 #define smnPCIE_LC_LINK_WIDTH_CNTL              0x11140288
51
52 #define LINK_WIDTH_MAX                          6
53 #define LINK_SPEED_MAX                          3
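/*
 * Decode tables used with the smnPCIE_LC_* registers above: link_width[] maps
 * the link-width field to a lane count (x0..x16) and link_speed[] maps the
 * link-speed field to a rate in 0.1 GT/s units, i.e. 2.5/5.0/8.0/16.0 GT/s
 * for PCIe Gen1 through Gen4.
 */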
54 static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
55 static const int link_speed[] = {25, 50, 80, 160};
56
57 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
58                 enum pp_clock_type type, uint32_t mask);
59 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
60                 uint32_t *clock,
61                 PPCLK_e clock_select,
62                 bool max);
63
64 static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
65 {
66         struct vega12_hwmgr *data =
67                         (struct vega12_hwmgr *)(hwmgr->backend);
68
69         data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
70         data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
71         data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
72         data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
73         data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;
74
75         data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
76         data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
77         data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
78         data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
79         data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
80         data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
81         data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
82         data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
83         data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
84         data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
85         data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
86         data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
87         data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
88
89         data->registry_data.disallowed_features = 0x0;
90         data->registry_data.od_state_in_dc_support = 0;
91         data->registry_data.thermal_support = 1;
92         data->registry_data.skip_baco_hardware = 0;
93
94         data->registry_data.log_avfs_param = 0;
95         data->registry_data.sclk_throttle_low_notification = 1;
96         data->registry_data.force_dpm_high = 0;
97         data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
98
99         data->registry_data.didt_support = 0;
100         if (data->registry_data.didt_support) {
101                 data->registry_data.didt_mode = 6;
102                 data->registry_data.sq_ramping_support = 1;
103                 data->registry_data.db_ramping_support = 0;
104                 data->registry_data.td_ramping_support = 0;
105                 data->registry_data.tcp_ramping_support = 0;
106                 data->registry_data.dbr_ramping_support = 0;
107                 data->registry_data.edc_didt_support = 1;
108                 data->registry_data.gc_didt_support = 0;
109                 data->registry_data.psm_didt_support = 0;
110         }
111
112         data->registry_data.pcie_lane_override = 0xff;
113         data->registry_data.pcie_speed_override = 0xff;
114         data->registry_data.pcie_clock_override = 0xffffffff;
115         data->registry_data.regulator_hot_gpio_support = 1;
116         data->registry_data.ac_dc_switch_gpio_support = 0;
117         data->registry_data.quick_transition_support = 0;
118         data->registry_data.zrpm_start_temp = 0xffff;
119         data->registry_data.zrpm_stop_temp = 0xffff;
120         data->registry_data.odn_feature_enable = 1;
121         data->registry_data.disable_water_mark = 0;
122         data->registry_data.disable_pp_tuning = 0;
123         data->registry_data.disable_xlpp_tuning = 0;
124         data->registry_data.disable_workload_policy = 0;
125         data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
126         data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
127         data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
128         data->registry_data.force_workload_policy_mask = 0;
129         data->registry_data.disable_3d_fs_detection = 0;
130         data->registry_data.fps_support = 1;
131         data->registry_data.disable_auto_wattman = 1;
132         data->registry_data.auto_wattman_debug = 0;
133         data->registry_data.auto_wattman_sample_period = 100;
134         data->registry_data.auto_wattman_threshold = 50;
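        /* PCIe DPM is keyed off the PP_PCIE_DPM bit of the ppfeaturemask the driver was loaded with. */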
135         data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
136 }
137
138 static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
139 {
140         struct vega12_hwmgr *data =
141                         (struct vega12_hwmgr *)(hwmgr->backend);
142         struct amdgpu_device *adev = hwmgr->adev;
143
144         if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
145                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
146                                 PHM_PlatformCaps_ControlVDDCI);
147
148         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
149                         PHM_PlatformCaps_TablelessHardwareInterface);
150
151         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
152                         PHM_PlatformCaps_EnableSMU7ThermalManagement);
153
154         if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
155                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
156                                 PHM_PlatformCaps_UVDPowerGating);
157                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
158                                 PHM_PlatformCaps_UVDDynamicPowerGating);
159         }
160
161         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
162                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
163                                 PHM_PlatformCaps_VCEPowerGating);
164
165         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
166                         PHM_PlatformCaps_UnTabledHardwareInterface);
167
168         if (data->registry_data.odn_feature_enable)
169                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
170                                 PHM_PlatformCaps_ODNinACSupport);
171         else {
172                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
173                                 PHM_PlatformCaps_OD6inACSupport);
174                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
175                                 PHM_PlatformCaps_OD6PlusinACSupport);
176         }
177
178         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
179                         PHM_PlatformCaps_ActivityReporting);
180         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
181                         PHM_PlatformCaps_FanSpeedInTableIsRPM);
182
183         if (data->registry_data.od_state_in_dc_support) {
184                 if (data->registry_data.odn_feature_enable)
185                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
186                                         PHM_PlatformCaps_ODNinDCSupport);
187                 else {
188                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
189                                         PHM_PlatformCaps_OD6inDCSupport);
190                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
191                                         PHM_PlatformCaps_OD6PlusinDCSupport);
192                 }
193         }
194
195         if (data->registry_data.thermal_support
196                         && data->registry_data.fuzzy_fan_control_support
197                         && hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
198                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
199                                 PHM_PlatformCaps_ODFuzzyFanControlSupport);
200
201         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
202                                 PHM_PlatformCaps_DynamicPowerManagement);
203         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204                         PHM_PlatformCaps_SMC);
205         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206                         PHM_PlatformCaps_ThermalPolicyDelay);
207
208         if (data->registry_data.force_dpm_high)
209                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
210                                 PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
211
212         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
213                         PHM_PlatformCaps_DynamicUVDState);
214
215         if (data->registry_data.sclk_throttle_low_notification)
216                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
217                                 PHM_PlatformCaps_SclkThrottleLowNotification);
218
219         /* power tune caps */
220         /* assume disabled */
221         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
222                         PHM_PlatformCaps_PowerContainment);
223         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224                         PHM_PlatformCaps_DiDtSupport);
225         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226                         PHM_PlatformCaps_SQRamping);
227         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228                         PHM_PlatformCaps_DBRamping);
229         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230                         PHM_PlatformCaps_TDRamping);
231         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232                         PHM_PlatformCaps_TCPRamping);
233         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234                         PHM_PlatformCaps_DBRRamping);
235         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236                         PHM_PlatformCaps_DiDtEDCEnable);
237         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
238                         PHM_PlatformCaps_GCEDC);
239         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
240                         PHM_PlatformCaps_PSM);
241
242         if (data->registry_data.didt_support) {
243                 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
244                 if (data->registry_data.sq_ramping_support)
245                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
246                 if (data->registry_data.db_ramping_support)
247                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
248                 if (data->registry_data.td_ramping_support)
249                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
250                 if (data->registry_data.tcp_ramping_support)
251                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
252                 if (data->registry_data.dbr_ramping_support)
253                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
254                 if (data->registry_data.edc_didt_support)
255                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
256                 if (data->registry_data.gc_didt_support)
257                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
258                 if (data->registry_data.psm_didt_support)
259                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
260         }
261
262         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
263                         PHM_PlatformCaps_RegulatorHot);
264
265         if (data->registry_data.ac_dc_switch_gpio_support) {
266                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
267                                 PHM_PlatformCaps_AutomaticDCTransition);
268                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
269                                 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
270         }
271
272         if (data->registry_data.quick_transition_support) {
273                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
274                                 PHM_PlatformCaps_AutomaticDCTransition);
275                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
276                                 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
277                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
278                                 PHM_PlatformCaps_Falcon_QuickTransition);
279         }
280
281         if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
282                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
283                                 PHM_PlatformCaps_LowestUclkReservedForUlv);
284                 if (data->lowest_uclk_reserved_for_ulv == 1)
285                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
286                                         PHM_PlatformCaps_LowestUclkReservedForUlv);
287         }
288
289         if (data->registry_data.custom_fan_support)
290                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
291                                 PHM_PlatformCaps_CustomFanControlSupport);
292
293         return 0;
294 }
295
296 static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
297 {
298         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
299         struct amdgpu_device *adev = hwmgr->adev;
300         uint32_t top32, bottom32;
301         int i;
302
303         data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
304                         FEATURE_DPM_PREFETCHER_BIT;
305         data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
306                         FEATURE_DPM_GFXCLK_BIT;
307         data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
308                         FEATURE_DPM_UCLK_BIT;
309         data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
310                         FEATURE_DPM_SOCCLK_BIT;
311         data->smu_features[GNLD_DPM_UVD].smu_feature_id =
312                         FEATURE_DPM_UVD_BIT;
313         data->smu_features[GNLD_DPM_VCE].smu_feature_id =
314                         FEATURE_DPM_VCE_BIT;
315         data->smu_features[GNLD_ULV].smu_feature_id =
316                         FEATURE_ULV_BIT;
317         data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
318                         FEATURE_DPM_MP0CLK_BIT;
319         data->smu_features[GNLD_DPM_LINK].smu_feature_id =
320                         FEATURE_DPM_LINK_BIT;
321         data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
322                         FEATURE_DPM_DCEFCLK_BIT;
323         data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
324                         FEATURE_DS_GFXCLK_BIT;
325         data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
326                         FEATURE_DS_SOCCLK_BIT;
327         data->smu_features[GNLD_DS_LCLK].smu_feature_id =
328                         FEATURE_DS_LCLK_BIT;
329         data->smu_features[GNLD_PPT].smu_feature_id =
330                         FEATURE_PPT_BIT;
331         data->smu_features[GNLD_TDC].smu_feature_id =
332                         FEATURE_TDC_BIT;
333         data->smu_features[GNLD_THERMAL].smu_feature_id =
334                         FEATURE_THERMAL_BIT;
335         data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
336                         FEATURE_GFX_PER_CU_CG_BIT;
337         data->smu_features[GNLD_RM].smu_feature_id =
338                         FEATURE_RM_BIT;
339         data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
340                         FEATURE_DS_DCEFCLK_BIT;
341         data->smu_features[GNLD_ACDC].smu_feature_id =
342                         FEATURE_ACDC_BIT;
343         data->smu_features[GNLD_VR0HOT].smu_feature_id =
344                         FEATURE_VR0HOT_BIT;
345         data->smu_features[GNLD_VR1HOT].smu_feature_id =
346                         FEATURE_VR1HOT_BIT;
347         data->smu_features[GNLD_FW_CTF].smu_feature_id =
348                         FEATURE_FW_CTF_BIT;
349         data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
350                         FEATURE_LED_DISPLAY_BIT;
351         data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
352                         FEATURE_FAN_CONTROL_BIT;
353         data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
354         data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
355         data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
356         data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
357
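        /*
         * Each feature's 64-bit bitmap is derived from its SMU feature ID;
         * a feature is allowed unless the bit indexed by the driver-side
         * feature enum is set in the disallowed_features registry mask.
         */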
358         for (i = 0; i < GNLD_FEATURES_MAX; i++) {
359                 data->smu_features[i].smu_feature_bitmap =
360                         (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
361                 data->smu_features[i].allowed =
362                         ((data->registry_data.disallowed_features >> i) & 1) ?
363                         false : true;
364         }
365
366         /* Get the SN to turn into a Unique ID */
367         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
368         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
369
370         adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
371 }
372
373 static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
374 {
375         return 0;
376 }
377
378 static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
379 {
380         kfree(hwmgr->backend);
381         hwmgr->backend = NULL;
382
383         return 0;
384 }
385
386 static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
387 {
388         int result = 0;
389         struct vega12_hwmgr *data;
390         struct amdgpu_device *adev = hwmgr->adev;
391
392         data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
393         if (data == NULL)
394                 return -ENOMEM;
395
396         hwmgr->backend = data;
397
398         vega12_set_default_registry_data(hwmgr);
399
400         data->disable_dpm_mask = 0xff;
401         data->workload_mask = 0xff;
402
403         /* need to set voltage control types before EVV patching */
404         data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
405         data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
406         data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
407
408         data->water_marks_bitmap = 0;
409         data->avfs_exist = false;
410
411         vega12_set_features_platform_caps(hwmgr);
412
413         vega12_init_dpm_defaults(hwmgr);
414
415         /* Parse pptable data read from VBIOS */
416         vega12_set_private_data_based_on_pptable(hwmgr);
417
418         data->is_tlu_enabled = false;
419
420         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
421                         VEGA12_MAX_HARDWARE_POWERLEVELS;
422         hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
423         hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
424
425         hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
426         /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
427         hwmgr->platform_descriptor.clockStep.engineClock = 500;
428         hwmgr->platform_descriptor.clockStep.memoryClock = 500;
429
430         data->total_active_cus = adev->gfx.cu_info.number;
431         /* Setup default Overdrive Fan control settings */
432         data->odn_fan_table.target_fan_speed =
433                         hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
434         data->odn_fan_table.target_temperature =
435                         hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
436         data->odn_fan_table.min_performance_clock =
437                         hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
438         data->odn_fan_table.min_fan_limit =
439                         hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
440                         hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
441
442         if (hwmgr->feature_mask & PP_GFXOFF_MASK)
443                 data->gfxoff_controlled_by_driver = true;
444         else
445                 data->gfxoff_controlled_by_driver = false;
446
447         return result;
448 }
449
450 static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
451 {
452         struct vega12_hwmgr *data =
453                         (struct vega12_hwmgr *)(hwmgr->backend);
454
455         data->low_sclk_interrupt_threshold = 0;
456
457         return 0;
458 }
459
460 static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
461 {
462         PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
463                         "Failed to init sclk threshold!",
464                         return -EINVAL);
465
466         return 0;
467 }
468
469 /*
470  * @fn vega12_init_dpm_state
471  * @brief Function to initialize all Soft Min/Max and Hard Min/Max levels (mins to 0x0, maxes to 0xffff).
472  *
473  * @param    dpm_state - the address of the DPM Table to initialize.
474  * @return   None.
475  */
476 static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
477 {
478         dpm_state->soft_min_level = 0x0;
479         dpm_state->soft_max_level = 0xffff;
480         dpm_state->hard_min_level = 0x0;
481         dpm_state->hard_max_level = 0xffff;
482 }
483
484 static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
485 {
486         struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
487         struct vega12_hwmgr *data =
488                         (struct vega12_hwmgr *)(hwmgr->backend);
489         uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
490         PPTable_t *pp_table = &(data->smc_state_table.pp_table);
491         int i;
492         int ret;
493
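        /*
         * Translate the platform's PCIe capabilities into the SMU encodings:
         * pcie_gen 0..3 selects Gen1..Gen4 and pcie_width 1..6 selects
         * x1..x16, matching the bit layout documented below.
         */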
494         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
495                 pcie_gen = 3;
496         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
497                 pcie_gen = 2;
498         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
499                 pcie_gen = 1;
500         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
501                 pcie_gen = 0;
502
503         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
504                 pcie_width = 6;
505         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
506                 pcie_width = 5;
507         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
508                 pcie_width = 4;
509         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
510                 pcie_width = 3;
511         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
512                 pcie_width = 2;
513         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
514                 pcie_width = 1;
515
516         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
517          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
518          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
519          */
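        /*
         * Worked example: programming link level 1 with Gen3 (code 2) and x16
         * (code 6) packs to smu_pcie_arg = (1 << 16) | (2 << 8) | 6 = 0x00010206.
         */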
520         for (i = 0; i < NUM_LINK_LEVELS; i++) {
521                 pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
522                         pp_table->PcieGenSpeed[i];
523                 pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
524                         pp_table->PcieLaneCount[i];
525
526                 if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
527                     pp_table->PcieLaneCount[i]) {
528                         smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
529                         ret = smum_send_msg_to_smc_with_parameter(hwmgr,
530                                 PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
531                                 NULL);
532                         PP_ASSERT_WITH_CODE(!ret,
533                                 "[OverridePcieParameters] Attempt to override pcie params failed!",
534                                 return ret);
535                 }
536
537                 /* update the pptable */
538                 pp_table->PcieGenSpeed[i] = pcie_gen_arg;
539                 pp_table->PcieLaneCount[i] = pcie_width_arg;
540         }
541
542         /* override to the highest gen/width if PCIe DPM is disabled via ppfeaturemask */
543         if (data->registry_data.pcie_dpm_key_disabled) {
544                 for (i = 0; i < NUM_LINK_LEVELS; i++) {
545                         smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
546                         ret = smum_send_msg_to_smc_with_parameter(hwmgr,
547                                 PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
548                                 NULL);
549                         PP_ASSERT_WITH_CODE(!ret,
550                                 "[OverridePcieParameters] Attempt to override pcie params failed!",
551                                 return ret);
552
553                         pp_table->PcieGenSpeed[i] = pcie_gen;
554                         pp_table->PcieLaneCount[i] = pcie_width;
555                 }
556                 ret = vega12_enable_smc_features(hwmgr,
557                                 false,
558                                 data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
559                 PP_ASSERT_WITH_CODE(!ret,
560                                 "Attempt to Disable DPM LINK Failed!",
561                                 return ret);
562                 data->smu_features[GNLD_DPM_LINK].enabled = false;
563                 data->smu_features[GNLD_DPM_LINK].supported = false;
564         }
565         return 0;
566 }
567
568 static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
569                 PPCLK_e clk_id, uint32_t *num_of_levels)
570 {
571         int ret = 0;
572
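        /*
         * An index of 0xFF in the lower 16 bits asks the SMU for the number of
         * DPM levels for this clock instead of a specific level's frequency.
         */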
573         ret = smum_send_msg_to_smc_with_parameter(hwmgr,
574                         PPSMC_MSG_GetDpmFreqByIndex,
575                         (clk_id << 16 | 0xFF),
576                         num_of_levels);
577         PP_ASSERT_WITH_CODE(!ret,
578                         "[GetNumOfDpmLevel] failed to get dpm levels!",
579                         return ret);
580
581         return ret;
582 }
583
584 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
585                 PPCLK_e clkID, uint32_t index, uint32_t *clock)
586 {
587         /*
588          * SMU expects the Clock ID to be in the top 16 bits.
589          * Lower 16 bits specify the level.
590          */
591         PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
592                 PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index),
593                 clock) == 0,
594                 "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
595                 return -EINVAL);
596
597         return 0;
598 }
599
600 static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
601                 struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
602 {
603         int ret = 0;
604         uint32_t i, num_of_levels, clk;
605
606         ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
607         PP_ASSERT_WITH_CODE(!ret,
608                         "[SetupSingleDpmTable] failed to get clk levels!",
609                         return ret);
610
611         dpm_table->count = num_of_levels;
612
613         for (i = 0; i < num_of_levels; i++) {
614                 ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
615                 PP_ASSERT_WITH_CODE(!ret,
616                         "[SetupSingleDpmTable] failed to get clk of specific level!",
617                         return ret);
618                 dpm_table->dpm_levels[i].value = clk;
619                 dpm_table->dpm_levels[i].enabled = true;
620         }
621
622         return ret;
623 }
624
625 /*
626  * This function initializes all DPM state tables
627  * for the SMU based on the dependency table.
628  * The dynamic state patching function will then trim these
629  * state tables to the allowed range based
630  * on the power policy or external client requests,
631  * such as UVD requests.
632  */
633 static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
634 {
635
636         struct vega12_hwmgr *data =
637                         (struct vega12_hwmgr *)(hwmgr->backend);
638         struct vega12_single_dpm_table *dpm_table;
639         int ret = 0;
640
641         memset(&data->dpm_table, 0, sizeof(data->dpm_table));
642
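        /*
         * For each clock domain below: when its DPM feature is enabled, the
         * level count and per-level frequencies are read back from the SMU;
         * otherwise a single level is synthesized from the VBIOS boot clock
         * (the /100 converts the boot values, reported in 10 kHz units, to
         * the MHz used by the DPM tables).
         */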
643         /* socclk */
644         dpm_table = &(data->dpm_table.soc_table);
645         if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
646                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
647                 PP_ASSERT_WITH_CODE(!ret,
648                                 "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
649                                 return ret);
650         } else {
651                 dpm_table->count = 1;
652                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
653         }
654         vega12_init_dpm_state(&(dpm_table->dpm_state));
655
656         /* gfxclk */
657         dpm_table = &(data->dpm_table.gfx_table);
658         if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
659                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
660                 PP_ASSERT_WITH_CODE(!ret,
661                                 "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
662                                 return ret);
663         } else {
664                 dpm_table->count = 1;
665                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
666         }
667         vega12_init_dpm_state(&(dpm_table->dpm_state));
668
669         /* memclk */
670         dpm_table = &(data->dpm_table.mem_table);
671         if (data->smu_features[GNLD_DPM_UCLK].enabled) {
672                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
673                 PP_ASSERT_WITH_CODE(!ret,
674                                 "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
675                                 return ret);
676         } else {
677                 dpm_table->count = 1;
678                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
679         }
680         vega12_init_dpm_state(&(dpm_table->dpm_state));
681
682         /* eclk */
683         dpm_table = &(data->dpm_table.eclk_table);
684         if (data->smu_features[GNLD_DPM_VCE].enabled) {
685                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
686                 PP_ASSERT_WITH_CODE(!ret,
687                                 "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
688                                 return ret);
689         } else {
690                 dpm_table->count = 1;
691                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
692         }
693         vega12_init_dpm_state(&(dpm_table->dpm_state));
694
695         /* vclk */
696         dpm_table = &(data->dpm_table.vclk_table);
697         if (data->smu_features[GNLD_DPM_UVD].enabled) {
698                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
699                 PP_ASSERT_WITH_CODE(!ret,
700                                 "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
701                                 return ret);
702         } else {
703                 dpm_table->count = 1;
704                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
705         }
706         vega12_init_dpm_state(&(dpm_table->dpm_state));
707
708         /* dclk */
709         dpm_table = &(data->dpm_table.dclk_table);
710         if (data->smu_features[GNLD_DPM_UVD].enabled) {
711                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
712                 PP_ASSERT_WITH_CODE(!ret,
713                                 "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
714                                 return ret);
715         } else {
716                 dpm_table->count = 1;
717                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
718         }
719         vega12_init_dpm_state(&(dpm_table->dpm_state));
720
721         /* dcefclk */
722         dpm_table = &(data->dpm_table.dcef_table);
723         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
724                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
725                 PP_ASSERT_WITH_CODE(!ret,
726                                 "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
727                                 return ret);
728         } else {
729                 dpm_table->count = 1;
730                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
731         }
732         vega12_init_dpm_state(&(dpm_table->dpm_state));
733
734         /* pixclk */
735         dpm_table = &(data->dpm_table.pixel_table);
736         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
737                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
738                 PP_ASSERT_WITH_CODE(!ret,
739                                 "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
740                                 return ret);
741         } else
742                 dpm_table->count = 0;
743         vega12_init_dpm_state(&(dpm_table->dpm_state));
744
745         /* dispclk */
746         dpm_table = &(data->dpm_table.display_table);
747         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
748                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
749                 PP_ASSERT_WITH_CODE(!ret,
750                                 "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
751                                 return ret);
752         } else
753                 dpm_table->count = 0;
754         vega12_init_dpm_state(&(dpm_table->dpm_state));
755
756         /* phyclk */
757         dpm_table = &(data->dpm_table.phy_table);
758         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
759                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
760                 PP_ASSERT_WITH_CODE(!ret,
761                                 "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
762                                 return ret);
763         } else
764                 dpm_table->count = 0;
765         vega12_init_dpm_state(&(dpm_table->dpm_state));
766
767         /* save a copy of the default DPM table */
768         memcpy(&(data->golden_dpm_table), &(data->dpm_table),
769                         sizeof(struct vega12_dpm_table));
770
771         return 0;
772 }
773
774 #if 0
775 static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
776 {
777         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
778         struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
779         uint32_t min_level;
780
781         hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
782         hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
783
784         /* Optimize compute power profile: Use only highest
785          * 2 power levels (if more than 2 are available)
786          */
787         if (dpm_table->count > 2)
788                 min_level = dpm_table->count - 2;
789         else if (dpm_table->count == 2)
790                 min_level = 1;
791         else
792                 min_level = 0;
793
794         hwmgr->default_compute_power_profile.min_sclk =
795                         dpm_table->dpm_levels[min_level].value;
796
797         hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
798         hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
799
800         return 0;
801 }
802 #endif
803
804 /**
805  * vega12_init_smc_table - Initializes the SMC table and uploads it
806  *
807  * @hwmgr:  the address of the powerplay hardware manager.
808  * Return:  0 on success; a negative error code if uploading the PPtable fails.
809  */
810 static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
811 {
812         int result;
813         struct vega12_hwmgr *data =
814                         (struct vega12_hwmgr *)(hwmgr->backend);
815         PPTable_t *pp_table = &(data->smc_state_table.pp_table);
816         struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
817         struct phm_ppt_v3_information *pptable_information =
818                 (struct phm_ppt_v3_information *)hwmgr->pptable;
819
820         result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
821         if (!result) {
822                 data->vbios_boot_state.vddc     = boot_up_values.usVddc;
823                 data->vbios_boot_state.vddci    = boot_up_values.usVddci;
824                 data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
825                 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
826                 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
827                 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
828                 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
829                 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
830                 data->vbios_boot_state.eclock = boot_up_values.ulEClk;
831                 data->vbios_boot_state.dclock = boot_up_values.ulDClk;
832                 data->vbios_boot_state.vclock = boot_up_values.ulVClk;
833                 smum_send_msg_to_smc_with_parameter(hwmgr,
834                                 PPSMC_MSG_SetMinDeepSleepDcefclk,
835                         (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
836                                 NULL);
837         }
838
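        /*
         * Copy the parsed PPTable into the SMC state table and push it to the
         * SMU; the final 'false' argument selects a driver-to-SMU write rather
         * than a read back.
         */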
839         memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
840
841         result = smum_smc_table_manager(hwmgr,
842                                         (uint8_t *)pp_table, TABLE_PPTABLE, false);
843         PP_ASSERT_WITH_CODE(!result,
844                         "Failed to upload PPtable!", return result);
845
846         return 0;
847 }
848
849 static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
850 {
851         uint32_t result;
852
853         PP_ASSERT_WITH_CODE(
854                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0,
855                 "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
856                 return -EINVAL);
857
858         PP_ASSERT_WITH_CODE(result == 1,
859                         "Failed to run ACG BTC!", return -EINVAL);
860
861         return 0;
862 }
863
864 static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
865 {
866         struct vega12_hwmgr *data =
867                         (struct vega12_hwmgr *)(hwmgr->backend);
868         int i;
869         uint32_t allowed_features_low = 0, allowed_features_high = 0;
870
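        /*
         * Split each allowed feature's 64-bit bitmap into the low/high 32-bit
         * words expected by the SMU messages: features with IDs above 31 land
         * in the high word.
         */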
871         for (i = 0; i < GNLD_FEATURES_MAX; i++)
872                 if (data->smu_features[i].allowed)
873                         data->smu_features[i].smu_feature_id > 31 ?
874                                 (allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
875                                 (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
876
877         PP_ASSERT_WITH_CODE(
878                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high,
879                         NULL) == 0,
880                 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
881                 return -1);
882
883         PP_ASSERT_WITH_CODE(
884                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low,
885                         NULL) == 0,
886                 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
887                 return -1);
888
889         return 0;
890 }
891
892 static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
893 {
894         struct vega12_hwmgr *data =
895                         (struct vega12_hwmgr *)(hwmgr->backend);
896
897         data->uvd_power_gated = true;
898         data->vce_power_gated = true;
899
900         if (data->smu_features[GNLD_DPM_UVD].enabled)
901                 data->uvd_power_gated = false;
902
903         if (data->smu_features[GNLD_DPM_VCE].enabled)
904                 data->vce_power_gated = false;
905 }
906
907 static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
908 {
909         struct vega12_hwmgr *data =
910                         (struct vega12_hwmgr *)(hwmgr->backend);
911         uint64_t features_enabled;
912         int i;
913         bool enabled;
914
915         PP_ASSERT_WITH_CODE(
916                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0,
917                 "[EnableAllSMUFeatures] Failed to enable all smu features!",
918                 return -1);
919
920         if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
921                 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
922                         enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
923                         data->smu_features[i].enabled = enabled;
924                         data->smu_features[i].supported = enabled;
925                 }
926         }
927
928         vega12_init_powergate_state(hwmgr);
929
930         return 0;
931 }
932
933 static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
934 {
935         struct vega12_hwmgr *data =
936                         (struct vega12_hwmgr *)(hwmgr->backend);
937         uint64_t features_enabled;
938         int i;
939         bool enabled;
940
941         PP_ASSERT_WITH_CODE(
942                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0,
943                 "[DisableAllSMUFeatures] Failed to disable all smu features!",
944                 return -1);
945
946         if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
947                 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
948                         enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
949                         data->smu_features[i].enabled = enabled;
950                         data->smu_features[i].supported = enabled;
951                 }
952         }
953
954         return 0;
955 }
956
957 static int vega12_odn_initialize_default_settings(
958                 struct pp_hwmgr *hwmgr)
959 {
960         return 0;
961 }
962
963 static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
964                 uint32_t adjust_percent)
965 {
966         return smum_send_msg_to_smc_with_parameter(hwmgr,
967                         PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
968                         NULL);
969 }
970
971 static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
972 {
973         int adjust_percent, result = 0;
974
975         if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
976                 adjust_percent =
977                                 hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
978                                 hwmgr->platform_descriptor.TDPAdjustment :
979                                 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
980                 result = vega12_set_overdrive_target_percentage(hwmgr,
981                                 (uint32_t)adjust_percent);
982         }
983         return result;
984 }
985
986 static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
987                 PPCLK_e clkid, struct vega12_clock_range *clock)
988 {
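        /*
         * Query this clock's limits from the SMU: the max/min attainable under
         * AC power and the max allowed in DC (battery) mode. The clock ID is
         * again carried in the upper 16 bits of the message parameter.
         */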
989         /* AC Max */
990         PP_ASSERT_WITH_CODE(
991                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16),
992                         &(clock->ACMax)) == 0,
993                 "[GetClockRanges] Failed to get max ac clock from SMC!",
994                 return -EINVAL);
995
996         /* AC Min */
997         PP_ASSERT_WITH_CODE(
998                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16),
999                         &(clock->ACMin)) == 0,
1000                 "[GetClockRanges] Failed to get min ac clock from SMC!",
1001                 return -EINVAL);
1002
1003         /* DC Max */
1004         PP_ASSERT_WITH_CODE(
1005                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16),
1006                         &(clock->DCMax)) == 0,
1007                 "[GetClockRanges] Failed to get max dc clock from SMC!",
1008                 return -EINVAL);
1009
1010         return 0;
1011 }
1012
1013 static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
1014 {
1015         struct vega12_hwmgr *data =
1016                         (struct vega12_hwmgr *)(hwmgr->backend);
1017         uint32_t i;
1018
1019         for (i = 0; i < PPCLK_COUNT; i++)
1020                 PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
1021                                         i, &(data->clk_range[i])),
1022                                 "Failed to get clk range from SMC!",
1023                                 return -EINVAL);
1024
1025         return 0;
1026 }
1027
1028 static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
1029 {
1030         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1031         struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
1032         struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
1033
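        /*
         * Report the UMD "stable pstate" clocks: use the dedicated profiling
         * levels when the DPM tables are deep enough, otherwise fall back to
         * level 0; the peak values come from the highest DPM level.
         */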
1034         if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
1035             mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
1036                 hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
1037                 hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
1038         } else {
1039                 hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
1040                 hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
1041         }
1042
1043         hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
1044         hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
1045 }
1046
1047 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1048 {
1049         int tmp_result, result = 0;
1050
1051         smum_send_msg_to_smc_with_parameter(hwmgr,
1052                         PPSMC_MSG_NumOfDisplays, 0, NULL);
1053
1054         result = vega12_set_allowed_featuresmask(hwmgr);
1055         PP_ASSERT_WITH_CODE(result == 0,
1056                         "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
1057                         return result);
1058
1059         tmp_result = vega12_init_smc_table(hwmgr);
1060         PP_ASSERT_WITH_CODE(!tmp_result,
1061                         "Failed to initialize SMC table!",
1062                         result = tmp_result);
1063
1064         tmp_result = vega12_run_acg_btc(hwmgr);
1065         PP_ASSERT_WITH_CODE(!tmp_result,
1066                         "Failed to run ACG BTC!",
1067                         result = tmp_result);
1068
1069         result = vega12_enable_all_smu_features(hwmgr);
1070         PP_ASSERT_WITH_CODE(!result,
1071                         "Failed to enable all smu features!",
1072                         return result);
1073
1074         result = vega12_override_pcie_parameters(hwmgr);
1075         PP_ASSERT_WITH_CODE(!result,
1076                         "[EnableDPMTasks] Failed to override pcie parameters!",
1077                         return result);
1078
1079         tmp_result = vega12_power_control_set_level(hwmgr);
1080         PP_ASSERT_WITH_CODE(!tmp_result,
1081                         "Failed to set power control level!",
1082                         result = tmp_result);
1083
1084         result = vega12_get_all_clock_ranges(hwmgr);
1085         PP_ASSERT_WITH_CODE(!result,
1086                         "Failed to get all clock ranges!",
1087                         return result);
1088
1089         result = vega12_odn_initialize_default_settings(hwmgr);
1090         PP_ASSERT_WITH_CODE(!result,
1091                         "Failed to initialize ODN default settings!",
1092                         return result);
1093
1094         result = vega12_setup_default_dpm_tables(hwmgr);
1095         PP_ASSERT_WITH_CODE(!result,
1096                         "Failed to setup default DPM tables!",
1097                         return result);
1098
1099         vega12_populate_umdpstate_clocks(hwmgr);
1100
1101         return result;
1102 }
1103
1104 static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
1105              struct pp_hw_power_state *hw_ps)
1106 {
1107         return 0;
1108 }
1109
1110 static uint32_t vega12_find_lowest_dpm_level(
1111                 struct vega12_single_dpm_table *table)
1112 {
1113         uint32_t i;
1114
1115         for (i = 0; i < table->count; i++) {
1116                 if (table->dpm_levels[i].enabled)
1117                         break;
1118         }
1119
1120         if (i >= table->count) {
1121                 i = 0;
1122                 table->dpm_levels[i].enabled = true;
1123         }
1124
1125         return i;
1126 }
1127
1128 static uint32_t vega12_find_highest_dpm_level(
1129                 struct vega12_single_dpm_table *table)
1130 {
1131         int32_t i = 0;
1132         PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
1133                         "[FindHighestDPMLevel] DPM Table has too many entries!",
1134                         return MAX_REGULAR_DPM_NUMBER - 1);
1135
1136         for (i = table->count - 1; i >= 0; i--) {
1137                 if (table->dpm_levels[i].enabled)
1138                         break;
1139         }
1140
1141         if (i < 0) {
1142                 i = 0;
1143                 table->dpm_levels[i].enabled = true;
1144         }
1145
1146         return (uint32_t)i;
1147 }
1148
1149 static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
1150 {
1151         struct vega12_hwmgr *data = hwmgr->backend;
1152         uint32_t min_freq;
1153         int ret = 0;
1154
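        /*
         * Each SetSoftMinByFreq/SetHardMinByFreq message packs the clock ID in
         * the upper 16 bits and the requested frequency (in MHz) in the lower
         * 16 bits.
         */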
1155         if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
1156                 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
1157                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1158                                         hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1159                                         (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
1160                                         NULL)),
1161                                         "Failed to set soft min gfxclk !",
1162                                         return ret);
1163         }
1164
1165         if (data->smu_features[GNLD_DPM_UCLK].enabled) {
1166                 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
1167                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1168                                         hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1169                                         (PPCLK_UCLK << 16) | (min_freq & 0xffff),
1170                                         NULL)),
1171                                         "Failed to set soft min memclk !",
1172                                         return ret);
1173
1174                 min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
1175                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1176                                         hwmgr, PPSMC_MSG_SetHardMinByFreq,
1177                                         (PPCLK_UCLK << 16) | (min_freq & 0xffff),
1178                                         NULL)),
1179                                         "Failed to set hard min memclk !",
1180                                         return ret);
1181         }
1182
1183         if (data->smu_features[GNLD_DPM_UVD].enabled) {
1184                 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
1185
1186                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1187                                         hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1188                                         (PPCLK_VCLK << 16) | (min_freq & 0xffff),
1189                                         NULL)),
1190                                         "Failed to set soft min vclk!",
1191                                         return ret);
1192
1193                 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
1194
1195                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1196                                         hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1197                                         (PPCLK_DCLK << 16) | (min_freq & 0xffff),
1198                                         NULL)),
1199                                         "Failed to set soft min dclk!",
1200                                         return ret);
1201         }
1202
1203         if (data->smu_features[GNLD_DPM_VCE].enabled) {
1204                 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
1205
1206                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1207                                         hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1208                                         (PPCLK_ECLK << 16) | (min_freq & 0xffff),
1209                                         NULL)),
1210                                         "Failed to set soft min eclk!",
1211                                         return ret);
1212         }
1213
1214         if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
1215                 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
1216
1217                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1218                                         hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1219                                         (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
1220                                         NULL)),
1221                                         "Failed to set soft min socclk!",
1222                                         return ret);
1223         }
1224
1225         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1226                 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1227
1228                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1229                                         hwmgr, PPSMC_MSG_SetHardMinByFreq,
1230                                         (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
1231                                         NULL)),
1232                                         "Failed to set hard min dcefclk!",
1233                                         return ret);
1234         }
1235
1236         return ret;
1238 }
1239
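/*
 * Counterpart of vega12_upload_dpm_min_level(): push the cached soft
 * maximum frequencies to the SMU via PPSMC_MSG_SetSoftMaxByFreq, using the
 * same (clock << 16) | freq parameter encoding.
 */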
1240 static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
1241 {
1242         struct vega12_hwmgr *data = hwmgr->backend;
1243         uint32_t max_freq;
1244         int ret = 0;
1245
1246         if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
1247                 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
1248
1249                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1250                                         hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1251                                         (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
1252                                         NULL)),
1253                                         "Failed to set soft max gfxclk!",
1254                                         return ret);
1255         }
1256
1257         if (data->smu_features[GNLD_DPM_UCLK].enabled) {
1258                 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
1259
1260                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1261                                         hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1262                                         (PPCLK_UCLK << 16) | (max_freq & 0xffff),
1263                                         NULL)),
1264                                         "Failed to set soft max memclk!",
1265                                         return ret);
1266         }
1267
1268         if (data->smu_features[GNLD_DPM_UVD].enabled) {
1269                 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
1270
1271                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1272                                         hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1273                                         (PPCLK_VCLK << 16) | (max_freq & 0xffff),
1274                                         NULL)),
1275                                         "Failed to set soft max vclk!",
1276                                         return ret);
1277
1278                 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
1279                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1280                                         hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1281                                         (PPCLK_DCLK << 16) | (max_freq & 0xffff),
1282                                         NULL)),
1283                                         "Failed to set soft max dclk!",
1284                                         return ret);
1285         }
1286
1287         if (data->smu_features[GNLD_DPM_VCE].enabled) {
1288                 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
1289
1290                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1291                                         hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1292                                         (PPCLK_ECLK << 16) | (max_freq & 0xffff),
1293                                         NULL)),
1294                                         "Failed to set soft max eclk!",
1295                                         return ret);
1296         }
1297
1298         if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
1299                 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
1300
1301                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1302                                         hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1303                                         (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
1304                                         NULL)),
1305                                         "Failed to set soft max socclk!",
1306                                         return ret);
1307         }
1308
1309         return ret;
1310 }
1311
1312 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1313 {
1314         struct vega12_hwmgr *data =
1315                         (struct vega12_hwmgr *)(hwmgr->backend);
1316
1317         if (data->smu_features[GNLD_DPM_VCE].supported) {
1318                 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1319                                 enable,
1320                                 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
1321                                 "Attempt to Enable/Disable DPM VCE Failed!",
1322                                 return -1);
1323                 data->smu_features[GNLD_DPM_VCE].enabled = enable;
1324         }
1325
1326         return 0;
1327 }
1328
1329 static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1330 {
1331         struct vega12_hwmgr *data =
1332                         (struct vega12_hwmgr *)(hwmgr->backend);
1333         uint32_t gfx_clk;
1334
1335         if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1336                 return -1;
1337
1338         if (low)
1339                 PP_ASSERT_WITH_CODE(
1340                         vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
1341                         "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1342                         return -1);
1343         else
1344                 PP_ASSERT_WITH_CODE(
1345                         vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
1346                         "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1347                         return -1);
1348
1349         return (gfx_clk * 100);
1350 }
1351
1352 static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1353 {
1354         struct vega12_hwmgr *data =
1355                         (struct vega12_hwmgr *)(hwmgr->backend);
1356         uint32_t mem_clk;
1357
1358         if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1359                 return -1;
1360
1361         if (low)
1362                 PP_ASSERT_WITH_CODE(
1363                         vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
1364                         "[GetMclks]: fail to get min PPCLK_UCLK\n",
1365                         return -1);
1366         else
1367                 PP_ASSERT_WITH_CODE(
1368                         vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
1369                         "[GetMclks]: fail to get max PPCLK_UCLK\n",
1370                         return -1);
1371
1372         return (mem_clk * 100);
1373 }
1374
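/*
 * Read the SmuMetrics table from the SMU, caching it for about one
 * millisecond so that bursts of sensor queries do not each trigger a full
 * table transfer.  Pass bypass_cache to force a fresh read.
 */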
1375 static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr,
1376                                     SmuMetrics_t *metrics_table,
1377                                     bool bypass_cache)
1378 {
1379         struct vega12_hwmgr *data =
1380                         (struct vega12_hwmgr *)(hwmgr->backend);
1381         int ret = 0;
1382
1383         if (bypass_cache ||
1384             !data->metrics_time ||
1385             time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
1386                 ret = smum_smc_table_manager(hwmgr,
1387                                              (uint8_t *)(&data->metrics_table),
1388                                              TABLE_SMU_METRICS,
1389                                              true);
1390                 if (ret) {
1391                         pr_info("Failed to export SMU metrics table!\n");
1392                         return ret;
1393                 }
1394                 data->metrics_time = jiffies;
1395         }
1396
1397         if (metrics_table)
1398                 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
1399
1400         return ret;
1401 }
1402
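/*
 * Current socket power.  CurrSocketPower is reported in watts; shifting it
 * left by 8 yields a fixed-point value with 8 fractional bits, which is
 * presumably the format the AMDGPU_PP_SENSOR_GPU_POWER consumers expect.
 */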
1403 static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
1404 {
1405         SmuMetrics_t metrics_table;
1406         int ret = 0;
1407
1408         ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
1409         if (ret)
1410                 return ret;
1411
1412         *query = metrics_table.CurrSocketPower << 8;
1413
1414         return ret;
1415 }
1416
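/*
 * Query the SMU for the current GFXCLK frequency.  The SMU replies in MHz
 * and the result is scaled by 100, i.e. reported in 10 kHz units, matching
 * the other clock readbacks in this file.
 */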
1417 static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
1418 {
1419         uint32_t gfx_clk = 0;
1420
1421         *gfx_freq = 0;
1422
1423         PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
1424                         PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16),
1425                         &gfx_clk) == 0,
1426                         "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1427                         return -EINVAL);
1428
1429         *gfx_freq = gfx_clk * 100;
1430
1431         return 0;
1432 }
1433
1434 static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
1435 {
1436         uint32_t mem_clk = 0;
1437
1438         *mclk_freq = 0;
1439
1440         PP_ASSERT_WITH_CODE(
1441                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16),
1442                                 &mem_clk) == 0,
1443                         "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1444                         return -EINVAL);
1445
1446         *mclk_freq = mem_clk * 100;
1447
1448         return 0;
1449 }
1450
1451 static int vega12_get_current_activity_percent(
1452                 struct pp_hwmgr *hwmgr,
1453                 int idx,
1454                 uint32_t *activity_percent)
1455 {
1456         SmuMetrics_t metrics_table;
1457         int ret = 0;
1458
1459         ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
1460         if (ret)
1461                 return ret;
1462
1463         switch (idx) {
1464         case AMDGPU_PP_SENSOR_GPU_LOAD:
1465                 *activity_percent = metrics_table.AverageGfxActivity;
1466                 break;
1467         case AMDGPU_PP_SENSOR_MEM_LOAD:
1468                 *activity_percent = metrics_table.AverageUclkActivity;
1469                 break;
1470         default:
1471                 pr_err("Invalid index for retrieving clock activity\n");
1472                 return -EINVAL;
1473         }
1474
1475         return ret;
1476 }
1477
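/*
 * Sensor dispatcher for the powerplay read_sensor hook: map an
 * AMDGPU_PP_SENSOR_* index onto the corresponding SMU query and return the
 * value together with its size in bytes.
 */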
1478 static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1479                               void *value, int *size)
1480 {
1481         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1482         SmuMetrics_t metrics_table;
1483         int ret = 0;
1484
1485         switch (idx) {
1486         case AMDGPU_PP_SENSOR_GFX_SCLK:
1487                 ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
1488                 if (!ret)
1489                         *size = 4;
1490                 break;
1491         case AMDGPU_PP_SENSOR_GFX_MCLK:
1492                 ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
1493                 if (!ret)
1494                         *size = 4;
1495                 break;
1496         case AMDGPU_PP_SENSOR_GPU_LOAD:
1497         case AMDGPU_PP_SENSOR_MEM_LOAD:
1498                 ret = vega12_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
1499                 if (!ret)
1500                         *size = 4;
1501                 break;
1502         case AMDGPU_PP_SENSOR_GPU_TEMP:
1503                 *((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
1504                 *size = 4;
1505                 break;
1506         case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1507                 ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
1508                 if (ret)
1509                         return ret;
1510
1511                 *((uint32_t *)value) = metrics_table.TemperatureHotspot *
1512                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1513                 *size = 4;
1514                 break;
1515         case AMDGPU_PP_SENSOR_MEM_TEMP:
1516                 ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
1517                 if (ret)
1518                         return ret;
1519
1520                 *((uint32_t *)value) = metrics_table.TemperatureHBM *
1521                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1522                 *size = 4;
1523                 break;
1524         case AMDGPU_PP_SENSOR_UVD_POWER:
1525                 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1526                 *size = 4;
1527                 break;
1528         case AMDGPU_PP_SENSOR_VCE_POWER:
1529                 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1530                 *size = 4;
1531                 break;
1532         case AMDGPU_PP_SENSOR_GPU_POWER:
1533                 ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
1534                 if (!ret)
1535                         *size = 4;
1536                 break;
1537         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
1538                 ret = vega12_get_enabled_smc_features(hwmgr, (uint64_t *)value);
1539                 if (!ret)
1540                         *size = 8;
1541                 break;
1542         default:
1543                 ret = -EOPNOTSUPP;
1544                 break;
1545         }
1546         return ret;
1547 }
1548
1549 static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1550                 bool has_disp)
1551 {
1552         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1553
1554         if (data->smu_features[GNLD_DPM_UCLK].enabled)
1555                 return smum_send_msg_to_smc_with_parameter(hwmgr,
1556                         PPSMC_MSG_SetUclkFastSwitch,
1557                         has_disp ? 1 : 0,
1558                         NULL);
1559
1560         return 0;
1561 }
1562
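/*
 * Handle a display clock request from the display stack: translate the
 * amd_pp clock type into its PPCLK selector and program the requested
 * frequency (converted from kHz to MHz) as a hard minimum.
 */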
1563 static int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1564                 struct pp_display_clock_request *clock_req)
1565 {
1566         int result = 0;
1567         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1568         enum amd_pp_clock_type clk_type = clock_req->clock_type;
1569         uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1570         PPCLK_e clk_select = 0;
1571         uint32_t clk_request = 0;
1572
1573         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1574                 switch (clk_type) {
1575                 case amd_pp_dcef_clock:
1576                         clk_select = PPCLK_DCEFCLK;
1577                         break;
1578                 case amd_pp_disp_clock:
1579                         clk_select = PPCLK_DISPCLK;
1580                         break;
1581                 case amd_pp_pixel_clock:
1582                         clk_select = PPCLK_PIXCLK;
1583                         break;
1584                 case amd_pp_phy_clock:
1585                         clk_select = PPCLK_PHYCLK;
1586                         break;
1587                 default:
1588                         pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
1589                         result = -1;
1590                         break;
1591                 }
1592
1593                 if (!result) {
1594                         clk_request = (clk_select << 16) | clk_freq;
1595                         result = smum_send_msg_to_smc_with_parameter(hwmgr,
1596                                         PPSMC_MSG_SetHardMinByFreq,
1597                                         clk_request,
1598                                         NULL);
1599                 }
1600         }
1601
1602         return result;
1603 }
1604
1605 static int vega12_notify_smc_display_config_after_ps_adjustment(
1606                 struct pp_hwmgr *hwmgr)
1607 {
1608         struct vega12_hwmgr *data =
1609                         (struct vega12_hwmgr *)(hwmgr->backend);
1610         struct PP_Clocks min_clocks = {0};
1611         struct pp_display_clock_request clock_req;
1612
1613         if ((hwmgr->display_config->num_display > 1) &&
1614              !hwmgr->display_config->multi_monitor_in_sync &&
1615              !hwmgr->display_config->nb_pstate_switch_disable)
1616                 vega12_notify_smc_display_change(hwmgr, false);
1617         else
1618                 vega12_notify_smc_display_change(hwmgr, true);
1619
1620         min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
1621         min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
1622         min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
1623
1624         if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
1625                 clock_req.clock_type = amd_pp_dcef_clock;
1626                 clock_req.clock_freq_in_khz = min_clocks.dcefClock / 10;
1627                 if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
1628                         if (data->smu_features[GNLD_DS_DCEFCLK].supported)
1629                                 PP_ASSERT_WITH_CODE(
1630                                         !smum_send_msg_to_smc_with_parameter(
1631                                         hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
1632                                         min_clocks.dcefClockInSR / 100,
1633                                         NULL),
1634                                         "Attempt to set divider for DCEFCLK Failed!",
1635                                         return -1);
1636                 } else {
1637                         pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1638                 }
1639         }
1640
1641         return 0;
1642 }
1643
1644 static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
1645 {
1646         struct vega12_hwmgr *data =
1647                         (struct vega12_hwmgr *)(hwmgr->backend);
1648
1649         uint32_t soft_level;
1650
1651         soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1652
1653         data->dpm_table.gfx_table.dpm_state.soft_min_level =
1654                 data->dpm_table.gfx_table.dpm_state.soft_max_level =
1655                 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1656
1657         soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1658
1659         data->dpm_table.mem_table.dpm_state.soft_min_level =
1660                 data->dpm_table.mem_table.dpm_state.soft_max_level =
1661                 data->dpm_table.mem_table.dpm_levels[soft_level].value;
1662
1663         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1664                         "Failed to upload boot level to highest!",
1665                         return -1);
1666
1667         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1668                         "Failed to upload dpm max level to highest!",
1669                         return -1);
1670
1671         return 0;
1672 }
1673
1674 static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1675 {
1676         struct vega12_hwmgr *data =
1677                         (struct vega12_hwmgr *)(hwmgr->backend);
1678         uint32_t soft_level;
1679
1680         soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1681
1682         data->dpm_table.gfx_table.dpm_state.soft_min_level =
1683                 data->dpm_table.gfx_table.dpm_state.soft_max_level =
1684                 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1685
1686         soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1687
1688         data->dpm_table.mem_table.dpm_state.soft_min_level =
1689                 data->dpm_table.mem_table.dpm_state.soft_max_level =
1690                 data->dpm_table.mem_table.dpm_levels[soft_level].value;
1691
1692         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1693                         "Failed to upload boot level to lowest!",
1694                         return -1);
1695
1696         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1697                         "Failed to upload dpm max level to lowest!",
1698                         return -1);
1699
1700         return 0;
1702 }
1703
1704 static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1705 {
1706         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1707                         "Failed to upload DPM Bootup Levels!",
1708                         return -1);
1709
1710         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1711                         "Failed to upload DPM Max Levels!",
1712                         return -1);
1713
1714         return 0;
1715 }
1716
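/*
 * Choose the sclk/mclk/socclk level masks used by the profiling forced
 * levels: the UMD pstate levels by default, level 0 for the MIN_SCLK and
 * MIN_MCLK profiles, and the top entry of each table for PROFILE_PEAK.
 */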
1717 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
1718                                 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
1719 {
1720         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1721         struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
1722         struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
1723         struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
1724
1725         *sclk_mask = 0;
1726         *mclk_mask = 0;
1727         *soc_mask  = 0;
1728
1729         if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
1730             mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
1731             soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
1732                 *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
1733                 *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
1734                 *soc_mask  = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
1735         }
1736
1737         if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
1738                 *sclk_mask = 0;
1739         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
1740                 *mclk_mask = 0;
1741         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
1742                 *sclk_mask = gfx_dpm_table->count - 1;
1743                 *mclk_mask = mem_dpm_table->count - 1;
1744                 *soc_mask  = soc_dpm_table->count - 1;
1745         }
1746
1747         return 0;
1748 }
1749
1750 static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
1751 {
1752         switch (mode) {
1753         case AMD_FAN_CTRL_NONE:
1754                 break;
1755         case AMD_FAN_CTRL_MANUAL:
1756                 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1757                         vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
1758                 break;
1759         case AMD_FAN_CTRL_AUTO:
1760                 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1761                         vega12_fan_ctrl_start_smc_fan_control(hwmgr);
1762                 break;
1763         default:
1764                 break;
1765         }
1766 }
1767
1768 static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1769                                 enum amd_dpm_forced_level level)
1770 {
1771         int ret = 0;
1772         uint32_t sclk_mask = 0;
1773         uint32_t mclk_mask = 0;
1774         uint32_t soc_mask = 0;
1775
1776         switch (level) {
1777         case AMD_DPM_FORCED_LEVEL_HIGH:
1778                 ret = vega12_force_dpm_highest(hwmgr);
1779                 break;
1780         case AMD_DPM_FORCED_LEVEL_LOW:
1781                 ret = vega12_force_dpm_lowest(hwmgr);
1782                 break;
1783         case AMD_DPM_FORCED_LEVEL_AUTO:
1784                 ret = vega12_unforce_dpm_levels(hwmgr);
1785                 break;
1786         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1787         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1788         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1789         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1790                 ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
1791                 if (ret)
1792                         return ret;
1793                 vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
1794                 vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
1795                 break;
1796         case AMD_DPM_FORCED_LEVEL_MANUAL:
1797         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1798         default:
1799                 break;
1800         }
1801
1802         return ret;
1803 }
1804
1805 static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
1806 {
1807         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1808
1809         if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
1810                 return AMD_FAN_CTRL_MANUAL;
1811         else
1812                 return AMD_FAN_CTRL_AUTO;
1813 }
1814
1815 static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
1816                 struct amd_pp_simple_clock_info *info)
1817 {
1818 #if 0
1819         struct phm_ppt_v2_information *table_info =
1820                         (struct phm_ppt_v2_information *)hwmgr->pptable;
1821         struct phm_clock_and_voltage_limits *max_limits =
1822                         &table_info->max_clock_voltage_on_ac;
1823
1824         info->engine_max_clock = max_limits->sclk;
1825         info->memory_max_clock = max_limits->mclk;
1826 #endif
1827         return 0;
1828 }
1829
1830 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
1831                 uint32_t *clock,
1832                 PPCLK_e clock_select,
1833                 bool max)
1834 {
1835         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1836
1837         if (max)
1838                 *clock = data->clk_range[clock_select].ACMax;
1839         else
1840                 *clock = data->clk_range[clock_select].ACMin;
1841
1842         return 0;
1843 }
1844
1845 static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
1846                 struct pp_clock_levels_with_latency *clocks)
1847 {
1848         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1849         uint32_t ucount;
1850         int i;
1851         struct vega12_single_dpm_table *dpm_table;
1852
1853         if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1854                 return -1;
1855
1856         dpm_table = &(data->dpm_table.gfx_table);
1857         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1858                 MAX_NUM_CLOCKS : dpm_table->count;
1859
1860         for (i = 0; i < ucount; i++) {
1861                 clocks->data[i].clocks_in_khz =
1862                         dpm_table->dpm_levels[i].value * 1000;
1863
1864                 clocks->data[i].latency_in_us = 0;
1865         }
1866
1867         clocks->num_levels = ucount;
1868
1869         return 0;
1870 }
1871
1872 static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
1873                 uint32_t clock)
1874 {
1875         return 25;
1876 }
1877
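/*
 * Like vega12_get_sclks() but for UCLK.  Besides filling @clocks, this also
 * refreshes the cached mclk_latency_table that
 * vega12_apply_clocks_adjust_rules() consults when mclk switching has to be
 * restricted.
 */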
1878 static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
1879                 struct pp_clock_levels_with_latency *clocks)
1880 {
1881         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1882         uint32_t ucount;
1883         int i;
1884         struct vega12_single_dpm_table *dpm_table;

1885         if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1886                 return -1;
1887
1888         dpm_table = &(data->dpm_table.mem_table);
1889         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1890                 MAX_NUM_CLOCKS : dpm_table->count;
1891
1892         for (i = 0; i < ucount; i++) {
1893                 clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
1894                 data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
1895                 clocks->data[i].latency_in_us =
1896                         data->mclk_latency_table.entries[i].latency =
1897                         vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
1898         }
1899
1900         clocks->num_levels = data->mclk_latency_table.count = ucount;
1901
1902         return 0;
1903 }
1904
1905 static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
1906                 struct pp_clock_levels_with_latency *clocks)
1907 {
1908         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1909         uint32_t ucount;
1910         int i;
1911         struct vega12_single_dpm_table *dpm_table;
1912
1913         if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
1914                 return -1;
1915
1917         dpm_table = &(data->dpm_table.dcef_table);
1918         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1919                 MAX_NUM_CLOCKS : dpm_table->count;
1920
1921         for (i = 0; i < ucount; i++) {
1922                 clocks->data[i].clocks_in_khz =
1923                         dpm_table->dpm_levels[i].value * 1000;
1924
1925                 clocks->data[i].latency_in_us = 0;
1926         }
1927
1928         clocks->num_levels = ucount;
1929
1930         return 0;
1931 }
1932
1933 static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
1934                 struct pp_clock_levels_with_latency *clocks)
1935 {
1936         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1937         uint32_t ucount;
1938         int i;
1939         struct vega12_single_dpm_table *dpm_table;
1940
1941         if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
1942                 return -1;
1943
1945         dpm_table = &(data->dpm_table.soc_table);
1946         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1947                 MAX_NUM_CLOCKS : dpm_table->count;
1948
1949         for (i = 0; i < ucount; i++) {
1950                 clocks->data[i].clocks_in_khz =
1951                         dpm_table->dpm_levels[i].value * 1000;
1952
1953                 clocks->data[i].latency_in_us = 0;
1954         }
1955
1956         clocks->num_levels = ucount;
1957
1958         return 0;
1960 }
1961
1962 static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1963                 enum amd_pp_clock_type type,
1964                 struct pp_clock_levels_with_latency *clocks)
1965 {
1966         int ret;
1967
1968         switch (type) {
1969         case amd_pp_sys_clock:
1970                 ret = vega12_get_sclks(hwmgr, clocks);
1971                 break;
1972         case amd_pp_mem_clock:
1973                 ret = vega12_get_memclocks(hwmgr, clocks);
1974                 break;
1975         case amd_pp_dcef_clock:
1976                 ret = vega12_get_dcefclocks(hwmgr, clocks);
1977                 break;
1978         case amd_pp_soc_clock:
1979                 ret = vega12_get_socclocks(hwmgr, clocks);
1980                 break;
1981         default:
1982                 return -EINVAL;
1983         }
1984
1985         return ret;
1986 }
1987
1988 static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
1989                 enum amd_pp_clock_type type,
1990                 struct pp_clock_levels_with_voltage *clocks)
1991 {
1992         clocks->num_levels = 0;
1993
1994         return 0;
1995 }
1996
1997 static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1998                                                         void *clock_ranges)
1999 {
2000         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2001         Watermarks_t *table = &(data->smc_state_table.water_marks_table);
2002         struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
2003
2004         if (!data->registry_data.disable_water_mark &&
2005                         data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2006                         data->smu_features[GNLD_DPM_SOCCLK].supported) {
2007                 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
2008                 data->water_marks_bitmap |= WaterMarksExist;
2009                 data->water_marks_bitmap &= ~WaterMarksLoaded;
2010         }
2011
2012         return 0;
2013 }
2014
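/*
 * Restrict a clock domain to the dpm levels selected in @mask: the lowest
 * set bit becomes the soft minimum and the highest set bit the soft maximum
 * (hard minimum only for DCEFCLK), then the new limits are uploaded to the
 * SMU.
 */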
2015 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
2016                 enum pp_clock_type type, uint32_t mask)
2017 {
2018         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2019         uint32_t soft_min_level, soft_max_level, hard_min_level;
2020         int ret = 0;
2021
2022         switch (type) {
2023         case PP_SCLK:
2024                 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2025                 soft_max_level = mask ? (fls(mask) - 1) : 0;
2026
2027                 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2028                         data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2029                 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2030                         data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2031
2032                 ret = vega12_upload_dpm_min_level(hwmgr);
2033                 PP_ASSERT_WITH_CODE(!ret,
2034                         "Failed to upload boot level to lowest!",
2035                         return ret);
2036
2037                 ret = vega12_upload_dpm_max_level(hwmgr);
2038                 PP_ASSERT_WITH_CODE(!ret,
2039                         "Failed to upload dpm max level to highest!",
2040                         return ret);
2041                 break;
2042
2043         case PP_MCLK:
2044                 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2045                 soft_max_level = mask ? (fls(mask) - 1) : 0;
2046
2047                 data->dpm_table.mem_table.dpm_state.soft_min_level =
2048                         data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2049                 data->dpm_table.mem_table.dpm_state.soft_max_level =
2050                         data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2051
2052                 ret = vega12_upload_dpm_min_level(hwmgr);
2053                 PP_ASSERT_WITH_CODE(!ret,
2054                         "Failed to upload boot level to lowest!",
2055                         return ret);
2056
2057                 ret = vega12_upload_dpm_max_level(hwmgr);
2058                 PP_ASSERT_WITH_CODE(!ret,
2059                         "Failed to upload dpm max level to highest!",
2060                         return ret);
2061
2062                 break;
2063
2064         case PP_SOCCLK:
2065                 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2066                 soft_max_level = mask ? (fls(mask) - 1) : 0;
2067
2068                 if (soft_max_level >= data->dpm_table.soc_table.count) {
2069                         pr_err("Clock level specified %d is over max allowed %d\n",
2070                                         soft_max_level,
2071                                         data->dpm_table.soc_table.count - 1);
2072                         return -EINVAL;
2073                 }
2074
2075                 data->dpm_table.soc_table.dpm_state.soft_min_level =
2076                         data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2077                 data->dpm_table.soc_table.dpm_state.soft_max_level =
2078                         data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2079
2080                 ret = vega12_upload_dpm_min_level(hwmgr);
2081                 PP_ASSERT_WITH_CODE(!ret,
2082                         "Failed to upload boot level to lowest!",
2083                         return ret);
2084
2085                 ret = vega12_upload_dpm_max_level(hwmgr);
2086                 PP_ASSERT_WITH_CODE(!ret,
2087                         "Failed to upload dpm max level to highest!",
2088                         return ret);
2089
2090                 break;
2091
2092         case PP_DCEFCLK:
2093                 hard_min_level = mask ? (ffs(mask) - 1) : 0;
2094
2095                 if (hard_min_level >= data->dpm_table.dcef_table.count) {
2096                         pr_err("Clock level specified %d is over max allowed %d\n",
2097                                         hard_min_level,
2098                                         data->dpm_table.dcef_table.count - 1);
2099                         return -EINVAL;
2100                 }
2101
2102                 data->dpm_table.dcef_table.dpm_state.hard_min_level =
2103                         data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
2104
2105                 ret = vega12_upload_dpm_min_level(hwmgr);
2106                 PP_ASSERT_WITH_CODE(!ret,
2107                         "Failed to upload boot level to lowest!",
2108                         return ret);
2109
2110                 //TODO: Setting DCEFCLK max dpm level is not supported
2111
2112                 break;
2113
2114         case PP_PCIE:
2115                 break;
2116
2117         default:
2118                 break;
2119         }
2120
2121         return 0;
2122 }
2123
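/*
 * Used by the pp_features sysfs interface: print the feature mask currently
 * enabled in the SMU plus one line per GNLD_* feature showing its bit and
 * whether it is enabled.
 */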
2124 static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
2125 {
2126         static const char *ppfeature_name[] = {
2127                         "DPM_PREFETCHER",
2128                         "GFXCLK_DPM",
2129                         "UCLK_DPM",
2130                         "SOCCLK_DPM",
2131                         "UVD_DPM",
2132                         "VCE_DPM",
2133                         "ULV",
2134                         "MP0CLK_DPM",
2135                         "LINK_DPM",
2136                         "DCEFCLK_DPM",
2137                         "GFXCLK_DS",
2138                         "SOCCLK_DS",
2139                         "LCLK_DS",
2140                         "PPT",
2141                         "TDC",
2142                         "THERMAL",
2143                         "GFX_PER_CU_CG",
2144                         "RM",
2145                         "DCEFCLK_DS",
2146                         "ACDC",
2147                         "VR0HOT",
2148                         "VR1HOT",
2149                         "FW_CTF",
2150                         "LED_DISPLAY",
2151                         "FAN_CONTROL",
2152                         "DIDT",
2153                         "GFXOFF",
2154                         "CG",
2155                         "ACG"};
2156         static const char *output_title[] = {
2157                         "FEATURES",
2158                         "BITMASK",
2159                         "ENABLEMENT"};
2160         uint64_t features_enabled;
2161         int i;
2162         int ret = 0;
2163         int size = 0;
2164
2165         phm_get_sysfs_buf(&buf, &size);
2166
2167         ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
2168         PP_ASSERT_WITH_CODE(!ret,
2169                 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
2170                 return ret);
2171
2172         size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
2173         size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
2174                                 output_title[0],
2175                                 output_title[1],
2176                                 output_title[2]);
2177         for (i = 0; i < GNLD_FEATURES_MAX; i++) {
2178                 size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
2179                                 ppfeature_name[i],
2180                                 1ULL << i,
2181                                 (features_enabled & (1ULL << i)) ? "Y" : "N");
2182         }
2183
2184         return size;
2185 }
2186
2187 static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
2188 {
2189         uint64_t features_enabled;
2190         uint64_t features_to_enable;
2191         uint64_t features_to_disable;
2192         int ret = 0;
2193
2194         if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
2195                 return -EINVAL;
2196
2197         ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
2198         if (ret)
2199                 return ret;
2200
2201         features_to_disable =
2202                 features_enabled & ~new_ppfeature_masks;
2203         features_to_enable =
2204                 ~features_enabled & new_ppfeature_masks;
2205
2206         pr_debug("features_to_disable 0x%llx\n", features_to_disable);
2207         pr_debug("features_to_enable 0x%llx\n", features_to_enable);
2208
2209         if (features_to_disable) {
2210                 ret = vega12_enable_smc_features(hwmgr, false, features_to_disable);
2211                 if (ret)
2212                         return ret;
2213         }
2214
2215         if (features_to_enable) {
2216                 ret = vega12_enable_smc_features(hwmgr, true, features_to_enable);
2217                 if (ret)
2218                         return ret;
2219         }
2220
2221         return 0;
2222 }
2223
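/*
 * The next four helpers read back the current PCIe link width and speed
 * from the PCIE LC control registers and translate the raw fields through
 * the link_width[]/link_speed[] tables defined at the top of this file,
 * clamping out-of-range readings to index 0.
 */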
2224 static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
2225 {
2226         struct amdgpu_device *adev = hwmgr->adev;
2227
2228         return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
2229                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
2230                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
2231 }
2232
2233 static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
2234 {
2235         uint32_t width_level;
2236
2237         width_level = vega12_get_current_pcie_link_width_level(hwmgr);
2238         if (width_level > LINK_WIDTH_MAX)
2239                 width_level = 0;
2240
2241         return link_width[width_level];
2242 }
2243
2244 static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
2245 {
2246         struct amdgpu_device *adev = hwmgr->adev;
2247
2248         return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
2249                 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
2250                 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
2251 }
2252
2253 static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
2254 {
2255         uint32_t speed_level;
2256
2257         speed_level = vega12_get_current_pcie_link_speed_level(hwmgr);
2258         if (speed_level > LINK_SPEED_MAX)
2259                 speed_level = 0;
2260
2261         return link_speed[speed_level];
2262 }
2263
2264 static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
2265                 enum pp_clock_type type, char *buf)
2266 {
2267         int i, now, size = 0;
2268         struct pp_clock_levels_with_latency clocks;
2269
2270         switch (type) {
2271         case PP_SCLK:
2272                 PP_ASSERT_WITH_CODE(
2273                                 vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
2274                                 "Attempt to get current gfx clk Failed!",
2275                                 return -1);
2276
2277                 PP_ASSERT_WITH_CODE(
2278                                 vega12_get_sclks(hwmgr, &clocks) == 0,
2279                                 "Attempt to get gfx clk levels Failed!",
2280                                 return -1);
2281                 for (i = 0; i < clocks.num_levels; i++)
2282                         size += sprintf(buf + size, "%d: %uMHz %s\n",
2283                                 i, clocks.data[i].clocks_in_khz / 1000,
2284                                 (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
2285                 break;
2286
2287         case PP_MCLK:
2288                 PP_ASSERT_WITH_CODE(
2289                                 vega12_get_current_mclk_freq(hwmgr, &now) == 0,
2290                                 "Attempt to get current mclk freq Failed!",
2291                                 return -1);
2292
2293                 PP_ASSERT_WITH_CODE(
2294                                 vega12_get_memclocks(hwmgr, &clocks) == 0,
2295                                 "Attempt to get memory clk levels Failed!",
2296                                 return -1);
2297                 for (i = 0; i < clocks.num_levels; i++)
2298                         size += sprintf(buf + size, "%d: %uMHz %s\n",
2299                                 i, clocks.data[i].clocks_in_khz / 1000,
2300                                 (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
2301                 break;
2302
2303         case PP_SOCCLK:
2304                 PP_ASSERT_WITH_CODE(
2305                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2306                                         PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16),
2307                                         &now) == 0,
2308                                 "Attempt to get Current SOCCLK Frequency Failed!",
2309                                 return -EINVAL);
2310
2311                 PP_ASSERT_WITH_CODE(
2312                                 vega12_get_socclocks(hwmgr, &clocks) == 0,
2313                                 "Attempt to get soc clk levels Failed!",
2314                                 return -1);
2315                 for (i = 0; i < clocks.num_levels; i++)
2316                         size += sprintf(buf + size, "%d: %uMHz %s\n",
2317                                 i, clocks.data[i].clocks_in_khz / 1000,
2318                                 (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
2319                 break;
2320
2321         case PP_DCEFCLK:
2322                 PP_ASSERT_WITH_CODE(
2323                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2324                                         PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16),
2325                                         &now) == 0,
2326                                 "Attempt to get Current DCEFCLK Frequency Failed!",
2327                                 return -EINVAL);
2328
2329                 PP_ASSERT_WITH_CODE(
2330                                 vega12_get_dcefclocks(hwmgr, &clocks) == 0,
2331                                 "Attempt to get dcef clk levels Failed!",
2332                                 return -1);
2333                 for (i = 0; i < clocks.num_levels; i++)
2334                         size += sprintf(buf + size, "%d: %uMHz %s\n",
2335                                 i, clocks.data[i].clocks_in_khz / 1000,
2336                                 (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
2337                 break;
2338
2339         case PP_PCIE:
2340                 break;
2341
2342         default:
2343                 break;
2344         }
2345         return size;
2346 }
2347
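/*
 * Recompute the soft/hard limits in every dpm table before they are
 * uploaded: start from the full range of each table, narrow it for the UMD
 * pstate profiles, and raise the UCLK hard minimum when mclk switching must
 * be avoided (multiple unsynchronized displays) or when the display stack
 * requests a minimum memory clock.
 */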
2348 static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
2349 {
2350         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2351         struct vega12_single_dpm_table *dpm_table;
2352         bool vblank_too_short = false;
2353         bool disable_mclk_switching;
2354         uint32_t i, latency;
2355
2356         disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
2357                                   !hwmgr->display_config->multi_monitor_in_sync) ||
2358                                   vblank_too_short;
2359         latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
2360
2361         /* gfxclk */
2362         dpm_table = &(data->dpm_table.gfx_table);
2363         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2364         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2365         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2366         dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2367
2368         if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2369                 if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
2370                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
2371                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
2372                 }
2373
2374                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2375                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2376                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
2377                 }
2378
2379                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2380                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2381                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2382                 }
2383         }
2384
2385         /* memclk */
2386         dpm_table = &(data->dpm_table.mem_table);
2387         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2388         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2389         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2390         dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2391
2392         if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2393                 if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
2394                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
2395                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
2396                 }
2397
2398                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2399                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2400                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
2401                 }
2402
2403                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2404                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2405                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2406                 }
2407         }
2408
2409         /* honour DAL's UCLK Hardmin */
2410         if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
2411                 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
2412
2413         /* Hardmin is dependent on displayconfig */
2414         if (disable_mclk_switching) {
2415                 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2416                 for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
2417                         if (data->mclk_latency_table.entries[i].latency <= latency) {
2418                                 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
2419                                         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
2420                                         break;
2421                                 }
2422                         }
2423                 }
2424         }
2425
2426         if (hwmgr->display_config->nb_pstate_switch_disable)
2427                 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2428
2429         /* vclk */
2430         dpm_table = &(data->dpm_table.vclk_table);
2431         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2432         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2433         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2434         dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2435
2436         if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2437                 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
2438                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
2439                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
2440                 }
2441
2442                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2443                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2444                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2445                 }
2446         }
2447
2448         /* dclk */
2449         dpm_table = &(data->dpm_table.dclk_table);
2450         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2451         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2452         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2453         dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2454
2455         if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2456                 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
2457                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
2458                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
2459                 }
2460
2461                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2462                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2463                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2464                 }
2465         }
2466
2467         /* socclk */
2468         dpm_table = &(data->dpm_table.soc_table);
2469         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2470         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2471         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2472         dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2473
2474         if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2475                 if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
2476                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
2477                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
2478                 }
2479
2480                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2481                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2482                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2483                 }
2484         }
2485
2486         /* eclk */
2487         dpm_table = &(data->dpm_table.eclk_table);
2488         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2489         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2490         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2491         dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2492
2493         if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2494                 if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
2495                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
2496                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
2497                 }
2498
2499                 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2500                         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2501                         dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2502                 }
2503         }
2504
2505         return 0;
2506 }
2507
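/*
 * Pin the memory clock hard minimum to the highest UCLK DPM level via
 * PPSMC_MSG_SetHardMinByFreq. Only effective when UCLK DPM is enabled;
 * the table is sanity checked for at least one and at most
 * NUM_UCLK_DPM_LEVELS entries first.
 */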
2508 static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
2509                 struct vega12_single_dpm_table *dpm_table)
2510 {
2511         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2512         int ret = 0;
2513
2514         if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2515                 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
2516                                 "[SetUclkToHighestDpmLevel] Dpm table has no entry!",
2517                                 return -EINVAL);
2518                 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
2519                                 "[SetUclkToHighestDpmLevel] Dpm table has too many entries!",
2520                                 return -EINVAL);
2521
2522                 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2523                 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2524                                 PPSMC_MSG_SetHardMinByFreq,
2525                                 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
2526                                 NULL)),
2527                                 "[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
2528                                 return ret);
2529         }
2530
2531         return ret;
2532 }
2533
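/*
 * Runs before a display configuration change: tell the SMU there are
 * no active displays and raise the UCLK hard minimum to the highest
 * DPM level, presumably so memory bandwidth cannot drop while the
 * display topology is being reprogrammed.
 */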
2534 static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2535 {
2536         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2537         int ret = 0;
2538
2539         smum_send_msg_to_smc_with_parameter(hwmgr,
2540                         PPSMC_MSG_NumOfDisplays, 0,
2541                         NULL);
2542
2543         ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
2544                         &data->dpm_table.mem_table);
2545
2546         return ret;
2547 }
2548
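/*
 * Runs after a display configuration change: upload the watermark
 * table to the SMU if one exists but has not been loaded yet, then
 * report the active display count when both DCEFCLK and SOCCLK DPM
 * are supported.
 */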
2549 static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2550 {
2551         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2552         int result = 0;
2553         Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
2554
2555         if ((data->water_marks_bitmap & WaterMarksExist) &&
2556                         !(data->water_marks_bitmap & WaterMarksLoaded)) {
2557                 result = smum_smc_table_manager(hwmgr,
2558                                                 (uint8_t *)wm_table, TABLE_WATERMARKS, false);
2559                 PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
2560                 data->water_marks_bitmap |= WaterMarksLoaded;
2561         }
2562
2563         if ((data->water_marks_bitmap & WaterMarksExist) &&
2564                 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2565                 data->smu_features[GNLD_DPM_SOCCLK].supported)
2566                 smum_send_msg_to_smc_with_parameter(hwmgr,
2567                         PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
2568                         NULL);
2569
2570         return result;
2571 }
2572
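/* Toggle the UVD DPM SMU feature and mirror the new state locally. */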
2573 static int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
2574 {
2575         struct vega12_hwmgr *data =
2576                         (struct vega12_hwmgr *)(hwmgr->backend);
2577
2578         if (data->smu_features[GNLD_DPM_UVD].supported) {
2579                 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
2580                                 enable,
2581                                 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
2582                                 "Attempt to Enable/Disable DPM UVD Failed!",
2583                                 return -EINVAL);
2584                 data->smu_features[GNLD_DPM_UVD].enabled = enable;
2585         }
2586
2587         return 0;
2588 }
2589
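/*
 * Power gate/ungate VCE. The cached flag avoids redundant SMU traffic;
 * VCE DPM is kept enabled only while the block is ungated.
 */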
2590 static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
2591 {
2592         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2593
2594         if (data->vce_power_gated == bgate)
2595                 return;
2596
2597         data->vce_power_gated = bgate;
2598         vega12_enable_disable_vce_dpm(hwmgr, !bgate);
2599 }
2600
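/* As above, but for the UVD block and UVD DPM. */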
2601 static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
2602 {
2603         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2604
2605         if (data->uvd_power_gated == bgate)
2606                 return;
2607
2608         data->uvd_power_gated = bgate;
2609         vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
2610 }
2611
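/*
 * Report whether the SMU must be reprogrammed for the new display
 * configuration: either the display count changed or, with gfxclk deep
 * sleep supported, the minimum clock for self refresh changed.
 */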
2612 static bool
2613 vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
2614 {
2615         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2616         bool is_update_required = false;
2617
2618         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
2619                 is_update_required = true;
2620
2621         if (data->registry_data.gfx_clk_deep_sleep_support) {
2622                 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
2623                         is_update_required = true;
2624         }
2625
2626         return is_update_required;
2627 }
2628
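/* Tear down dynamic power management by disabling all SMU features. */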
2629 static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
2630 {
2631         int tmp_result, result = 0;
2632
2633         tmp_result = vega12_disable_all_smu_features(hwmgr);
2634         PP_ASSERT_WITH_CODE((tmp_result == 0),
2635                         "Failed to disable all smu features!", result = tmp_result);
2636
2637         return result;
2638 }
2639
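/*
 * Prepare the ASIC for power off: disable all DPM features and clear
 * the WaterMarksLoaded flag so the watermark table is re-uploaded on
 * the next display configuration change.
 */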
2640 static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
2641 {
2642         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2643         int result;
2644
2645         result = vega12_disable_dpm_tasks(hwmgr);
2646         PP_ASSERT_WITH_CODE((0 == result),
2647                         "[disable_dpm_tasks] Failed to disable DPM!",
2648                         );
2649         data->water_marks_bitmap &= ~(WaterMarksLoaded);
2650
2651         return result;
2652 }
2653
2654 #if 0
2655 static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
2656                 uint32_t *sclk_idx, uint32_t *mclk_idx,
2657                 uint32_t min_sclk, uint32_t min_mclk)
2658 {
2659         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2660         struct vega12_dpm_table *dpm_table = &(data->dpm_table);
2661         uint32_t i;
2662
2663         for (i = 0; i < dpm_table->gfx_table.count; i++) {
2664                 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
2665                         dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
2666                         *sclk_idx = i;
2667                         break;
2668                 }
2669         }
2670
2671         for (i = 0; i < dpm_table->mem_table.count; i++) {
2672                 if (dpm_table->mem_table.dpm_levels[i].enabled &&
2673                         dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
2674                         *mclk_idx = i;
2675                         break;
2676                 }
2677         }
2678 }
2679 #endif
2680
2681 #if 0
2682 static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
2683                 struct amd_pp_profile *request)
2684 {
2685         return 0;
2686 }
2687
2688 static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
2689 {
2690         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2691         struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
2692         struct vega12_single_dpm_table *golden_sclk_table =
2693                         &(data->golden_dpm_table.gfx_table);
2694         int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
2695         int golden_value = golden_sclk_table->dpm_levels
2696                         [golden_sclk_table->count - 1].value;
2697
2698         value -= golden_value;
2699         value = DIV_ROUND_UP(value * 100, golden_value);
2700
2701         return value;
2702 }
2703
2704 static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
2705 {
2706         return 0;
2707 }
2708
2709 static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
2710 {
2711         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2712         struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
2713         struct vega12_single_dpm_table *golden_mclk_table =
2714                         &(data->golden_dpm_table.mem_table);
2715         int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
2716         int golden_value = golden_mclk_table->dpm_levels
2717                         [golden_mclk_table->count - 1].value;
2718
2719         value -= golden_value;
2720         value = DIV_ROUND_UP(value * 100, golden_value);
2721
2722         return value;
2723 }
2724
2725 static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
2726 {
2727         return 0;
2728 }
2729 #endif
2730
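/*
 * Hand the SMU the system virtual DRAM address and the DRAM log
 * buffer address and size, one 32-bit value per message.
 */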
2731 static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
2732                                         uint32_t virtual_addr_low,
2733                                         uint32_t virtual_addr_hi,
2734                                         uint32_t mc_addr_low,
2735                                         uint32_t mc_addr_hi,
2736                                         uint32_t size)
2737 {
2738         smum_send_msg_to_smc_with_parameter(hwmgr,
2739                                         PPSMC_MSG_SetSystemVirtualDramAddrHigh,
2740                                         virtual_addr_hi,
2741                                         NULL);
2742         smum_send_msg_to_smc_with_parameter(hwmgr,
2743                                         PPSMC_MSG_SetSystemVirtualDramAddrLow,
2744                                         virtual_addr_low,
2745                                         NULL);
2746         smum_send_msg_to_smc_with_parameter(hwmgr,
2747                                         PPSMC_MSG_DramLogSetDramAddrHigh,
2748                                         mc_addr_hi,
2749                                         NULL);
2750
2751         smum_send_msg_to_smc_with_parameter(hwmgr,
2752                                         PPSMC_MSG_DramLogSetDramAddrLow,
2753                                         mc_addr_low,
2754                                         NULL);
2755
2756         smum_send_msg_to_smc_with_parameter(hwmgr,
2757                                         PPSMC_MSG_DramLogSetDramSize,
2758                                         size,
2759                                         NULL);
2760         return 0;
2761 }
2762
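/*
 * Fill in the thermal trip points: start from the
 * SMU7ThermalWithDelayPolicy defaults, then override the edge, hotspot
 * and HBM limits (plus their CTF offsets for the emergency values)
 * from the PPTable, scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */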
2763 static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2764                 struct PP_TemperatureRange *thermal_data)
2765 {
2766         struct vega12_hwmgr *data =
2767                         (struct vega12_hwmgr *)(hwmgr->backend);
2768         PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2769
2770         memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
2771
2772         thermal_data->max = pp_table->TedgeLimit *
2773                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2774         thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
2775                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2776         thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
2777                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2778         thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
2779                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2780         thermal_data->mem_crit_max = pp_table->ThbmLimit *
2781                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2782         thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
2783                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2784
2785         return 0;
2786 }
2787
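/*
 * GFXOFF control: when the driver rather than the firmware owns
 * GFXOFF, allow or disallow it with the corresponding SMU messages.
 */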
2788 static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
2789 {
2790         struct vega12_hwmgr *data =
2791                         (struct vega12_hwmgr *)(hwmgr->backend);
2792         int ret = 0;
2793
2794         if (data->gfxoff_controlled_by_driver)
2795                 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL);
2796
2797         return ret;
2798 }
2799
2800 static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
2801 {
2802         struct vega12_hwmgr *data =
2803                         (struct vega12_hwmgr *)(hwmgr->backend);
2804         int ret = 0;
2805
2806         if (data->gfxoff_controlled_by_driver)
2807                 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL);
2808
2809         return ret;
2810 }
2811
2812 static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
2813 {
2814         if (enable)
2815                 return vega12_enable_gfx_off(hwmgr);
2816         else
2817                 return vega12_disable_gfx_off(hwmgr);
2818 }
2819
2820 static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
2821                                 PHM_PerformanceLevelDesignation designation, uint32_t index,
2822                                 PHM_PerformanceLevel *level)
2823 {
2824         return 0;
2825 }
2826
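/*
 * Notify the MP1 firmware of a pending state transition. Only the
 * unload case needs an explicit PrepareMp1ForUnload message; the other
 * states are treated as no-ops here.
 */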
2827 static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
2828                                 enum pp_mp1_state mp1_state)
2829 {
2830         uint16_t msg;
2831         int ret;
2832
2833         switch (mp1_state) {
2834         case PP_MP1_STATE_UNLOAD:
2835                 msg = PPSMC_MSG_PrepareMp1ForUnload;
2836                 break;
2837         case PP_MP1_STATE_SHUTDOWN:
2838         case PP_MP1_STATE_RESET:
2839         case PP_MP1_STATE_NONE:
2840         default:
2841                 return 0;
2842         }
2843
2844         PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
2845                             "[PrepareMp1] Failed!",
2846                             return ret);
2847
2848         return 0;
2849 }
2850
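/*
 * Initialize a v1.0 gpu_metrics header: fill the body with 0xFF so
 * unreported fields read as "unknown", then stamp the structure size,
 * format/content revision and a boot-time timestamp.
 */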
2851 static void vega12_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
2852 {
2853         memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
2854
2855         gpu_metrics->common_header.structure_size =
2856                                 sizeof(struct gpu_metrics_v1_0);
2857         gpu_metrics->common_header.format_revision = 1;
2858         gpu_metrics->common_header.content_revision = 0;
2859
2860         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2861 }
2862
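/*
 * Export the cached gpu_metrics_v1_0 table, refreshed from the latest
 * SMU metrics: temperatures, activity, average and current clocks,
 * throttler status, fan speed and current PCIe link width/speed.
 */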
2863 static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
2864                                       void **table)
2865 {
2866         struct vega12_hwmgr *data =
2867                         (struct vega12_hwmgr *)(hwmgr->backend);
2868         struct gpu_metrics_v1_0 *gpu_metrics =
2869                         &data->gpu_metrics_table;
2870         SmuMetrics_t metrics;
2871         uint32_t fan_speed_rpm;
2872         int ret;
2873
2874         ret = vega12_get_metrics_table(hwmgr, &metrics, true);
2875         if (ret)
2876                 return ret;
2877
2878         vega12_init_gpu_metrics_v1_0(gpu_metrics);
2879
2880         gpu_metrics->temperature_edge = metrics.TemperatureEdge;
2881         gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
2882         gpu_metrics->temperature_mem = metrics.TemperatureHBM;
2883         gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
2884         gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
2885
2886         gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
2887         gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
2888
2889         gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
2890         gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
2891         gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
2892
2893         gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
2894         gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
2895         gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
2896         gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
2897         gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
2898
2899         gpu_metrics->throttle_status = metrics.ThrottlerStatus;
2900
2901         vega12_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
2902         gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
2903
2904         gpu_metrics->pcie_link_width =
2905                         vega12_get_current_pcie_link_width(hwmgr);
2906         gpu_metrics->pcie_link_speed =
2907                         vega12_get_current_pcie_link_speed(hwmgr);
2908
2909         *table = (void *)gpu_metrics;
2910
2911         return sizeof(struct gpu_metrics_v1_0);
2912 }
2913
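/* hwmgr callbacks exported to the powerplay core for Vega12. */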
2914 static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
2915         .backend_init = vega12_hwmgr_backend_init,
2916         .backend_fini = vega12_hwmgr_backend_fini,
2917         .asic_setup = vega12_setup_asic_task,
2918         .dynamic_state_management_enable = vega12_enable_dpm_tasks,
2919         .dynamic_state_management_disable = vega12_disable_dpm_tasks,
2920         .patch_boot_state = vega12_patch_boot_state,
2921         .get_sclk = vega12_dpm_get_sclk,
2922         .get_mclk = vega12_dpm_get_mclk,
2923         .notify_smc_display_config_after_ps_adjustment =
2924                         vega12_notify_smc_display_config_after_ps_adjustment,
2925         .force_dpm_level = vega12_dpm_force_dpm_level,
2926         .stop_thermal_controller = vega12_thermal_stop_thermal_controller,
2927         .get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
2928         .reset_fan_speed_to_default =
2929                         vega12_fan_ctrl_reset_fan_speed_to_default,
2930         .get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
2931         .set_fan_control_mode = vega12_set_fan_control_mode,
2932         .get_fan_control_mode = vega12_get_fan_control_mode,
2933         .read_sensor = vega12_read_sensor,
2934         .get_dal_power_level = vega12_get_dal_power_level,
2935         .get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
2936         .get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
2937         .set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
2938         .display_clock_voltage_request = vega12_display_clock_voltage_request,
2939         .force_clock_level = vega12_force_clock_level,
2940         .print_clock_levels = vega12_print_clock_levels,
2941         .apply_clocks_adjust_rules =
2942                 vega12_apply_clocks_adjust_rules,
2943         .pre_display_config_changed =
2944                 vega12_pre_display_configuration_changed_task,
2945         .display_config_changed = vega12_display_configuration_changed_task,
2946         .powergate_uvd = vega12_power_gate_uvd,
2947         .powergate_vce = vega12_power_gate_vce,
2948         .check_smc_update_required_for_display_configuration =
2949                         vega12_check_smc_update_required_for_display_configuration,
2950         .power_off_asic = vega12_power_off_asic,
2951         .disable_smc_firmware_ctf = vega12_thermal_disable_alert,
2952 #if 0
2953         .set_power_profile_state = vega12_set_power_profile_state,
2954         .get_sclk_od = vega12_get_sclk_od,
2955         .set_sclk_od = vega12_set_sclk_od,
2956         .get_mclk_od = vega12_get_mclk_od,
2957         .set_mclk_od = vega12_set_mclk_od,
2958 #endif
2959         .notify_cac_buffer_info = vega12_notify_cac_buffer_info,
2960         .get_thermal_temperature_range = vega12_get_thermal_temperature_range,
2961         .register_irq_handlers = smu9_register_irq_handlers,
2962         .start_thermal_controller = vega12_start_thermal_controller,
2963         .powergate_gfx = vega12_gfx_off_control,
2964         .get_performance_level = vega12_get_performance_level,
2965         .get_asic_baco_capability = smu9_baco_get_capability,
2966         .get_asic_baco_state = smu9_baco_get_state,
2967         .set_asic_baco_state = vega12_baco_set_state,
2968         .get_ppfeature_status = vega12_get_ppfeature_status,
2969         .set_ppfeature_status = vega12_set_ppfeature_status,
2970         .set_mp1_state = vega12_set_mp1_state,
2971         .get_gpu_metrics = vega12_get_gpu_metrics,
2972 };
2973
2974 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
2975 {
2976         hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
2977         hwmgr->pptable_func = &vega12_pptable_funcs;
2978
2979         return 0;
2980 }