/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "smumgr.h"
#include "vega20_inc.h"
#include "soc15_common.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "smu11_driver_if.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"

/* MP Apertures */
#define MP0_Public                      0x03800000
#define MP0_SRAM                        0x03900000
#define MP1_Public                      0x03b00000
#define MP1_SRAM                        0x03c00004

/* address block */
#define smnMP1_FIRMWARE_FLAGS           0x3010024
#define smnMP0_FW_INTF                  0x30101c0
#define smnMP1_PUB_CTRL                 0x3010b14

/*
 * Check whether the MP1 (SMC) firmware is up by reading its FIRMWARE_FLAGS
 * register through the NBIF indirect-access window and testing the
 * INTERRUPTS_ENABLED bit.
 */
static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = hwmgr->adev;
        uint32_t mp1_fw_flags;

        WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
                     (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));

        mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);

        if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
            MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
                return true;

        return false;
}

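/*
 * Note on the access pattern above: smnMP1_FIRMWARE_FLAGS sits behind the
 * NBIF indirect-access window, so the driver writes the full SMN address
 * (the MP1_Public aperture base OR'ed with the register offset) into
 * mmPCIE_INDEX2 and reads the value back through mmPCIE_DATA2. A minimal
 * sketch of the same idiom for any MP1 SMN register ("offset" and "val"
 * are illustrative placeholders, not identifiers from this file):
 *
 *      WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, MP1_Public | offset);
 *      val = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
 */
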
/*
 * Check if SMC has responded to previous message.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   the response value the SMC posted in MP1_SMN_C2PMSG_90
 *           (PPSMC_Result_OK on success).
 */
static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = hwmgr->adev;
        uint32_t reg;

        reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

        phm_wait_for_register_unequal(hwmgr, reg,
                        0, MP1_C2PMSG_90__CONTENT_MASK);

        return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Send a message to the SMC without waiting for its response.
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg the message to send.
 * @return   Always returns 0.
 */
static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
                uint16_t msg)
{
        struct amdgpu_device *adev = hwmgr->adev;

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

        return 0;
}

/*
 * Send a message to the SMC and wait for its response.
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg the message to send.
 * @return   0 on success, -EIO if the SMC response is not PPSMC_Result_OK.
 */
static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
        struct amdgpu_device *adev = hwmgr->adev;
        int ret = 0;

        vega20_wait_for_response(hwmgr);

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

        vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

        ret = vega20_wait_for_response(hwmgr);
        if (ret != PPSMC_Result_OK)
                pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

        return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

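/*
 * The handshake above is: wait out any in-flight message, clear the
 * response register (C2PMSG_90), ring the doorbell (C2PMSG_66), then poll
 * C2PMSG_90 until the firmware posts a result. An illustrative caller,
 * using a message this file already sends:
 *
 *      ret = vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh);
 *      if (ret)
 *              return ret;
 */
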
/*
 * Send a message to the SMC with a parameter.
 * @param    hwmgr: the address of the powerplay hardware manager.
 * @param    msg: the message to send.
 * @param    parameter: the parameter to send.
 * @return   0 on success, -EIO if the SMC response is not PPSMC_Result_OK.
 */
static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
                uint16_t msg, uint32_t parameter)
{
        struct amdgpu_device *adev = hwmgr->adev;
        int ret = 0;

        vega20_wait_for_response(hwmgr);

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

        vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

        ret = vega20_wait_for_response(hwmgr);
        if (ret != PPSMC_Result_OK)
                pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

        return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

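/*
 * Same handshake as vega20_send_msg_to_smc(), with the 32-bit payload
 * staged in C2PMSG_82 before the doorbell write. 64-bit values such as
 * DRAM addresses are therefore split across two messages, as the
 * table-transfer helpers below do. Illustrative sketch ("mc_addr" stands
 * in for any 64-bit GPU address):
 *
 *      vega20_send_msg_to_smc_with_parameter(hwmgr,
 *                      PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(mc_addr));
 *      vega20_send_msg_to_smc_with_parameter(hwmgr,
 *                      PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(mc_addr));
 */
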
/*
 * Read back the 32-bit payload the SMC left in MP1_SMN_C2PMSG_82 after
 * servicing the previous message.
 */
static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = hwmgr->adev;

        return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

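/*
 * Query-style messages return their result through the same argument
 * register: send the message, then read the payload back. Illustrative
 * sketch, mirroring vega20_get_enabled_smc_features() below:
 *
 *      vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow);
 *      smc_features_low = vega20_get_argument(hwmgr);
 */
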
/*
 * Copy a table from the SMC into the driver FB.
 * @param   hwmgr       the address of the HW manager
 * @param   table       the driver-side buffer to copy the table into
 * @param   table_id    the SMU table ID to copy from
 */
static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                                      uint8_t *table, int16_t table_id)
{
        struct vega20_smumgr *priv =
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        int ret = 0;

        PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
                        "Invalid SMU Table ID!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);

        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
                        "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
                        return ret);

        memcpy(table, priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);

        return 0;
}

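/*
 * Usage sketch (illustrative only): fetching the live metrics table into a
 * caller-owned buffer; "metrics" is a hypothetical local variable:
 *
 *      SmuMetrics_t metrics;
 *      int ret;
 *
 *      ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics,
 *                                       TABLE_SMU_METRICS);
 */
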
/*
 * Copy a table from the driver FB into the SMC.
 * @param   hwmgr       the address of the HW manager
 * @param   table       the driver-side buffer holding the table to upload
 * @param   table_id    the SMU table ID to copy to
 */
static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
                                    uint8_t *table, int16_t table_id)
{
        struct vega20_smumgr *priv =
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        int ret = 0;

        PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
                        "Invalid SMU Table ID!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);

        memcpy(priv->smu_tables.entry[table_id].table, table,
                        priv->smu_tables.entry[table_id].size);

        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
                        "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
                        return ret);

        return 0;
}

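/*
 * Usage sketch (illustrative only): pushing an updated watermarks table to
 * the firmware; "wm_table" is a hypothetical, fully populated Watermarks_t:
 *
 *      ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)&wm_table,
 *                                     TABLE_WATERMARKS);
 */
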
int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
                uint8_t *table, uint16_t workload_type)
{
        struct vega20_smumgr *priv =
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        int ret = 0;

        memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
                        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
                        "[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
                        "[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
                        "[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
                        return ret);

        return 0;
}

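/*
 * Unlike the generic table transfers above, the activity-monitor transfer
 * encodes the workload slot in the upper 16 bits of the message parameter
 * (TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16)), so one table ID
 * can address several per-workload coefficient sets. Illustrative call,
 * where "coeff" is a hypothetical DpmActivityMonitorCoeffInt_t and 0 a
 * workload slot index:
 *
 *      ret = vega20_set_activity_monitor_coeff(hwmgr, (uint8_t *)&coeff, 0);
 */
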
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
                uint8_t *table, uint16_t workload_type)
{
        struct vega20_smumgr *priv =
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        int ret = 0;

        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
                        "[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
                        "[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
                        return ret);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
                        TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
                        "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
                        return ret);

        memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
                        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

        return 0;
}

int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
                bool enable, uint64_t feature_mask)
{
        uint32_t smu_features_low, smu_features_high;
        int ret = 0;

        smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
        smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);

        if (enable) {
                PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
                                return ret);
                PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
                                return ret);
        } else {
                PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
                                return ret);
                PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
                                "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
                                return ret);
        }

        return 0;
}

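/*
 * The 64-bit feature mask is split into the low/high 32-bit words the
 * PPSMC messages expect using the SMU_FEATURES_LOW/HIGH mask and shift
 * defines. Illustrative sketch, assuming a FEATURE_DPM_UCLK_BIT-style bit
 * define from the SMU11 driver interface header:
 *
 *      ret = vega20_enable_smc_features(hwmgr, true,
 *                      1ULL << FEATURE_DPM_UCLK_BIT);
 */
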
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
                uint64_t *features_enabled)
{
        uint32_t smc_features_low, smc_features_high;
        int ret = 0;

        if (features_enabled == NULL)
                return -EINVAL;

        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
                        PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
                        return ret);
        smc_features_low = vega20_get_argument(hwmgr);
        PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
                        PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
                        "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
                        return ret);
        smc_features_high = vega20_get_argument(hwmgr);

        *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
                        (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));

        return 0;
}

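/*
 * Callers typically test individual bits in the reassembled 64-bit mask,
 * as vega20_is_dpm_running() below does with SMC_DPM_FEATURES.
 * Illustrative sketch ("dpm_running" is a hypothetical local):
 *
 *      uint64_t features_enabled = 0;
 *      bool dpm_running;
 *
 *      vega20_get_enabled_smc_features(hwmgr, &features_enabled);
 *      dpm_running = !!(features_enabled & SMC_DPM_FEATURES);
 */
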
static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
{
        struct vega20_smumgr *priv =
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        int ret = 0;

        if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
                ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
                                upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
                if (!ret)
                        ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetToolsDramAddrLow,
                                        lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
        }

        return ret;
}

static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
        struct vega20_smumgr *priv;
        unsigned long tools_size = 0x19000;
        int ret = 0;

        struct cgs_firmware_info info = {0};

        ret = cgs_get_firmware_info(hwmgr->device,
                                smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
                                &info);
        if (ret || !info.kptr)
                return -EINVAL;

        priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        hwmgr->smu_backend = priv;

        /* allocate space for pptable */
        ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
                        sizeof(PPTable_t),
                        PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &priv->smu_tables.entry[TABLE_PPTABLE].handle,
                        &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
                        &priv->smu_tables.entry[TABLE_PPTABLE].table);
        if (ret)
                goto free_backend;

        priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
        priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);

        /* allocate space for watermarks table */
        ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
                        sizeof(Watermarks_t),
                        PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &priv->smu_tables.entry[TABLE_WATERMARKS].handle,
                        &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
                        &priv->smu_tables.entry[TABLE_WATERMARKS].table);
        if (ret)
                goto err0;

        priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
        priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);

        /* allocate space for pmstatuslog table */
        ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
                        tools_size,
                        PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
                        &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
                        &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
        if (ret)
                goto err1;

        priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
        priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;

        /* allocate space for OverDrive table */
        ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
                        sizeof(OverDriveTable_t),
                        PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
                        &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
                        &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
        if (ret)
                goto err2;

        priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
        priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);

        /* allocate space for SmuMetrics table */
        ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
                        sizeof(SmuMetrics_t),
                        PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
                        &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
                        &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
        if (ret)
                goto err3;

        priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
        priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);

        /* allocate space for ActivityMonitor table */
        ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
                        sizeof(DpmActivityMonitorCoeffInt_t),
                        PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
                        &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
                        &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
        if (ret)
                goto err4;

        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);

        return 0;

err4:
        amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
                        &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
                        &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
err3:
        amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
                        &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
                        &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
err2:
        amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
                        &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
                        &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
err1:
        amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
                        &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
                        &priv->smu_tables.entry[TABLE_WATERMARKS].table);
err0:
        amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
                        &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
                        &priv->smu_tables.entry[TABLE_PPTABLE].table);
free_backend:
        kfree(hwmgr->smu_backend);

        return -EINVAL;
}

static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
        struct vega20_smumgr *priv =
                        (struct vega20_smumgr *)(hwmgr->smu_backend);

        if (priv) {
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
                                &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
                                &priv->smu_tables.entry[TABLE_PPTABLE].table);
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
                                &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
                                &priv->smu_tables.entry[TABLE_WATERMARKS].table);
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
                                &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
                                &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
                                &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
                                &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
                                &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
                                &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
                                &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
                                &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
                kfree(hwmgr->smu_backend);
                hwmgr->smu_backend = NULL;
        }
        return 0;
}

static int vega20_start_smu(struct pp_hwmgr *hwmgr)
{
        int ret;

        ret = vega20_is_smc_ram_running(hwmgr);
        PP_ASSERT_WITH_CODE(ret,
                        "[Vega20StartSmu] SMC is not running!",
                        return -EINVAL);

        ret = vega20_set_tools_address(hwmgr);
        PP_ASSERT_WITH_CODE(!ret,
                        "[Vega20StartSmu] Failed to set tools address!",
                        return ret);

        return 0;
}

static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
{
        uint64_t features_enabled = 0;

        vega20_get_enabled_smc_features(hwmgr, &features_enabled);

        if (features_enabled & SMC_DPM_FEATURES)
                return true;
        else
                return false;
}

/* rw: true reads the table from the SMC, false writes it to the SMC */
static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
                                    uint16_t table_id, bool rw)
{
        int ret;

        if (rw)
                ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
        else
                ret = vega20_copy_table_to_smc(hwmgr, table, table_id);

        return ret;
}

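/*
 * The rw flag selects the transfer direction: true fetches a table from the
 * SMU into the caller's buffer, false uploads the buffer to the SMU.
 * Illustrative sketch reading back the OverDrive settings, where "od_table"
 * is a hypothetical OverDriveTable_t:
 *
 *      ret = vega20_smc_table_manager(hwmgr, (uint8_t *)&od_table,
 *                                     TABLE_OVERDRIVE, true);
 */
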
const struct pp_smumgr_func vega20_smu_funcs = {
        .smu_init = &vega20_smu_init,
        .smu_fini = &vega20_smu_fini,
        .start_smu = &vega20_start_smu,
        .request_smu_load_specific_fw = NULL,
        .send_msg_to_smc = &vega20_send_msg_to_smc,
        .send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .is_dpm_running = vega20_is_dpm_running,
        .get_argument = vega20_get_argument,
        .smc_table_manager = vega20_smc_table_manager,
};