drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24
25 #include "pp_debug.h"
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "soc15_common.h"
30 #include "smu_v11_0.h"
31 #include "smu_v12_0.h"
32 #include "atom.h"
33 #include "amd_pcie.h"
34 #include "vega20_ppt.h"
35 #include "arcturus_ppt.h"
36 #include "navi10_ppt.h"
37 #include "renoir_ppt.h"
38
39 #undef __SMU_DUMMY_MAP
40 #define __SMU_DUMMY_MAP(type)   #type
41 static const char* __smu_message_names[] = {
42         SMU_MESSAGE_TYPES
43 };
44
45 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
46 {
47         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
48                 return "unknown smu message";
49         return __smu_message_names[type];
50 }
51
52 #undef __SMU_DUMMY_MAP
53 #define __SMU_DUMMY_MAP(fea)    #fea
54 static const char* __smu_feature_names[] = {
55         SMU_FEATURE_MASKS
56 };
57
58 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
59 {
60         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
61                 return "unknown smu feature";
62         return __smu_feature_names[feature];
63 }
64
65 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
66 {
67         size_t size = 0;
68         int ret = 0, i = 0;
69         uint32_t feature_mask[2] = { 0 };
70         int32_t feature_index = 0;
71         uint32_t count = 0;
72         uint32_t sort_feature[SMU_FEATURE_COUNT];
73         uint64_t hw_feature_count = 0;
74
75         mutex_lock(&smu->mutex);
76
77         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
78         if (ret)
79                 goto failed;
80
81         size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
82                         feature_mask[1], feature_mask[0]);
83
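           /* build a map from hardware feature bit index to driver feature enum
            * so that the features can be listed below in hardware bit order */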
84         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
85                 feature_index = smu_feature_get_index(smu, i);
86                 if (feature_index < 0)
87                         continue;
88                 sort_feature[feature_index] = i;
89                 hw_feature_count++;
90         }
91
92         for (i = 0; i < hw_feature_count; i++) {
93                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
94                                count++,
95                                smu_get_feature_name(smu, sort_feature[i]),
96                                i,
97                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
98                                "enabled" : "disabled");
99         }
100
101 failed:
102         mutex_unlock(&smu->mutex);
103
104         return size;
105 }
106
107 static int smu_feature_update_enable_state(struct smu_context *smu,
108                                            uint64_t feature_mask,
109                                            bool enabled)
110 {
111         struct smu_feature *feature = &smu->smu_feature;
112         uint32_t feature_low = 0, feature_high = 0;
113         int ret = 0;
114
115         if (!smu->pm_enabled)
116                 return ret;
117
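            /* split the 64-bit feature mask into the two 32-bit halves expected
             * by the EnableSmuFeatures/DisableSmuFeatures Low and High messages */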
118         feature_low = (feature_mask >> 0 ) & 0xffffffff;
119         feature_high = (feature_mask >> 32) & 0xffffffff;
120
121         if (enabled) {
122                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
123                                                   feature_low);
124                 if (ret)
125                         return ret;
126                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
127                                                   feature_high);
128                 if (ret)
129                         return ret;
130         } else {
131                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
132                                                   feature_low);
133                 if (ret)
134                         return ret;
135                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
136                                                   feature_high);
137                 if (ret)
138                         return ret;
139         }
140
141         mutex_lock(&feature->mutex);
142         if (enabled)
143                 bitmap_or(feature->enabled, feature->enabled,
144                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
145         else
146                 bitmap_andnot(feature->enabled, feature->enabled,
147                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
148         mutex_unlock(&feature->mutex);
149
150         return ret;
151 }
152
153 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
154 {
155         int ret = 0;
156         uint32_t feature_mask[2] = { 0 };
157         uint64_t feature_2_enabled = 0;
158         uint64_t feature_2_disabled = 0;
159         uint64_t feature_enables = 0;
160
161         mutex_lock(&smu->mutex);
162
163         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
164         if (ret)
165                 goto out;
166
167         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
168
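            /* bits set in new_mask but not currently enabled must be enabled;
             * bits currently enabled but cleared in new_mask must be disabled */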
169         feature_2_enabled  = ~feature_enables & new_mask;
170         feature_2_disabled = feature_enables & ~new_mask;
171
172         if (feature_2_enabled) {
173                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
174                 if (ret)
175                         goto out;
176         }
177         if (feature_2_disabled) {
178                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
179                 if (ret)
180                         goto out;
181         }
182
183 out:
184         mutex_unlock(&smu->mutex);
185
186         return ret;
187 }
188
189 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
190 {
191         int ret = 0;
192
193         if (!if_version && !smu_version)
194                 return -EINVAL;
195
196         if (if_version) {
197                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
198                 if (ret)
199                         return ret;
200
201                 ret = smu_read_smc_arg(smu, if_version);
202                 if (ret)
203                         return ret;
204         }
205
206         if (smu_version) {
207                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
208                 if (ret)
209                         return ret;
210
211                 ret = smu_read_smc_arg(smu, smu_version);
212                 if (ret)
213                         return ret;
214         }
215
216         return ret;
217 }
218
219 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
220                             uint32_t min, uint32_t max)
221 {
222         int ret = 0;
223
224         if (min <= 0 && max <= 0)
225                 return -EINVAL;
226
227         if (!smu_clk_dpm_is_enabled(smu, clk_type))
228                 return 0;
229
230         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
231         return ret;
232 }
233
234 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
235                             uint32_t min, uint32_t max)
236 {
237         int ret = 0, clk_id = 0;
238         uint32_t param;
239
240         if (min <= 0 && max <= 0)
241                 return -EINVAL;
242
243         if (!smu_clk_dpm_is_enabled(smu, clk_type))
244                 return 0;
245
246         clk_id = smu_clk_get_index(smu, clk_type);
247         if (clk_id < 0)
248                 return clk_id;
249
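            /* the message parameter packs the clock id in the upper 16 bits and
             * the frequency limit in the lower 16 bits */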
250         if (max > 0) {
251                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
252                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
253                                                   param);
254                 if (ret)
255                         return ret;
256         }
257
258         if (min > 0) {
259                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
260                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
261                                                   param);
262                 if (ret)
263                         return ret;
264         }
265
266
267         return ret;
268 }
269
270 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
271                            uint32_t *min, uint32_t *max, bool lock_needed)
272 {
273         uint32_t clock_limit;
274         int ret = 0;
275
276         if (!min && !max)
277                 return -EINVAL;
278
279         if (lock_needed)
280                 mutex_lock(&smu->mutex);
281
282         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
283                 switch (clk_type) {
284                 case SMU_MCLK:
285                 case SMU_UCLK:
286                         clock_limit = smu->smu_table.boot_values.uclk;
287                         break;
288                 case SMU_GFXCLK:
289                 case SMU_SCLK:
290                         clock_limit = smu->smu_table.boot_values.gfxclk;
291                         break;
292                 case SMU_SOCCLK:
293                         clock_limit = smu->smu_table.boot_values.socclk;
294                         break;
295                 default:
296                         clock_limit = 0;
297                         break;
298                 }
299
300                 /* clock in Mhz unit */
301                 if (min)
302                         *min = clock_limit / 100;
303                 if (max)
304                         *max = clock_limit / 100;
305         } else {
306                 /*
 307                  * TODO: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
 308                  * core driver, and add helpers for functionality that is common (SMU_v11_x | SMU_v12_x funcs).
309                  */
310                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
311         }
312
313         if (lock_needed)
314                 mutex_unlock(&smu->mutex);
315
316         return ret;
317 }
318
319 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
320                               uint16_t level, uint32_t *value)
321 {
322         int ret = 0, clk_id = 0;
323         uint32_t param;
324
325         if (!value)
326                 return -EINVAL;
327
328         if (!smu_clk_dpm_is_enabled(smu, clk_type))
329                 return 0;
330
331         clk_id = smu_clk_get_index(smu, clk_type);
332         if (clk_id < 0)
333                 return clk_id;
334
335         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
336
 337         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
338                                           param);
339         if (ret)
340                 return ret;
341
342         ret = smu_read_smc_arg(smu, &param);
343         if (ret)
344                 return ret;
345
 346         /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
 347          * not supported for now, so mask it off */
348         *value = param & 0x7fffffff;
349
350         return ret;
351 }
352
353 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
354                             uint32_t *value)
355 {
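            /* a level index of 0xff asks the SMC for the number of DPM levels
             * rather than the frequency of a particular level */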
356         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
357 }
358
359 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
360 {
361         enum smu_feature_mask feature_id = 0;
362
363         switch (clk_type) {
364         case SMU_MCLK:
365         case SMU_UCLK:
366                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
367                 break;
368         case SMU_GFXCLK:
369         case SMU_SCLK:
370                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
371                 break;
372         case SMU_SOCCLK:
373                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
374                 break;
375         default:
376                 return true;
377         }
378
 379         if (!smu_feature_is_enabled(smu, feature_id)) {
380                 return false;
381         }
382
383         return true;
384 }
385
386
387 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
388                            bool gate)
389 {
390         int ret = 0;
391
392         mutex_lock(&smu->mutex);
393
394         switch (block_type) {
395         case AMD_IP_BLOCK_TYPE_UVD:
396                 ret = smu_dpm_set_uvd_enable(smu, gate);
397                 break;
398         case AMD_IP_BLOCK_TYPE_VCE:
399                 ret = smu_dpm_set_vce_enable(smu, gate);
400                 break;
401         case AMD_IP_BLOCK_TYPE_GFX:
402                 ret = smu_gfx_off_control(smu, gate);
403                 break;
404         case AMD_IP_BLOCK_TYPE_SDMA:
405                 ret = smu_powergate_sdma(smu, gate);
406                 break;
407         default:
408                 break;
409         }
410
411         mutex_unlock(&smu->mutex);
412
413         return ret;
414 }
415
416 int smu_get_power_num_states(struct smu_context *smu,
417                              struct pp_states_info *state_info)
418 {
419         if (!state_info)
420                 return -EINVAL;
421
 422         /* power states are not supported, report a single default state */
423         memset(state_info, 0, sizeof(struct pp_states_info));
424         state_info->nums = 1;
425         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
426
427         return 0;
428 }
429
430 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
431                            void *data, uint32_t *size)
432 {
433         struct smu_power_context *smu_power = &smu->smu_power;
434         struct smu_power_gate *power_gate = &smu_power->power_gate;
435         int ret = 0;
436
 437         if (!data || !size)
438                 return -EINVAL;
439
440         switch (sensor) {
441         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
442                 *((uint32_t *)data) = smu->pstate_sclk;
443                 *size = 4;
444                 break;
445         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
446                 *((uint32_t *)data) = smu->pstate_mclk;
447                 *size = 4;
448                 break;
449         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
450                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
451                 *size = 8;
452                 break;
453         case AMDGPU_PP_SENSOR_UVD_POWER:
454                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
455                 *size = 4;
456                 break;
457         case AMDGPU_PP_SENSOR_VCE_POWER:
458                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
459                 *size = 4;
460                 break;
461         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
462                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
463                 *size = 4;
464                 break;
465         default:
466                 ret = -EINVAL;
467                 break;
468         }
469
470         if (ret)
471                 *size = 0;
472
473         return ret;
474 }
475
476 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
477                      void *table_data, bool drv2smu)
478 {
479         struct smu_table_context *smu_table = &smu->smu_table;
480         struct amdgpu_device *adev = smu->adev;
481         struct smu_table *table = NULL;
482         int ret = 0;
483         int table_id = smu_table_get_index(smu, table_index);
484
485         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
486                 return -EINVAL;
487
488         table = &smu_table->tables[table_index];
489
490         if (drv2smu)
491                 memcpy(table->cpu_addr, table_data, table->size);
492
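            /* hand the SMC the table's VRAM address as two 32-bit halves, then
             * trigger the copy in the requested direction; the transfer argument
             * packs the table id in the low 16 bits and the caller's argument in
             * the high 16 bits */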
493         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
494                                           upper_32_bits(table->mc_address));
495         if (ret)
496                 return ret;
497         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
498                                           lower_32_bits(table->mc_address));
499         if (ret)
500                 return ret;
501         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
502                                           SMU_MSG_TransferTableDram2Smu :
503                                           SMU_MSG_TransferTableSmu2Dram,
504                                           table_id | ((argument & 0xFFFF) << 16));
505         if (ret)
506                 return ret;
507
508         /* flush hdp cache */
509         adev->nbio.funcs->hdp_flush(adev, NULL);
510
511         if (!drv2smu)
512                 memcpy(table_data, table->cpu_addr, table->size);
513
514         return ret;
515 }
516
517 bool is_support_sw_smu(struct amdgpu_device *adev)
518 {
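            /* Vega20 uses the SW SMU path only when explicitly requested with
             * amdgpu_dpm=2; Arcturus and newer always use it */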
519         if (adev->asic_type == CHIP_VEGA20)
520                 return (amdgpu_dpm == 2) ? true : false;
521         else if (adev->asic_type >= CHIP_ARCTURUS)
522                 return true;
523         else
524                 return false;
525 }
526
527 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
528 {
529         if (amdgpu_dpm != 1)
530                 return false;
531
532         if (adev->asic_type == CHIP_VEGA20)
533                 return true;
534
535         return false;
536 }
537
538 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
539 {
540         struct smu_table_context *smu_table = &smu->smu_table;
541         uint32_t powerplay_table_size;
542
543         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
544                 return -EINVAL;
545
546         mutex_lock(&smu->mutex);
547
548         if (smu_table->hardcode_pptable)
549                 *table = smu_table->hardcode_pptable;
550         else
551                 *table = smu_table->power_play_table;
552
553         powerplay_table_size = smu_table->power_play_table_size;
554
555         mutex_unlock(&smu->mutex);
556
557         return powerplay_table_size;
558 }
559
 560 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
561 {
562         struct smu_table_context *smu_table = &smu->smu_table;
563         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
564         int ret = 0;
565
566         if (!smu->pm_enabled)
567                 return -EINVAL;
568         if (header->usStructureSize != size) {
 569                 pr_err("pp table size does not match!\n");
570                 return -EIO;
571         }
572
573         mutex_lock(&smu->mutex);
574         if (!smu_table->hardcode_pptable)
575                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
576         if (!smu_table->hardcode_pptable) {
577                 ret = -ENOMEM;
578                 goto failed;
579         }
580
581         memcpy(smu_table->hardcode_pptable, buf, size);
582         smu_table->power_play_table = smu_table->hardcode_pptable;
583         smu_table->power_play_table_size = size;
584
585         ret = smu_reset(smu);
586         if (ret)
587                 pr_info("smu reset failed, ret = %d\n", ret);
588
589 failed:
590         mutex_unlock(&smu->mutex);
591         return ret;
592 }
593
594 int smu_feature_init_dpm(struct smu_context *smu)
595 {
596         struct smu_feature *feature = &smu->smu_feature;
597         int ret = 0;
598         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
599
600         if (!smu->pm_enabled)
601                 return ret;
602         mutex_lock(&feature->mutex);
603         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
604         mutex_unlock(&feature->mutex);
605
606         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
607                                              SMU_FEATURE_MAX/32);
608         if (ret)
609                 return ret;
610
611         mutex_lock(&feature->mutex);
612         bitmap_or(feature->allowed, feature->allowed,
613                       (unsigned long *)allowed_feature_mask,
614                       feature->feature_num);
615         mutex_unlock(&feature->mutex);
616
617         return ret;
618 }
619
620
621 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
622 {
623         struct amdgpu_device *adev = smu->adev;
624         struct smu_feature *feature = &smu->smu_feature;
625         int feature_id;
626         int ret = 0;
627
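            /* APUs report every feature as enabled */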
628         if (adev->flags & AMD_IS_APU)
629                 return 1;
630
631         feature_id = smu_feature_get_index(smu, mask);
632         if (feature_id < 0)
633                 return 0;
634
635         WARN_ON(feature_id > feature->feature_num);
636
637         mutex_lock(&feature->mutex);
638         ret = test_bit(feature_id, feature->enabled);
639         mutex_unlock(&feature->mutex);
640
641         return ret;
642 }
643
644 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
645                             bool enable)
646 {
647         struct smu_feature *feature = &smu->smu_feature;
648         int feature_id;
649
650         feature_id = smu_feature_get_index(smu, mask);
651         if (feature_id < 0)
652                 return -EINVAL;
653
654         WARN_ON(feature_id > feature->feature_num);
655
656         return smu_feature_update_enable_state(smu,
657                                                1ULL << feature_id,
658                                                enable);
659 }
660
661 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
662 {
663         struct smu_feature *feature = &smu->smu_feature;
664         int feature_id;
665         int ret = 0;
666
667         feature_id = smu_feature_get_index(smu, mask);
668         if (feature_id < 0)
669                 return 0;
670
671         WARN_ON(feature_id > feature->feature_num);
672
673         mutex_lock(&feature->mutex);
674         ret = test_bit(feature_id, feature->supported);
675         mutex_unlock(&feature->mutex);
676
677         return ret;
678 }
679
680 int smu_feature_set_supported(struct smu_context *smu,
681                               enum smu_feature_mask mask,
682                               bool enable)
683 {
684         struct smu_feature *feature = &smu->smu_feature;
685         int feature_id;
686         int ret = 0;
687
688         feature_id = smu_feature_get_index(smu, mask);
689         if (feature_id < 0)
690                 return -EINVAL;
691
692         WARN_ON(feature_id > feature->feature_num);
693
694         mutex_lock(&feature->mutex);
695         if (enable)
696                 test_and_set_bit(feature_id, feature->supported);
697         else
698                 test_and_clear_bit(feature_id, feature->supported);
699         mutex_unlock(&feature->mutex);
700
701         return ret;
702 }
703
704 static int smu_set_funcs(struct amdgpu_device *adev)
705 {
706         struct smu_context *smu = &adev->smu;
707
708         switch (adev->asic_type) {
709         case CHIP_VEGA20:
710                 vega20_set_ppt_funcs(smu);
711                 break;
712         case CHIP_NAVI10:
713         case CHIP_NAVI14:
714         case CHIP_NAVI12:
715                 navi10_set_ppt_funcs(smu);
716                 break;
717         case CHIP_ARCTURUS:
718                 arcturus_set_ppt_funcs(smu);
719                 break;
720         case CHIP_RENOIR:
721                 renoir_set_ppt_funcs(smu);
722                 break;
723         default:
724                 return -EINVAL;
725         }
726
727         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
728                 smu->od_enabled = true;
729
730         return 0;
731 }
732
733 static int smu_early_init(void *handle)
734 {
735         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
736         struct smu_context *smu = &adev->smu;
737
738         smu->adev = adev;
739         smu->pm_enabled = !!amdgpu_dpm;
740         smu->is_apu = false;
741         mutex_init(&smu->mutex);
742
743         return smu_set_funcs(adev);
744 }
745
746 static int smu_late_init(void *handle)
747 {
748         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
749         struct smu_context *smu = &adev->smu;
750
751         if (!smu->pm_enabled)
752                 return 0;
753
754         smu_handle_task(&adev->smu,
755                         smu->smu_dpm.dpm_level,
756                         AMD_PP_TASK_COMPLETE_INIT,
757                         false);
758
759         return 0;
760 }
761
762 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
763                             uint16_t *size, uint8_t *frev, uint8_t *crev,
764                             uint8_t **addr)
765 {
766         struct amdgpu_device *adev = smu->adev;
767         uint16_t data_start;
768
769         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
770                                            size, frev, crev, &data_start))
771                 return -EINVAL;
772
773         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
774
775         return 0;
776 }
777
778 static int smu_initialize_pptable(struct smu_context *smu)
779 {
780         /* TODO */
781         return 0;
782 }
783
784 static int smu_smc_table_sw_init(struct smu_context *smu)
785 {
786         int ret;
787
788         ret = smu_initialize_pptable(smu);
789         if (ret) {
 790                 pr_err("Failed to initialize the pptable!\n");
791                 return ret;
792         }
793
794         /**
795          * Create smu_table structure, and init smc tables such as
 796          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
797          */
798         ret = smu_init_smc_tables(smu);
799         if (ret) {
800                 pr_err("Failed to init smc tables!\n");
801                 return ret;
802         }
803
804         /**
 805          * Create the smu_power_context structure, and allocate the smu_dpm_context
 806          * and context size needed to fill in the smu_power_context data.
807          */
808         ret = smu_init_power(smu);
809         if (ret) {
 810                 pr_err("Failed to init smu power!\n");
811                 return ret;
812         }
813
814         return 0;
815 }
816
817 static int smu_smc_table_sw_fini(struct smu_context *smu)
818 {
819         int ret;
820
821         ret = smu_fini_smc_tables(smu);
822         if (ret) {
 823                 pr_err("Failed to fini smc tables!\n");
824                 return ret;
825         }
826
827         return 0;
828 }
829
830 static int smu_sw_init(void *handle)
831 {
832         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
833         struct smu_context *smu = &adev->smu;
834         int ret;
835
836         smu->pool_size = adev->pm.smu_prv_buffer_size;
837         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
838         mutex_init(&smu->smu_feature.mutex);
839         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
840         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
841         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
842
843         mutex_init(&smu->smu_baco.mutex);
844         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
845         smu->smu_baco.platform_support = false;
846
847         mutex_init(&smu->sensor_lock);
848
849         smu->watermarks_bitmap = 0;
850         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
851         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
852
853         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
854         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
855         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
856         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
857         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
858         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
859         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
860         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
861
862         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
863         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
864         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
865         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
866         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
867         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
868         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
869         smu->display_config = &adev->pm.pm_display_cfg;
870
871         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
872         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
873         ret = smu_init_microcode(smu);
874         if (ret) {
875                 pr_err("Failed to load smu firmware!\n");
876                 return ret;
877         }
878
879         ret = smu_smc_table_sw_init(smu);
880         if (ret) {
881                 pr_err("Failed to sw init smc table!\n");
882                 return ret;
883         }
884
885         ret = smu_register_irq_handler(smu);
886         if (ret) {
887                 pr_err("Failed to register smc irq handler!\n");
888                 return ret;
889         }
890
891         return 0;
892 }
893
894 static int smu_sw_fini(void *handle)
895 {
896         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
897         struct smu_context *smu = &adev->smu;
898         int ret;
899
900         kfree(smu->irq_source);
901         smu->irq_source = NULL;
902
903         ret = smu_smc_table_sw_fini(smu);
904         if (ret) {
905                 pr_err("Failed to sw fini smc table!\n");
906                 return ret;
907         }
908
909         ret = smu_fini_power(smu);
910         if (ret) {
 911                 pr_err("Failed to fini smu power!\n");
912                 return ret;
913         }
914
915         return 0;
916 }
917
918 static int smu_init_fb_allocations(struct smu_context *smu)
919 {
920         struct amdgpu_device *adev = smu->adev;
921         struct smu_table_context *smu_table = &smu->smu_table;
922         struct smu_table *tables = smu_table->tables;
923         int ret, i;
924
925         for (i = 0; i < SMU_TABLE_COUNT; i++) {
926                 if (tables[i].size == 0)
927                         continue;
928                 ret = amdgpu_bo_create_kernel(adev,
929                                               tables[i].size,
930                                               tables[i].align,
931                                               tables[i].domain,
932                                               &tables[i].bo,
933                                               &tables[i].mc_address,
934                                               &tables[i].cpu_addr);
935                 if (ret)
936                         goto failed;
937         }
938
939         return 0;
940 failed:
941         while (--i >= 0) {
942                 if (tables[i].size == 0)
943                         continue;
944                 amdgpu_bo_free_kernel(&tables[i].bo,
945                                       &tables[i].mc_address,
946                                       &tables[i].cpu_addr);
947
948         }
949         return ret;
950 }
951
952 static int smu_fini_fb_allocations(struct smu_context *smu)
953 {
954         struct smu_table_context *smu_table = &smu->smu_table;
955         struct smu_table *tables = smu_table->tables;
956         uint32_t i = 0;
957
958         if (!tables)
959                 return 0;
960
961         for (i = 0; i < SMU_TABLE_COUNT; i++) {
962                 if (tables[i].size == 0)
963                         continue;
964                 amdgpu_bo_free_kernel(&tables[i].bo,
965                                       &tables[i].mc_address,
966                                       &tables[i].cpu_addr);
967         }
968
969         return 0;
970 }
971
972 static int smu_smc_table_hw_init(struct smu_context *smu,
973                                  bool initialize)
974 {
975         struct amdgpu_device *adev = smu->adev;
976         int ret;
977
978         if (smu_is_dpm_running(smu) && adev->in_suspend) {
979                 pr_info("dpm has been enabled\n");
980                 return 0;
981         }
982
983         if (adev->asic_type != CHIP_ARCTURUS) {
984                 ret = smu_init_display_count(smu, 0);
985                 if (ret)
986                         return ret;
987         }
988
989         if (initialize) {
 990                 /* get boot_values from vbios to set revision, gfxclk, etc. */
991                 ret = smu_get_vbios_bootup_values(smu);
992                 if (ret)
993                         return ret;
994
995                 ret = smu_setup_pptable(smu);
996                 if (ret)
997                         return ret;
998
999                 ret = smu_get_clk_info_from_vbios(smu);
1000                 if (ret)
1001                         return ret;
1002
1003                 /*
 1004                  * check that the format_revision in vbios is up to date with the
 1005                  * pptable header version, and that the structure size is not 0.
1006                  */
1007                 ret = smu_check_pptable(smu);
1008                 if (ret)
1009                         return ret;
1010
1011                 /*
1012                  * allocate vram bos to store smc table contents.
1013                  */
1014                 ret = smu_init_fb_allocations(smu);
1015                 if (ret)
1016                         return ret;
1017
1018                 /*
 1019                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
 1020                  * smu_table_context structure. Then read the smc_dpm_table from vbios
 1021                  * and fill it into smc_pptable as well.
1022                  */
1023                 ret = smu_parse_pptable(smu);
1024                 if (ret)
1025                         return ret;
1026
1027                 /*
 1028                  * Send the GetDriverIfVersion message to check that the return value
 1029                  * is equal to the DRIVER_IF_VERSION in the smc header.
1030                  */
1031                 ret = smu_check_fw_version(smu);
1032                 if (ret)
1033                         return ret;
1034         }
1035
1036         /* smu_dump_pptable(smu); */
1037
1038         /*
1039          * Copy pptable bo in the vram to smc with SMU MSGs such as
1040          * SetDriverDramAddr and TransferTableDram2Smu.
1041          */
1042         ret = smu_write_pptable(smu);
1043         if (ret)
1044                 return ret;
1045
1046         /* issue Run*Btc msg */
1047         ret = smu_run_btc(smu);
1048         if (ret)
1049                 return ret;
1050
1051         ret = smu_feature_set_allowed_mask(smu);
1052         if (ret)
1053                 return ret;
1054
1055         ret = smu_system_features_control(smu, true);
1056         if (ret)
1057                 return ret;
1058
1059         if (adev->asic_type != CHIP_ARCTURUS) {
1060                 ret = smu_override_pcie_parameters(smu);
1061                 if (ret)
1062                         return ret;
1063
1064                 ret = smu_notify_display_change(smu);
1065                 if (ret)
1066                         return ret;
1067
1068                 /*
 1069                  * Set the min deep sleep dcefclk to the bootup value from vbios via
 1070                  * the SetMinDeepSleepDcefclk message.
1071                  */
1072                 ret = smu_set_min_dcef_deep_sleep(smu);
1073                 if (ret)
1074                         return ret;
1075         }
1076
1077         /*
 1078          * Set initialized values (taken from vbios) in the dpm tables context, such
 1079          * as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
 1080          * type of clock.
1081          */
1082         if (initialize) {
1083                 ret = smu_populate_smc_tables(smu);
1084                 if (ret)
1085                         return ret;
1086
1087                 ret = smu_init_max_sustainable_clocks(smu);
1088                 if (ret)
1089                         return ret;
1090         }
1091
1092         ret = smu_set_default_od_settings(smu, initialize);
1093         if (ret)
1094                 return ret;
1095
1096         if (initialize) {
1097                 ret = smu_populate_umd_state_clk(smu);
1098                 if (ret)
1099                         return ret;
1100
1101                 ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
1102                 if (ret)
1103                         return ret;
1104         }
1105
1106         /*
1107          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1108          */
1109         ret = smu_set_tool_table_location(smu);
1110
1111         if (!smu_is_dpm_running(smu))
1112                 pr_info("dpm has been disabled\n");
1113
1114         return ret;
1115 }
1116
1117 /**
1118  * smu_alloc_memory_pool - allocate memory pool in the system memory
1119  *
 1120  * @smu: smu_context pointer
1121  *
 1122  * This memory pool will be used by the SMC, and the SetSystemVirtualDramAddr
 1123  * and DramLogSetDramAddr messages notify the SMC of its location.
1124  *
1125  * Returns 0 on success, error on failure.
1126  */
1127 static int smu_alloc_memory_pool(struct smu_context *smu)
1128 {
1129         struct amdgpu_device *adev = smu->adev;
1130         struct smu_table_context *smu_table = &smu->smu_table;
1131         struct smu_table *memory_pool = &smu_table->memory_pool;
1132         uint64_t pool_size = smu->pool_size;
1133         int ret = 0;
1134
1135         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1136                 return ret;
1137
1138         memory_pool->size = pool_size;
1139         memory_pool->align = PAGE_SIZE;
1140         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1141
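             /* only the supported pool sizes (256MB to 2GB) get a backing buffer;
              * any other value leaves the pool unallocated */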
1142         switch (pool_size) {
1143         case SMU_MEMORY_POOL_SIZE_256_MB:
1144         case SMU_MEMORY_POOL_SIZE_512_MB:
1145         case SMU_MEMORY_POOL_SIZE_1_GB:
1146         case SMU_MEMORY_POOL_SIZE_2_GB:
1147                 ret = amdgpu_bo_create_kernel(adev,
1148                                               memory_pool->size,
1149                                               memory_pool->align,
1150                                               memory_pool->domain,
1151                                               &memory_pool->bo,
1152                                               &memory_pool->mc_address,
1153                                               &memory_pool->cpu_addr);
1154                 break;
1155         default:
1156                 break;
1157         }
1158
1159         return ret;
1160 }
1161
1162 static int smu_free_memory_pool(struct smu_context *smu)
1163 {
1164         struct smu_table_context *smu_table = &smu->smu_table;
1165         struct smu_table *memory_pool = &smu_table->memory_pool;
1166         int ret = 0;
1167
1168         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1169                 return ret;
1170
1171         amdgpu_bo_free_kernel(&memory_pool->bo,
1172                               &memory_pool->mc_address,
1173                               &memory_pool->cpu_addr);
1174
1175         memset(memory_pool, 0, sizeof(struct smu_table));
1176
1177         return ret;
1178 }
1179
1180 static int smu_start_smc_engine(struct smu_context *smu)
1181 {
1182         struct amdgpu_device *adev = smu->adev;
1183         int ret = 0;
1184
1185         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1186                 if (adev->asic_type < CHIP_NAVI10) {
1187                         if (smu->ppt_funcs->load_microcode) {
1188                                 ret = smu->ppt_funcs->load_microcode(smu);
1189                                 if (ret)
1190                                         return ret;
1191                         }
1192                 }
1193         }
1194
1195         if (smu->ppt_funcs->check_fw_status) {
1196                 ret = smu->ppt_funcs->check_fw_status(smu);
1197                 if (ret)
1198                         pr_err("SMC is not ready\n");
1199         }
1200
1201         return ret;
1202 }
1203
1204 static int smu_hw_init(void *handle)
1205 {
1206         int ret;
1207         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1208         struct smu_context *smu = &adev->smu;
1209
1210         ret = smu_start_smc_engine(smu);
1211         if (ret) {
1212                 pr_err("SMU is not ready yet!\n");
1213                 return ret;
1214         }
1215
1216         if (adev->flags & AMD_IS_APU) {
1217                 smu_powergate_sdma(&adev->smu, false);
1218                 smu_powergate_vcn(&adev->smu, false);
1219                 smu_set_gfx_cgpg(&adev->smu, true);
1220         }
1221
1222         if (!smu->pm_enabled)
1223                 return 0;
1224
1225         ret = smu_feature_init_dpm(smu);
1226         if (ret)
1227                 goto failed;
1228
1229         ret = smu_smc_table_hw_init(smu, true);
1230         if (ret)
1231                 goto failed;
1232
1233         ret = smu_alloc_memory_pool(smu);
1234         if (ret)
1235                 goto failed;
1236
1237         /*
 1238          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
 1239          * notify the SMC of the pool location.
1240          */
1241         ret = smu_notify_memory_pool_location(smu);
1242         if (ret)
1243                 goto failed;
1244
1245         ret = smu_start_thermal_control(smu);
1246         if (ret)
1247                 goto failed;
1248
1249         if (!smu->pm_enabled)
1250                 adev->pm.dpm_enabled = false;
1251         else
1252                 adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
1253
1254         pr_info("SMU is initialized successfully!\n");
1255
1256         return 0;
1257
1258 failed:
1259         return ret;
1260 }
1261
1262 static int smu_stop_dpms(struct smu_context *smu)
1263 {
1264         return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
1265 }
1266
1267 static int smu_hw_fini(void *handle)
1268 {
1269         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1270         struct smu_context *smu = &adev->smu;
1271         struct smu_table_context *table_context = &smu->smu_table;
1272         int ret = 0;
1273
1274         if (adev->flags & AMD_IS_APU) {
1275                 smu_powergate_sdma(&adev->smu, true);
1276                 smu_powergate_vcn(&adev->smu, true);
1277         }
1278
1279         ret = smu_stop_thermal_control(smu);
1280         if (ret) {
 1281                 pr_warn("Failed to stop thermal control!\n");
1282                 return ret;
1283         }
1284
1285         ret = smu_stop_dpms(smu);
1286         if (ret) {
 1287                 pr_warn("Failed to stop DPMs!\n");
1288                 return ret;
1289         }
1290
1291         kfree(table_context->driver_pptable);
1292         table_context->driver_pptable = NULL;
1293
1294         kfree(table_context->max_sustainable_clocks);
1295         table_context->max_sustainable_clocks = NULL;
1296
1297         kfree(table_context->overdrive_table);
1298         table_context->overdrive_table = NULL;
1299
1300         ret = smu_fini_fb_allocations(smu);
1301         if (ret)
1302                 return ret;
1303
1304         ret = smu_free_memory_pool(smu);
1305         if (ret)
1306                 return ret;
1307
1308         return 0;
1309 }
1310
1311 int smu_reset(struct smu_context *smu)
1312 {
1313         struct amdgpu_device *adev = smu->adev;
1314         int ret = 0;
1315
1316         ret = smu_hw_fini(adev);
1317         if (ret)
1318                 return ret;
1319
1320         ret = smu_hw_init(adev);
1321         if (ret)
1322                 return ret;
1323
1324         return ret;
1325 }
1326
1327 static int smu_suspend(void *handle)
1328 {
1329         int ret;
1330         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1331         struct smu_context *smu = &adev->smu;
1332         bool baco_feature_is_enabled = false;
1333
 1334         if (!(adev->flags & AMD_IS_APU))
1335                 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1336
1337         ret = smu_system_features_control(smu, false);
1338         if (ret)
1339                 return ret;
1340
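             /* all features were just disabled above; when going through a GPU
              * reset, turn BACO back on if it was enabled before */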
1341         if (adev->in_gpu_reset && baco_feature_is_enabled) {
1342                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1343                 if (ret) {
 1344                         pr_warn("Failed to enable the BACO feature, ret = %d\n", ret);
1345                         return ret;
1346                 }
1347         }
1348
1349         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1350
1351         if (adev->asic_type >= CHIP_NAVI10 &&
1352             adev->gfx.rlc.funcs->stop)
1353                 adev->gfx.rlc.funcs->stop(adev);
1354         if (smu->is_apu)
1355                 smu_set_gfx_cgpg(&adev->smu, false);
1356
1357         return 0;
1358 }
1359
1360 static int smu_resume(void *handle)
1361 {
1362         int ret;
1363         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1364         struct smu_context *smu = &adev->smu;
1365
1366         pr_info("SMU is resuming...\n");
1367
1368         ret = smu_start_smc_engine(smu);
1369         if (ret) {
1370                 pr_err("SMU is not ready yet!\n");
1371                 goto failed;
1372         }
1373
1374         ret = smu_smc_table_hw_init(smu, false);
1375         if (ret)
1376                 goto failed;
1377
1378         ret = smu_start_thermal_control(smu);
1379         if (ret)
1380                 goto failed;
1381
1382         if (smu->is_apu)
1383                 smu_set_gfx_cgpg(&adev->smu, true);
1384
1385         smu->disable_uclk_switch = 0;
1386
1387         pr_info("SMU is resumed successfully!\n");
1388
1389         return 0;
1390
1391 failed:
1392         return ret;
1393 }
1394
1395 int smu_display_configuration_change(struct smu_context *smu,
1396                                      const struct amd_pp_display_configuration *display_config)
1397 {
1398         int index = 0;
1399         int num_of_active_display = 0;
1400
1401         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1402                 return -EINVAL;
1403
1404         if (!display_config)
1405                 return -EINVAL;
1406
1407         mutex_lock(&smu->mutex);
1408
1409         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1410                 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1411                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1412
1413         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1414                 if (display_config->displays[index].controller_id != 0)
1415                         num_of_active_display++;
1416         }
1417
1418         smu_set_active_display_count(smu, num_of_active_display);
1419
1420         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1421                            display_config->cpu_cc6_disable,
1422                            display_config->cpu_pstate_disable,
1423                            display_config->nb_pstate_switch_disable);
1424
1425         mutex_unlock(&smu->mutex);
1426
1427         return 0;
1428 }
1429
1430 static int smu_get_clock_info(struct smu_context *smu,
1431                               struct smu_clock_info *clk_info,
1432                               enum smu_perf_level_designation designation)
1433 {
1434         int ret;
1435         struct smu_performance_level level = {0};
1436
1437         if (!clk_info)
1438                 return -EINVAL;
1439
1440         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1441         if (ret)
1442                 return -EINVAL;
1443
1444         clk_info->min_mem_clk = level.memory_clock;
1445         clk_info->min_eng_clk = level.core_clock;
1446         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1447
1448         ret = smu_get_perf_level(smu, designation, &level);
1449         if (ret)
1450                 return -EINVAL;
1451
1452         clk_info->min_mem_clk = level.memory_clock;
1453         clk_info->min_eng_clk = level.core_clock;
1454         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1455
1456         return 0;
1457 }
1458
1459 int smu_get_current_clocks(struct smu_context *smu,
1460                            struct amd_pp_clock_info *clocks)
1461 {
1462         struct amd_pp_simple_clock_info simple_clocks = {0};
1463         struct smu_clock_info hw_clocks;
1464         int ret = 0;
1465
1466         if (!is_support_sw_smu(smu->adev))
1467                 return -EINVAL;
1468
1469         mutex_lock(&smu->mutex);
1470
1471         smu_get_dal_power_level(smu, &simple_clocks);
1472
1473         if (smu->support_power_containment)
1474                 ret = smu_get_clock_info(smu, &hw_clocks,
1475                                          PERF_LEVEL_POWER_CONTAINMENT);
1476         else
1477                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1478
1479         if (ret) {
1480                 pr_err("Error in smu_get_clock_info\n");
1481                 goto failed;
1482         }
1483
1484         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1485         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1486         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1487         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1488         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1489         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1490         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1491         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1492
1493         if (simple_clocks.level == 0)
1494                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1495         else
1496                 clocks->max_clocks_state = simple_clocks.level;
1497
1498         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1499                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1500                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1501         }
1502
1503 failed:
1504         mutex_unlock(&smu->mutex);
1505         return ret;
1506 }
1507
1508 static int smu_set_clockgating_state(void *handle,
1509                                      enum amd_clockgating_state state)
1510 {
1511         return 0;
1512 }
1513
1514 static int smu_set_powergating_state(void *handle,
1515                                      enum amd_powergating_state state)
1516 {
1517         return 0;
1518 }
1519
1520 static int smu_enable_umd_pstate(void *handle,
1521                       enum amd_dpm_forced_level *level)
1522 {
1523         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1524                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1525                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1526                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1527
1528         struct smu_context *smu = (struct smu_context*)(handle);
1529         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1530
1531         if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
1532                 return -EINVAL;
1533
1534         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
 1535                 /* enter umd pstate, save current level, disable gfx cg */
1536                 if (*level & profile_mode_mask) {
1537                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1538                         smu_dpm_ctx->enable_umd_pstate = true;
1539                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1540                                                                AMD_IP_BLOCK_TYPE_GFX,
1541                                                                AMD_CG_STATE_UNGATE);
1542                         amdgpu_device_ip_set_powergating_state(smu->adev,
1543                                                                AMD_IP_BLOCK_TYPE_GFX,
1544                                                                AMD_PG_STATE_UNGATE);
1545                 }
1546         } else {
 1547                 /* exit umd pstate, restore level, enable gfx cg */
1548                 if (!(*level & profile_mode_mask)) {
1549                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1550                                 *level = smu_dpm_ctx->saved_dpm_level;
1551                         smu_dpm_ctx->enable_umd_pstate = false;
1552                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1553                                                                AMD_IP_BLOCK_TYPE_GFX,
1554                                                                AMD_CG_STATE_GATE);
1555                         amdgpu_device_ip_set_powergating_state(smu->adev,
1556                                                                AMD_IP_BLOCK_TYPE_GFX,
1557                                                                AMD_PG_STATE_GATE);
1558                 }
1559         }
1560
1561         return 0;
1562 }
1563
1564 static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1565 {
1566         int ret = 0;
1567         uint32_t sclk_mask, mclk_mask, soc_mask;
1568
1569         switch (level) {
1570         case AMD_DPM_FORCED_LEVEL_HIGH:
1571                 ret = smu_force_dpm_limit_value(smu, true);
1572                 break;
1573         case AMD_DPM_FORCED_LEVEL_LOW:
1574                 ret = smu_force_dpm_limit_value(smu, false);
1575                 break;
1576         case AMD_DPM_FORCED_LEVEL_AUTO:
1577         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1578                 ret = smu_unforce_dpm_levels(smu);
1579                 break;
1580         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1581         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1582         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1583                 ret = smu_get_profiling_clk_mask(smu, level,
1584                                                  &sclk_mask,
1585                                                  &mclk_mask,
1586                                                  &soc_mask);
1587                 if (ret)
1588                         return ret;
1589                 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
1590                 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
1591                 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
1592                 break;
1593         case AMD_DPM_FORCED_LEVEL_MANUAL:
1594         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1595         default:
1596                 break;
1597         }
1598         return ret;
1599 }
1600
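/*
 * smu_adjust_power_state_dynamic - re-evaluate the power state for the
 * requested forced level: run the display-config-changed handler (unless
 * @skip_display_settings), apply the clock adjust rules, notify the SMC of
 * the display config, switch the performance level (ASIC handler first,
 * generic fallback second), and finally re-select the power profile from
 * the highest-priority bit in workload_mask when the level is not MANUAL.
 * Expected to run with smu->mutex held by the caller (see smu_handle_task()).
 */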
1601 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1602                                    enum amd_dpm_forced_level level,
1603                                    bool skip_display_settings)
1604 {
1605         int ret = 0;
1606         int index = 0;
1607         long workload;
1608         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1609
1610         if (!smu->pm_enabled)
1611                 return -EINVAL;
1612
1613         if (!skip_display_settings) {
1614                 ret = smu_display_config_changed(smu);
1615                 if (ret) {
1616                         pr_err("Failed to change display config!\n");
1617                         return ret;
1618                 }
1619         }
1620
1621         ret = smu_apply_clocks_adjust_rules(smu);
1622         if (ret) {
1623                 pr_err("Failed to apply clocks adjust rules!\n");
1624                 return ret;
1625         }
1626
1627         if (!skip_display_settings) {
1628                 ret = smu_notify_smc_dispaly_config(smu);
1629                 if (ret) {
1630                         pr_err("Failed to notify smc display config!\n");
1631                         return ret;
1632                 }
1633         }
1634
1635         if (smu_dpm_ctx->dpm_level != level) {
1636                 ret = smu_asic_set_performance_level(smu, level);
1637                 if (ret) {
1638                         ret = smu_default_set_performance_level(smu, level);
1639                         if (ret) {
1640                                 pr_err("Failed to set performance level!\n");
1641                                 return ret;
1642                         }
1643                 }
1644
1645                 /* update the saved copy */
1646                 smu_dpm_ctx->dpm_level = level;
1647         }
1648
1649         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1650                 index = fls(smu->workload_mask);
1651                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1652                 workload = smu->workload_setting[index];
1653
1654                 if (smu->power_profile_mode != workload)
1655                         smu_set_power_profile_mode(smu, &workload, 0, false);
1656         }
1657
1658         return ret;
1659 }
1660
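/*
 * smu_handle_task - power-management task dispatcher.  A display config
 * change runs the pre-display hook and CPU power state update before the
 * dynamic power state adjustment; COMPLETE_INIT and READJUST_POWER_STATE
 * only readjust the power state and skip the display settings.  Pass
 * @lock_needed as false only when smu->mutex is already held.
 *
 * Illustrative call (hypothetical caller, not taken from this file):
 *	smu_handle_task(smu, level, AMD_PP_TASK_READJUST_POWER_STATE, true);
 */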
1661 int smu_handle_task(struct smu_context *smu,
1662                     enum amd_dpm_forced_level level,
1663                     enum amd_pp_task task_id,
1664                     bool lock_needed)
1665 {
1666         int ret = 0;
1667
1668         if (lock_needed)
1669                 mutex_lock(&smu->mutex);
1670
1671         switch (task_id) {
1672         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1673                 ret = smu_pre_display_config_changed(smu);
1674                 if (ret)
1675                         goto out;
1676                 ret = smu_set_cpu_power_state(smu);
1677                 if (ret)
1678                         goto out;
1679                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1680                 break;
1681         case AMD_PP_TASK_COMPLETE_INIT:
1682         case AMD_PP_TASK_READJUST_POWER_STATE:
1683                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1684                 break;
1685         default:
1686                 break;
1687         }
1688
1689 out:
1690         if (lock_needed)
1691                 mutex_unlock(&smu->mutex);
1692
1693         return ret;
1694 }
1695
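/*
 * smu_switch_power_profile - set or clear the workload bit for @type
 * according to its priority, then re-apply the power profile of the
 * highest-priority workload still active, unless the DPM level is MANUAL.
 * Requests at or above PP_SMC_POWER_PROFILE_CUSTOM are rejected.
 *
 * Illustrative use (hypothetical caller): a compute client enabling its
 * profile would call
 *	smu_switch_power_profile(smu, PP_SMC_POWER_PROFILE_COMPUTE, true);
 * and clear it again with @en set to false once the work completes.
 */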
1696 int smu_switch_power_profile(struct smu_context *smu,
1697                              enum PP_SMC_POWER_PROFILE type,
1698                              bool en)
1699 {
1700         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1701         long workload;
1702         uint32_t index;
1703
1704         if (!smu->pm_enabled)
1705                 return -EINVAL;
1706
1707         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1708                 return -EINVAL;
1709
1710         mutex_lock(&smu->mutex);
1711
1712         if (!en) {
1713                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1714                 index = fls(smu->workload_mask);
1715                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1716                 workload = smu->workload_setting[index];
1717         } else {
1718                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1719                 index = fls(smu->workload_mask);
1720                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1721                 workload = smu->workload_setting[index];
1722         }
1723
1724         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1725                 smu_set_power_profile_mode(smu, &workload, 0, false);
1726
1727         mutex_unlock(&smu->mutex);
1728
1729         return 0;
1730 }
1731
1732 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1733 {
1734         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1735         enum amd_dpm_forced_level level;
1736
1737         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1738                 return -EINVAL;
1739
1740         mutex_lock(&(smu->mutex));
1741         level = smu_dpm_ctx->dpm_level;
1742         mutex_unlock(&(smu->mutex));
1743
1744         return level;
1745 }
1746
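/*
 * smu_force_performance_level - user-facing entry point: update the UMD
 * pstate bookkeeping (and gfx CG/PG gating) via smu_enable_umd_pstate(),
 * then readjust the power state for the resulting level, all under
 * smu->mutex.
 */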
1747 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1748 {
1749         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1750         int ret = 0;
1751
1752         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1753                 return -EINVAL;
1754
1755         mutex_lock(&smu->mutex);
1756
1757         ret = smu_enable_umd_pstate(smu, &level);
1758         if (ret) {
1759                 mutex_unlock(&smu->mutex);
1760                 return ret;
1761         }
1762
1763         ret = smu_handle_task(smu, level,
1764                               AMD_PP_TASK_READJUST_POWER_STATE,
1765                               false);
1766
1767         mutex_unlock(&smu->mutex);
1768
1769         return ret;
1770 }
1771
1772 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1773 {
1774         int ret = 0;
1775
1776         mutex_lock(&smu->mutex);
1777         ret = smu_init_display_count(smu, count);
1778         mutex_unlock(&smu->mutex);
1779
1780         return ret;
1781 }
1782
1783 int smu_force_clk_levels(struct smu_context *smu,
1784                          enum smu_clk_type clk_type,
1785                          uint32_t mask,
1786                          bool lock_needed)
1787 {
1788         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1789         int ret = 0;
1790
1791         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1792                 pr_debug("force clock level is for dpm manual mode only.\n");
1793                 return -EINVAL;
1794         }
1795
1796         if (lock_needed)
1797                 mutex_lock(&smu->mutex);
1798
1799         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1800                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1801
1802         if (lock_needed)
1803                 mutex_unlock(&smu->mutex);
1804
1805         return ret;
1806 }
1807
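/*
 * smu_set_mp1_state - translate the requested MP1 state into the matching
 * PrepareMp1For* message and send it to the SMC.  Returns 0 without
 * sending anything when PM is disabled, when the state needs no message,
 * or when the ASIC does not implement the message.
 */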
1808 int smu_set_mp1_state(struct smu_context *smu,
1809                       enum pp_mp1_state mp1_state)
1810 {
1811         uint16_t msg;
1812         int ret;
1813
1814         /*
1815          * The SMC is not fully ready. That may be
1816          * expected as the IP may be masked.
1817          * So, just return without error.
1818          */
1819         if (!smu->pm_enabled)
1820                 return 0;
1821
1822         mutex_lock(&smu->mutex);
1823
1824         switch (mp1_state) {
1825         case PP_MP1_STATE_SHUTDOWN:
1826                 msg = SMU_MSG_PrepareMp1ForShutdown;
1827                 break;
1828         case PP_MP1_STATE_UNLOAD:
1829                 msg = SMU_MSG_PrepareMp1ForUnload;
1830                 break;
1831         case PP_MP1_STATE_RESET:
1832                 msg = SMU_MSG_PrepareMp1ForReset;
1833                 break;
1834         case PP_MP1_STATE_NONE:
1835         default:
1836                 mutex_unlock(&smu->mutex);
1837                 return 0;
1838         }
1839
1840         /* some ASICs may not support these messages */
1841         if (smu_msg_get_index(smu, msg) < 0) {
1842                 mutex_unlock(&smu->mutex);
1843                 return 0;
1844         }
1845
1846         ret = smu_send_smc_msg(smu, msg);
1847         if (ret)
1848                 pr_err("[PrepareMp1] Failed!\n");
1849
1850         mutex_unlock(&smu->mutex);
1851
1852         return ret;
1853 }
1854
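/*
 * smu_set_df_cstate - forward the Data Fabric C-state request to the ppt
 * layer; silently succeeds when PM is disabled or the hook is absent.
 */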
1855 int smu_set_df_cstate(struct smu_context *smu,
1856                       enum pp_df_cstate state)
1857 {
1858         int ret = 0;
1859
1860         /*
1861          * The SMC is not fully ready. That may be
1862          * expected as the IP may be masked.
1863          * So, just return without error.
1864          */
1865         if (!smu->pm_enabled)
1866                 return 0;
1867
1868         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1869                 return 0;
1870
1871         mutex_lock(&smu->mutex);
1872
1873         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1874         if (ret)
1875                 pr_err("[SetDfCstate] failed!\n");
1876
1877         mutex_unlock(&smu->mutex);
1878
1879         return ret;
1880 }
1881
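/*
 * smu_write_watermarks_table - push the CPU copy of the watermarks table
 * to the SMC; fails with -EINVAL if the table was never allocated.
 */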
1882 int smu_write_watermarks_table(struct smu_context *smu)
1883 {
1884         int ret = 0;
1885         struct smu_table_context *smu_table = &smu->smu_table;
1886         struct smu_table *table = NULL;
1887
1888         table = &smu_table->tables[SMU_TABLE_WATERMARKS];
1889
1890         if (!table->cpu_addr)
1891                 return -EINVAL;
1892
1893         ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
1894                                 true);
1895
1896         return ret;
1897 }
1898
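/*
 * smu_set_watermarks_for_clock_ranges - regenerate the CPU copy of the
 * watermarks table from the display clock ranges.  The table is only
 * rebuilt when watermark programming is not disabled and both DCEFCLK and
 * SOCCLK DPM are enabled; the bitmap is then marked EXIST but not LOADED,
 * so smu_write_watermarks_table() still has to push it to the SMC.
 */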
1899 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1900                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
1901 {
1902         int ret = 0;
1903         struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
1904         void *table = watermarks->cpu_addr;
1905
1906         mutex_lock(&smu->mutex);
1907
1908         if (!smu->disable_watermark &&
1909                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1910                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1911                 smu_set_watermarks_table(smu, table, clock_ranges);
1912                 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1913                 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1914         }
1915
1916         mutex_unlock(&smu->mutex);
1917
1918         return ret;
1919 }
1920
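/*
 * amdgpu IP block glue: the core driver initializes, suspends and resumes
 * the SMU through these callbacks.
 */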
1921 const struct amd_ip_funcs smu_ip_funcs = {
1922         .name = "smu",
1923         .early_init = smu_early_init,
1924         .late_init = smu_late_init,
1925         .sw_init = smu_sw_init,
1926         .sw_fini = smu_sw_fini,
1927         .hw_init = smu_hw_init,
1928         .hw_fini = smu_hw_fini,
1929         .suspend = smu_suspend,
1930         .resume = smu_resume,
1931         .is_idle = NULL,
1932         .check_soft_reset = NULL,
1933         .wait_for_idle = NULL,
1934         .soft_reset = NULL,
1935         .set_clockgating_state = smu_set_clockgating_state,
1936         .set_powergating_state = smu_set_powergating_state,
1937         .enable_umd_pstate = smu_enable_umd_pstate,
1938 };
1939
1940 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1941 {
1942         .type = AMD_IP_BLOCK_TYPE_SMC,
1943         .major = 11,
1944         .minor = 0,
1945         .rev = 0,
1946         .funcs = &smu_ip_funcs,
1947 };
1948
1949 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
1950 {
1951         .type = AMD_IP_BLOCK_TYPE_SMC,
1952         .major = 12,
1953         .minor = 0,
1954         .rev = 0,
1955         .funcs = &smu_ip_funcs,
1956 };
1957
1958 int smu_load_microcode(struct smu_context *smu)
1959 {
1960         int ret = 0;
1961
1962         mutex_lock(&smu->mutex);
1963
1964         if (smu->ppt_funcs->load_microcode)
1965                 ret = smu->ppt_funcs->load_microcode(smu);
1966
1967         mutex_unlock(&smu->mutex);
1968
1969         return ret;
1970 }
1971
1972 int smu_check_fw_status(struct smu_context *smu)
1973 {
1974         int ret = 0;
1975
1976         mutex_lock(&smu->mutex);
1977
1978         if (smu->ppt_funcs->check_fw_status)
1979                 ret = smu->ppt_funcs->check_fw_status(smu);
1980
1981         mutex_unlock(&smu->mutex);
1982
1983         return ret;
1984 }
1985
1986 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
1987 {
1988         int ret = 0;
1989
1990         mutex_lock(&smu->mutex);
1991
1992         if (smu->ppt_funcs->set_gfx_cgpg)
1993                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
1994
1995         mutex_unlock(&smu->mutex);
1996
1997         return ret;
1998 }
1999
2000 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2001 {
2002         int ret = 0;
2003
2004         mutex_lock(&smu->mutex);
2005
2006         if (smu->ppt_funcs->set_fan_speed_rpm)
2007                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2008
2009         mutex_unlock(&smu->mutex);
2010
2011         return ret;
2012 }
2013
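/*
 * smu_get_power_limit - query the current (or, when @def is set, the
 * default) power limit.  @lock_needed lets internal callers that already
 * hold smu->mutex skip the locking.
 */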
2014 int smu_get_power_limit(struct smu_context *smu,
2015                         uint32_t *limit,
2016                         bool def,
2017                         bool lock_needed)
2018 {
2019         int ret = 0;
2020
2021         if (lock_needed)
2022                 mutex_lock(&smu->mutex);
2023
2024         if (smu->ppt_funcs->get_power_limit)
2025                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2026
2027         if (lock_needed)
2028                 mutex_unlock(&smu->mutex);
2029
2030         return ret;
2031 }
2032
2033 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2034 {
2035         int ret = 0;
2036
2037         mutex_lock(&smu->mutex);
2038
2039         if (smu->ppt_funcs->set_power_limit)
2040                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2041
2042         mutex_unlock(&smu->mutex);
2043
2044         return ret;
2045 }
2046
2047 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2048 {
2049         int ret = 0;
2050
2051         mutex_lock(&smu->mutex);
2052
2053         if (smu->ppt_funcs->print_clk_levels)
2054                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2055
2056         mutex_unlock(&smu->mutex);
2057
2058         return ret;
2059 }
2060
2061 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2062 {
2063         int ret = 0;
2064
2065         mutex_lock(&smu->mutex);
2066
2067         if (smu->ppt_funcs->get_od_percentage)
2068                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2069
2070         mutex_unlock(&smu->mutex);
2071
2072         return ret;
2073 }
2074
2075 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2076 {
2077         int ret = 0;
2078
2079         mutex_lock(&smu->mutex);
2080
2081         if (smu->ppt_funcs->set_od_percentage)
2082                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2083
2084         mutex_unlock(&smu->mutex);
2085
2086         return ret;
2087 }
2088
2089 int smu_od_edit_dpm_table(struct smu_context *smu,
2090                           enum PP_OD_DPM_TABLE_COMMAND type,
2091                           long *input, uint32_t size)
2092 {
2093         int ret = 0;
2094
2095         mutex_lock(&smu->mutex);
2096
2097         if (smu->ppt_funcs->od_edit_dpm_table)
2098                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2099
2100         mutex_unlock(&smu->mutex);
2101
2102         return ret;
2103 }
2104
2105 int smu_read_sensor(struct smu_context *smu,
2106                     enum amd_pp_sensors sensor,
2107                     void *data, uint32_t *size)
2108 {
2109         int ret = 0;
2110
2111         mutex_lock(&smu->mutex);
2112
2113         if (smu->ppt_funcs->read_sensor)
2114                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2115
2116         mutex_unlock(&smu->mutex);
2117
2118         return ret;
2119 }
2120
2121 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2122 {
2123         int ret = 0;
2124
2125         mutex_lock(&smu->mutex);
2126
2127         if (smu->ppt_funcs->get_power_profile_mode)
2128                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2129
2130         mutex_unlock(&smu->mutex);
2131
2132         return ret;
2133 }
2134
2135 int smu_set_power_profile_mode(struct smu_context *smu,
2136                                long *param,
2137                                uint32_t param_size,
2138                                bool lock_needed)
2139 {
2140         int ret = 0;
2141
2142         if (lock_needed)
2143                 mutex_lock(&smu->mutex);
2144
2145         if (smu->ppt_funcs->set_power_profile_mode)
2146                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2147
2148         if (lock_needed)
2149                 mutex_unlock(&smu->mutex);
2150
2151         return ret;
2152 }
2153
2154
2155 int smu_get_fan_control_mode(struct smu_context *smu)
2156 {
2157         int ret = 0;
2158
2159         mutex_lock(&smu->mutex);
2160
2161         if (smu->ppt_funcs->get_fan_control_mode)
2162                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2163
2164         mutex_unlock(&smu->mutex);
2165
2166         return ret;
2167 }
2168
2169 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2170 {
2171         int ret = 0;
2172
2173         mutex_lock(&smu->mutex);
2174
2175         if (smu->ppt_funcs->set_fan_control_mode)
2176                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2177
2178         mutex_unlock(&smu->mutex);
2179
2180         return ret;
2181 }
2182
2183 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2184 {
2185         int ret = 0;
2186
2187         mutex_lock(&smu->mutex);
2188
2189         if (smu->ppt_funcs->get_fan_speed_percent)
2190                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2191
2192         mutex_unlock(&smu->mutex);
2193
2194         return ret;
2195 }
2196
2197 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2198 {
2199         int ret = 0;
2200
2201         mutex_lock(&smu->mutex);
2202
2203         if (smu->ppt_funcs->set_fan_speed_percent)
2204                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2205
2206         mutex_unlock(&smu->mutex);
2207
2208         return ret;
2209 }
2210
2211 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2212 {
2213         int ret = 0;
2214
2215         mutex_lock(&smu->mutex);
2216
2217         if (smu->ppt_funcs->get_fan_speed_rpm)
2218                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2219
2220         mutex_unlock(&smu->mutex);
2221
2222         return ret;
2223 }
2224
2225 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2226 {
2227         int ret = 0;
2228
2229         mutex_lock(&smu->mutex);
2230
2231         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2232                 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2233
2234         mutex_unlock(&smu->mutex);
2235
2236         return ret;
2237 }
2238
2239 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2240 {
2241         int ret = 0;
2242
2243         mutex_lock(&smu->mutex);
2244
2245         if (smu->ppt_funcs->set_active_display_count)
2246                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2247
2248         mutex_unlock(&smu->mutex);
2249
2250         return ret;
2251 }
2252
2253 int smu_get_clock_by_type(struct smu_context *smu,
2254                           enum amd_pp_clock_type type,
2255                           struct amd_pp_clocks *clocks)
2256 {
2257         int ret = 0;
2258
2259         mutex_lock(&smu->mutex);
2260
2261         if (smu->ppt_funcs->get_clock_by_type)
2262                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2263
2264         mutex_unlock(&smu->mutex);
2265
2266         return ret;
2267 }
2268
2269 int smu_get_max_high_clocks(struct smu_context *smu,
2270                             struct amd_pp_simple_clock_info *clocks)
2271 {
2272         int ret = 0;
2273
2274         mutex_lock(&smu->mutex);
2275
2276         if (smu->ppt_funcs->get_max_high_clocks)
2277                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2278
2279         mutex_unlock(&smu->mutex);
2280
2281         return ret;
2282 }
2283
2284 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2285                                        enum smu_clk_type clk_type,
2286                                        struct pp_clock_levels_with_latency *clocks)
2287 {
2288         int ret = 0;
2289
2290         mutex_lock(&smu->mutex);
2291
2292         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2293                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2294
2295         mutex_unlock(&smu->mutex);
2296
2297         return ret;
2298 }
2299
2300 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2301                                        enum amd_pp_clock_type type,
2302                                        struct pp_clock_levels_with_voltage *clocks)
2303 {
2304         int ret = 0;
2305
2306         mutex_lock(&smu->mutex);
2307
2308         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2309                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2310
2311         mutex_unlock(&smu->mutex);
2312
2313         return ret;
2314 }
2315
2316
2317 int smu_display_clock_voltage_request(struct smu_context *smu,
2318                                       struct pp_display_clock_request *clock_req)
2319 {
2320         int ret = 0;
2321
2322         mutex_lock(&smu->mutex);
2323
2324         if (smu->ppt_funcs->display_clock_voltage_request)
2325                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2326
2327         mutex_unlock(&smu->mutex);
2328
2329         return ret;
2330 }
2331
2332
2333 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2334 {
2335         int ret = -EINVAL;
2336
2337         mutex_lock(&smu->mutex);
2338
2339         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2340                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2341
2342         mutex_unlock(&smu->mutex);
2343
2344         return ret;
2345 }
2346
2347 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2348 {
2349         int ret = 0;
2350
2351         mutex_lock(&smu->mutex);
2352
2353         if (smu->ppt_funcs->notify_smu_enable_pwe)
2354                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2355
2356         mutex_unlock(&smu->mutex);
2357
2358         return ret;
2359 }
2360
2361 int smu_set_xgmi_pstate(struct smu_context *smu,
2362                         uint32_t pstate)
2363 {
2364         int ret = 0;
2365
2366         mutex_lock(&smu->mutex);
2367
2368         if (smu->ppt_funcs->set_xgmi_pstate)
2369                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2370
2371         mutex_unlock(&smu->mutex);
2372
2373         return ret;
2374 }
2375
2376 int smu_set_azalia_d3_pme(struct smu_context *smu)
2377 {
2378         int ret = 0;
2379
2380         mutex_lock(&smu->mutex);
2381
2382         if (smu->ppt_funcs->set_azalia_d3_pme)
2383                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2384
2385         mutex_unlock(&smu->mutex);
2386
2387         return ret;
2388 }
2389
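/*
 * BACO (Bus Active, Chip Off) helpers: thin, mutex-protected forwards to
 * the ppt layer.
 */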
2390 bool smu_baco_is_support(struct smu_context *smu)
2391 {
2392         bool ret = false;
2393
2394         mutex_lock(&smu->mutex);
2395
2396         if (smu->ppt_funcs->baco_is_support)
2397                 ret = smu->ppt_funcs->baco_is_support(smu);
2398
2399         mutex_unlock(&smu->mutex);
2400
2401         return ret;
2402 }
2403
2404 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2405 {
2406         if (!smu->ppt_funcs->baco_get_state)
2407                 return -EINVAL;
2408
2409         mutex_lock(&smu->mutex);
2410         *state = smu->ppt_funcs->baco_get_state(smu);
2411         mutex_unlock(&smu->mutex);
2412
2413         return 0;
2414 }
2415
2416 int smu_baco_reset(struct smu_context *smu)
2417 {
2418         int ret = 0;
2419
2420         mutex_lock(&smu->mutex);
2421
2422         if (smu->ppt_funcs->baco_reset)
2423                 ret = smu->ppt_funcs->baco_reset(smu);
2424
2425         mutex_unlock(&smu->mutex);
2426
2427         return ret;
2428 }
2429
2430 int smu_mode2_reset(struct smu_context *smu)
2431 {
2432         int ret = 0;
2433
2434         mutex_lock(&smu->mutex);
2435
2436         if (smu->ppt_funcs->mode2_reset)
2437                 ret = smu->ppt_funcs->mode2_reset(smu);
2438
2439         mutex_unlock(&smu->mutex);
2440
2441         return ret;
2442 }
2443
2444 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2445                                          struct pp_smu_nv_clock_table *max_clocks)
2446 {
2447         int ret = 0;
2448
2449         mutex_lock(&smu->mutex);
2450
2451         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2452                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2453
2454         mutex_unlock(&smu->mutex);
2455
2456         return ret;
2457 }
2458
2459 int smu_get_uclk_dpm_states(struct smu_context *smu,
2460                             unsigned int *clock_values_in_khz,
2461                             unsigned int *num_states)
2462 {
2463         int ret = 0;
2464
2465         mutex_lock(&smu->mutex);
2466
2467         if (smu->ppt_funcs->get_uclk_dpm_states)
2468                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2469
2470         mutex_unlock(&smu->mutex);
2471
2472         return ret;
2473 }
2474
2475 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2476 {
2477         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2478
2479         mutex_lock(&smu->mutex);
2480
2481         if (smu->ppt_funcs->get_current_power_state)
2482                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2483
2484         mutex_unlock(&smu->mutex);
2485
2486         return pm_state;
2487 }
2488
2489 int smu_get_dpm_clock_table(struct smu_context *smu,
2490                             struct dpm_clocks *clock_table)
2491 {
2492         int ret = 0;
2493
2494         mutex_lock(&smu->mutex);
2495
2496         if (smu->ppt_funcs->get_dpm_clock_table)
2497                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2498
2499         mutex_unlock(&smu->mutex);
2500
2501         return ret;
2502 }