6dc1410b380f376982551dbebe06f4dd84edf3b2
[sfrench/cifs-2.6.git] / drivers / gpu / drm / amd / amdgpu / ci_dpm.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include "drmP.h"
26 #include "amdgpu.h"
27 #include "amdgpu_pm.h"
28 #include "amdgpu_ucode.h"
29 #include "cikd.h"
30 #include "amdgpu_dpm.h"
31 #include "ci_dpm.h"
32 #include "gfx_v7_0.h"
33 #include "atom.h"
34 #include "amd_pcie.h"
35 #include <linux/seq_file.h>
36
37 #include "smu/smu_7_0_1_d.h"
38 #include "smu/smu_7_0_1_sh_mask.h"
39
40 #include "dce/dce_8_0_d.h"
41 #include "dce/dce_8_0_sh_mask.h"
42
43 #include "bif/bif_4_1_d.h"
44 #include "bif/bif_4_1_sh_mask.h"
45
46 #include "gca/gfx_7_2_d.h"
47 #include "gca/gfx_7_2_sh_mask.h"
48
49 #include "gmc/gmc_7_1_d.h"
50 #include "gmc/gmc_7_1_sh_mask.h"
51
52 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
53 MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
54 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
55 MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
56
57 #define MC_CG_ARB_FREQ_F0           0x0a
58 #define MC_CG_ARB_FREQ_F1           0x0b
59 #define MC_CG_ARB_FREQ_F2           0x0c
60 #define MC_CG_ARB_FREQ_F3           0x0d
61
62 #define SMC_RAM_END 0x40000
63
64 #define VOLTAGE_SCALE               4
65 #define VOLTAGE_VID_OFFSET_SCALE1    625
66 #define VOLTAGE_VID_OFFSET_SCALE2    100
67
/*
 * PowerTune defaults for Hawaii XT boards, selected by PCI device id in
 * ci_initialize_powertune_defaults().  Scalar fields are the struct
 * ci_pt_defaults tuning values (SVI load-line settings, TDC throttle
 * limits, DTE ambient temp base, BAPM temp gradient — confirm exact
 * field order against ci_dpm.h); the two 15-entry arrays are the
 * bapmti_r / bapmti_rc coefficient tables consumed by
 * ci_populate_bapm_parameters_in_dpm_table().
 */
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
74
/* PowerTune defaults for Hawaii Pro boards (see defaults_hawaii_xt for
 * the field layout).
 */
static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
81
/* PowerTune defaults for Bonaire XT boards — also the fallback set for
 * any unrecognized device id (see ci_initialize_powertune_defaults()).
 */
static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
88
89 #if 0
90 static const struct ci_pt_defaults defaults_bonaire_pro =
91 {
92         1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
93         { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
94         { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
95 };
96 #endif
97
/* PowerTune defaults for Saturn XT parts (device ids 0x6640-0x6647,
 * see ci_initialize_powertune_defaults()).
 */
static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
104
105 #if 0
106 static const struct ci_pt_defaults defaults_saturn_pro =
107 {
108         1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
109         { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
110         { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
111 };
112 #endif
113
/*
 * DIDT register programming table, consumed by
 * ci_program_pt_config_registers().  Each row is
 * { offset, mask, shift, value, type }: the field selected by `mask`
 * in the DIDT-indirect register at `offset` is replaced with
 * `value` << `shift`.  The list is terminated by the 0xFFFFFFFF offset
 * sentinel.  There are four parallel register groups (presumably the
 * SQ/DB/TD/TCP DIDT blocks at bases 0x0/0x20/0x40/0x60 — confirm
 * against the DIDT register headers).
 */
static const struct ci_pt_config_reg didt_config_ci[] =
{
	/* weight registers, group at base 0x0 */
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* group at base 0x20 */
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* group at base 0x40 */
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	/* group at base 0x60 */
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
190
/*
 * Read the memory module index from bits 23:16 of BIOS_SCRATCH_4.
 * NOTE(review): presumably left there by the vbios to select the vram
 * module entry — confirm against the atombios scratch register layout.
 */
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}
195
196 #define MC_CG_ARB_FREQ_F0           0x0a
197 #define MC_CG_ARB_FREQ_F1           0x0b
198 #define MC_CG_ARB_FREQ_F2           0x0c
199 #define MC_CG_ARB_FREQ_F3           0x0d
200
/*
 * Copy one MC (memory controller) arbitration register set into another
 * and then request a switch to the destination set.
 *
 * @adev: amdgpu device handle
 * @arb_freq_src: set to copy from (MC_CG_ARB_FREQ_F0 or _F1)
 * @arb_freq_dest: set to copy to and switch to (MC_CG_ARB_FREQ_F0 or _F1)
 *
 * Returns 0 on success, -EINVAL if either argument is not F0/F1.
 */
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	/* Capture the source set's DRAM timing registers and its
	 * per-state burst time field.
	 */
	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			 MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			 MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	/* Replay the captured values into the destination set. */
	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Request the switch to the destination arb set.
	 * NOTE(review): the low nibble OR'ed into MC_CG_CONFIG presumably
	 * enables CG arb requests — confirm against the MC register spec.
	 */
	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}
250
/*
 * Map a DDR3 memory clock (10 kHz units) onto the 0..15 MC parameter
 * index: one step per 5000 units above 10000, clamped at both ends.
 */
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	if (memory_clock >= 80000)
		return 0x0f;
	if (memory_clock < 10000)
		return 0;

	return (u8)((memory_clock - 10000) / 5000 + 1);
}
263
/*
 * Pick the MC parameter index for a memory clock (10 kHz units).
 * Strobe mode uses 2500-unit steps over 12500..47500; normal mode uses
 * 5000-unit steps over 65000..135000.  Out-of-range clocks clamp to
 * index 0 or 0x0f.
 */
static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u32 lo, hi, base, step;

	if (strobe_mode) {
		lo = 12500;
		hi = 47500;
		base = 10000;
		step = 2500;
	} else {
		lo = 65000;
		hi = 135000;
		base = 60000;
		step = 5000;
	}

	if (memory_clock < lo)
		return 0x00;
	if (memory_clock > hi)
		return 0x0f;

	return (u8)((memory_clock - base) / step);
}
285
286 static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
287                                                      u32 max_voltage_steps,
288                                                      struct atom_voltage_table *voltage_table)
289 {
290         unsigned int i, diff;
291
292         if (voltage_table->count <= max_voltage_steps)
293                 return;
294
295         diff = voltage_table->count - max_voltage_steps;
296
297         for (i = 0; i < max_voltage_steps; i++)
298                 voltage_table->entries[i] = voltage_table->entries[i + diff];
299
300         voltage_table->count = max_voltage_steps;
301 }
302
303 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
304                                          struct atom_voltage_table_entry *voltage_table,
305                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
306 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
307 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
308                                        u32 target_tdp);
309 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
310 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
311 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
312
313 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
314                                                              PPSMC_Msg msg, u32 parameter);
315 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
316 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
317
318 static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
319 {
320         struct ci_power_info *pi = adev->pm.dpm.priv;
321
322         return pi;
323 }
324
325 static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
326 {
327         struct ci_ps *ps = rps->ps_priv;
328
329         return ps;
330 }
331
332 static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
333 {
334         struct ci_power_info *pi = ci_get_pi(adev);
335
336         switch (adev->pdev->device) {
337         case 0x6649:
338         case 0x6650:
339         case 0x6651:
340         case 0x6658:
341         case 0x665C:
342         case 0x665D:
343         default:
344                 pi->powertune_defaults = &defaults_bonaire_xt;
345                 break;
346         case 0x6640:
347         case 0x6641:
348         case 0x6646:
349         case 0x6647:
350                 pi->powertune_defaults = &defaults_saturn_xt;
351                 break;
352         case 0x67B8:
353         case 0x67B0:
354                 pi->powertune_defaults = &defaults_hawaii_xt;
355                 break;
356         case 0x67BA:
357         case 0x67B1:
358                 pi->powertune_defaults = &defaults_hawaii_pro;
359                 break;
360         case 0x67A0:
361         case 0x67A1:
362         case 0x67A2:
363         case 0x67A8:
364         case 0x67A9:
365         case 0x67AA:
366         case 0x67B9:
367         case 0x67BE:
368                 pi->powertune_defaults = &defaults_bonaire_xt;
369                 break;
370         }
371
372         pi->dte_tj_offset = 0;
373
374         pi->caps_power_containment = true;
375         pi->caps_cac = false;
376         pi->caps_sq_ramping = false;
377         pi->caps_db_ramping = false;
378         pi->caps_td_ramping = false;
379         pi->caps_tcp_ramping = false;
380
381         if (pi->caps_power_containment) {
382                 pi->caps_cac = true;
383                 if (adev->asic_type == CHIP_HAWAII)
384                         pi->enable_bapm_feature = false;
385                 else
386                         pi->enable_bapm_feature = true;
387                 pi->enable_tdc_limit_feature = true;
388                 pi->enable_pkg_pwr_tracking_feature = true;
389         }
390 }
391
/*
 * Translate a VDDC value (in units of VOLTAGE_SCALE mV) into an SMC
 * VID code: VID = (6200 - mV) / 25.
 */
static u8 ci_convert_to_vid(u16 vddc)
{
	int mv = vddc * VOLTAGE_SCALE;

	return (6200 - mv) / 25;
}
396
397 static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
398 {
399         struct ci_power_info *pi = ci_get_pi(adev);
400         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
401         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
402         u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
403         u32 i;
404
405         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
406                 return -EINVAL;
407         if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
408                 return -EINVAL;
409         if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
410             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
411                 return -EINVAL;
412
413         for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
414                 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
415                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
416                         hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
417                         hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
418                 } else {
419                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
420                         hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
421                 }
422         }
423         return 0;
424 }
425
426 static int ci_populate_vddc_vid(struct amdgpu_device *adev)
427 {
428         struct ci_power_info *pi = ci_get_pi(adev);
429         u8 *vid = pi->smc_powertune_table.VddCVid;
430         u32 i;
431
432         if (pi->vddc_voltage_table.count > 8)
433                 return -EINVAL;
434
435         for (i = 0; i < pi->vddc_voltage_table.count; i++)
436                 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
437
438         return 0;
439 }
440
441 static int ci_populate_svi_load_line(struct amdgpu_device *adev)
442 {
443         struct ci_power_info *pi = ci_get_pi(adev);
444         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
445
446         pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
447         pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
448         pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
449         pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
450
451         return 0;
452 }
453
454 static int ci_populate_tdc_limit(struct amdgpu_device *adev)
455 {
456         struct ci_power_info *pi = ci_get_pi(adev);
457         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
458         u16 tdc_limit;
459
460         tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
461         pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
462         pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
463                 pt_defaults->tdc_vddc_throttle_release_limit_perc;
464         pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
465
466         return 0;
467 }
468
/*
 * Populate the TdcWaterfallCtl entry of the SMC PmFuses table.
 *
 * Reads the current TdcWaterfallCtl byte from SMC SRAM (at the PmFuses
 * offset inside the firmware header region) and then stores the
 * powertune default in its place.
 *
 * NOTE(review): on a successful read the fetched value is immediately
 * overwritten with pt_defaults->tdc_waterfall_ctl, so the read only
 * acts as a check that the SRAM location is reachable — confirm this
 * is intentional.
 *
 * Returns 0 on success, -EINVAL if the SMC SRAM read fails.
 */
static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}
488
489 static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
490 {
491         struct ci_power_info *pi = ci_get_pi(adev);
492
493         if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
494             (adev->pm.dpm.fan.fan_output_sensitivity == 0))
495                 adev->pm.dpm.fan.fan_output_sensitivity =
496                         adev->pm.dpm.fan.default_fan_output_sensitivity;
497
498         pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
499                 cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
500
501         return 0;
502 }
503
/*
 * Derive the GnbLPML min/max VID fuse fields from the BapmVddC hi/lo
 * VID arrays populated earlier.  Zero entries are treated as
 * "unpopulated" and skipped during the scan.
 *
 * NOTE(review): min and max are seeded with hi_vid[0] before the scan,
 * so if hi_vid[0] is 0 the minimum can never rise above 0 and the
 * function returns -EINVAL even when later entries are valid.
 * Presumably entry 0 is always populated — confirm.
 *
 * Returns 0 on success, -EINVAL if either bound ends up zero.
 */
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}
535
536 static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
537 {
538         struct ci_power_info *pi = ci_get_pi(adev);
539         u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
540         u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
541         struct amdgpu_cac_tdp_table *cac_tdp_table =
542                 adev->pm.dpm.dyn_state.cac_tdp_table;
543
544         hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
545         lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
546
547         pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
548         pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
549
550         return 0;
551 }
552
/*
 * Fill the BAPM-related fields of the SMC DPM table: TDP targets,
 * thermal limits, PPM package power limits, and the BAPMTI_R/RC
 * coefficient matrices.  Always returns 0.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	/* TDP values are scaled to 8.8 fixed point (x256). */
	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	/* GpuTjMax in degrees C; thermal setting is in millidegrees. */
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	/* PPM limits come from the vbios ppm table when present,
	 * otherwise they are zeroed out.
	 */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	/* Walk the flat 15-entry default arrays in iteration/source/sink
	 * order to fill the SMC's 3-D coefficient matrices (big-endian).
	 */
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
600
601 static int ci_populate_pm_base(struct amdgpu_device *adev)
602 {
603         struct ci_power_info *pi = ci_get_pi(adev);
604         u32 pm_fuse_table_offset;
605         int ret;
606
607         if (pi->caps_power_containment) {
608                 ret = amdgpu_ci_read_smc_sram_dword(adev,
609                                              SMU7_FIRMWARE_HEADER_LOCATION +
610                                              offsetof(SMU7_Firmware_Header, PmFuseTable),
611                                              &pm_fuse_table_offset, pi->sram_end);
612                 if (ret)
613                         return ret;
614                 ret = ci_populate_bapm_vddc_vid_sidd(adev);
615                 if (ret)
616                         return ret;
617                 ret = ci_populate_vddc_vid(adev);
618                 if (ret)
619                         return ret;
620                 ret = ci_populate_svi_load_line(adev);
621                 if (ret)
622                         return ret;
623                 ret = ci_populate_tdc_limit(adev);
624                 if (ret)
625                         return ret;
626                 ret = ci_populate_dw8(adev);
627                 if (ret)
628                         return ret;
629                 ret = ci_populate_fuzzy_fan(adev);
630                 if (ret)
631                         return ret;
632                 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
633                 if (ret)
634                         return ret;
635                 ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
636                 if (ret)
637                         return ret;
638                 ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
639                                            (u8 *)&pi->smc_powertune_table,
640                                            sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
641                 if (ret)
642                         return ret;
643         }
644
645         return 0;
646 }
647
648 static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
649 {
650         struct ci_power_info *pi = ci_get_pi(adev);
651         u32 data;
652
653         if (pi->caps_sq_ramping) {
654                 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
655                 if (enable)
656                         data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
657                 else
658                         data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
659                 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
660         }
661
662         if (pi->caps_db_ramping) {
663                 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
664                 if (enable)
665                         data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
666                 else
667                         data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
668                 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
669         }
670
671         if (pi->caps_td_ramping) {
672                 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
673                 if (enable)
674                         data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
675                 else
676                         data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
677                 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
678         }
679
680         if (pi->caps_tcp_ramping) {
681                 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
682                 if (enable)
683                         data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
684                 else
685                         data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
686                 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
687         }
688 }
689
/**
 * ci_program_pt_config_registers - program a power-tune register table
 * @adev: amdgpu device
 * @cac_config_regs: array of entries terminated by offset == 0xFFFFFFFF
 *
 * Entries of type CISLANDS_CONFIGREG_CACHE only accumulate their
 * (value << shift) & mask bits into @cache; the next non-cache entry
 * ORs the cached bits into its own read-modify-write, so several
 * fields destined for one register land in a single register access.
 *
 * Returns 0 on success, -EINVAL if @cac_config_regs is NULL.
 */
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			/* read through the register space this entry targets */
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache; /* merge fields cached by preceding CACHE entries */

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0; /* cached bits consumed by this write */
		}
		config_regs++;
	}
	return 0;
}
737
738 static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
739 {
740         struct ci_power_info *pi = ci_get_pi(adev);
741         int ret;
742
743         if (pi->caps_sq_ramping || pi->caps_db_ramping ||
744             pi->caps_td_ramping || pi->caps_tcp_ramping) {
745                 adev->gfx.rlc.funcs->enter_safe_mode(adev);
746
747                 if (enable) {
748                         ret = ci_program_pt_config_registers(adev, didt_config_ci);
749                         if (ret) {
750                                 adev->gfx.rlc.funcs->exit_safe_mode(adev);
751                                 return ret;
752                         }
753                 }
754
755                 ci_do_enable_didt(adev, enable);
756
757                 adev->gfx.rlc.funcs->exit_safe_mode(adev);
758         }
759
760         return 0;
761 }
762
763 static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
764 {
765         struct ci_power_info *pi = ci_get_pi(adev);
766         PPSMC_Result smc_result;
767         int ret = 0;
768
769         if (enable) {
770                 pi->power_containment_features = 0;
771                 if (pi->caps_power_containment) {
772                         if (pi->enable_bapm_feature) {
773                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
774                                 if (smc_result != PPSMC_Result_OK)
775                                         ret = -EINVAL;
776                                 else
777                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
778                         }
779
780                         if (pi->enable_tdc_limit_feature) {
781                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
782                                 if (smc_result != PPSMC_Result_OK)
783                                         ret = -EINVAL;
784                                 else
785                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
786                         }
787
788                         if (pi->enable_pkg_pwr_tracking_feature) {
789                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
790                                 if (smc_result != PPSMC_Result_OK) {
791                                         ret = -EINVAL;
792                                 } else {
793                                         struct amdgpu_cac_tdp_table *cac_tdp_table =
794                                                 adev->pm.dpm.dyn_state.cac_tdp_table;
795                                         u32 default_pwr_limit =
796                                                 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
797
798                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
799
800                                         ci_set_power_limit(adev, default_pwr_limit);
801                                 }
802                         }
803                 }
804         } else {
805                 if (pi->caps_power_containment && pi->power_containment_features) {
806                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
807                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
808
809                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
810                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
811
812                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
813                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
814                         pi->power_containment_features = 0;
815                 }
816         }
817
818         return ret;
819 }
820
821 static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
822 {
823         struct ci_power_info *pi = ci_get_pi(adev);
824         PPSMC_Result smc_result;
825         int ret = 0;
826
827         if (pi->caps_cac) {
828                 if (enable) {
829                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
830                         if (smc_result != PPSMC_Result_OK) {
831                                 ret = -EINVAL;
832                                 pi->cac_enabled = false;
833                         } else {
834                                 pi->cac_enabled = true;
835                         }
836                 } else if (pi->cac_enabled) {
837                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
838                         pi->cac_enabled = false;
839                 }
840         }
841
842         return ret;
843 }
844
845 static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
846                                             bool enable)
847 {
848         struct ci_power_info *pi = ci_get_pi(adev);
849         PPSMC_Result smc_result = PPSMC_Result_OK;
850
851         if (pi->thermal_sclk_dpm_enabled) {
852                 if (enable)
853                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
854                 else
855                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
856         }
857
858         if (smc_result == PPSMC_Result_OK)
859                 return 0;
860         else
861                 return -EINVAL;
862 }
863
864 static int ci_power_control_set_level(struct amdgpu_device *adev)
865 {
866         struct ci_power_info *pi = ci_get_pi(adev);
867         struct amdgpu_cac_tdp_table *cac_tdp_table =
868                 adev->pm.dpm.dyn_state.cac_tdp_table;
869         s32 adjust_percent;
870         s32 target_tdp;
871         int ret = 0;
872         bool adjust_polarity = false; /* ??? */
873
874         if (pi->caps_power_containment) {
875                 adjust_percent = adjust_polarity ?
876                         adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
877                 target_tdp = ((100 + adjust_percent) *
878                               (s32)cac_tdp_table->configurable_tdp) / 100;
879
880                 ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
881         }
882
883         return ret;
884 }
885
886 static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
887 {
888         struct ci_power_info *pi = ci_get_pi(adev);
889
890         pi->uvd_power_gated = gate;
891
892         if (gate) {
893                 /* stop the UVD block */
894                 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
895                                                         AMD_PG_STATE_GATE);
896                 ci_update_uvd_dpm(adev, gate);
897         } else {
898                 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
899                                                         AMD_PG_STATE_UNGATE);
900                 ci_update_uvd_dpm(adev, gate);
901         }
902 }
903
904 static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
905 {
906         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
907         u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
908
909         if (vblank_time < switch_limit)
910                 return true;
911         else
912                 return false;
913
914 }
915
/**
 * ci_apply_state_adjust_rules - clamp a requested power state to legal limits
 * @adev: amdgpu device
 * @rps: requested state, adjusted in place
 *
 * Applies, in order: VCE clock requirements, DC (battery) clock
 * ceilings, display minimum clocks, and mclk-switching restrictions to
 * the performance levels of @rps.  The ordering of the clamps below is
 * significant.
 */
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	/* pick up the VCE clocks for the current VCE level, if active */
	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* mclk switching is only allowed with a single active crtc and a
	 * vblank long enough to hide the switch */
	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on DC power, cap every performance level to the DC limits */
	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	/* with switching disabled, start from the top level's mclk so
	 * level 0 can be raised to it below */
	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	/* honor display-driven minimum clocks */
	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
		sclk = adev->pm.pm_display_cfg.min_core_set_clock;

	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

	/* VCE imposes its own minimum engine/memory clocks */
	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	/* keep sclk monotonic across levels */
	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		/* no switching: raise level 0 to the higher mclk */
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		/* keep mclk monotonic across levels */
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
996
997 static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
998                                             int min_temp, int max_temp)
999 {
1000         int low_temp = 0 * 1000;
1001         int high_temp = 255 * 1000;
1002         u32 tmp;
1003
1004         if (low_temp < min_temp)
1005                 low_temp = min_temp;
1006         if (high_temp > max_temp)
1007                 high_temp = max_temp;
1008         if (high_temp < low_temp) {
1009                 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1010                 return -EINVAL;
1011         }
1012
1013         tmp = RREG32_SMC(ixCG_THERMAL_INT);
1014         tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
1015         tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
1016                 ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
1017         WREG32_SMC(ixCG_THERMAL_INT, tmp);
1018
1019 #if 0
1020         /* XXX: need to figure out how to handle this properly */
1021         tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1022         tmp &= DIG_THERM_DPM_MASK;
1023         tmp |= DIG_THERM_DPM(high_temp / 1000);
1024         WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1025 #endif
1026
1027         adev->pm.dpm.thermal.min_temp = low_temp;
1028         adev->pm.dpm.thermal.max_temp = high_temp;
1029         return 0;
1030 }
1031
1032 static int ci_thermal_enable_alert(struct amdgpu_device *adev,
1033                                    bool enable)
1034 {
1035         u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
1036         PPSMC_Result result;
1037
1038         if (enable) {
1039                 thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1040                                  CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1041                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1042                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
1043                 if (result != PPSMC_Result_OK) {
1044                         DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1045                         return -EINVAL;
1046                 }
1047         } else {
1048                 thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1049                         CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
1050                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1051                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
1052                 if (result != PPSMC_Result_OK) {
1053                         DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
1054                         return -EINVAL;
1055                 }
1056         }
1057
1058         return 0;
1059 }
1060
/**
 * ci_fan_ctrl_set_static_mode - put the fan controller into a static PWM mode
 * @adev: amdgpu device
 * @mode: FDO_PWM_MODE_* value to program
 *
 * On the first call, saves the hardware's current PWM mode and TMIN so
 * ci_fan_ctrl_set_default_mode() can restore them later, then clears
 * TMIN and programs the requested mode.
 */
static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		/* remember the default mode and TMIN before overriding them */
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	/* zero TMIN so the static setting takes effect immediately */
	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}
1084
/**
 * ci_thermal_setup_fan_table - build and upload the SMC fan control table
 * @adev: amdgpu device
 *
 * Converts the fan profile in adev->pm.dpm.fan into the
 * SMU7_Discrete_FanTable format (big-endian fields) and copies it into
 * SMC SRAM at pi->fan_table_start.  On any problem (no table address,
 * zero duty100, upload failure) it falls back to disabling ucode fan
 * control rather than failing hard, and still returns 0.
 *
 * NOTE(review): the slope computations divide by t_diff1/t_diff2;
 * this assumes t_med > t_min and t_high > t_med — TODO confirm the fan
 * profile is validated before this point.
 */
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* scale pwm_min (hundredths of a percent) to the duty100 range */
	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* temperatures are stored in hundredths of a degree; +50 rounds */
	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	/* temperature source currently selected by the hardware */
	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}
1163
1164 static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
1165 {
1166         struct ci_power_info *pi = ci_get_pi(adev);
1167         PPSMC_Result ret;
1168
1169         if (pi->caps_od_fuzzy_fan_control_support) {
1170                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1171                                                                PPSMC_StartFanControl,
1172                                                                FAN_CONTROL_FUZZY);
1173                 if (ret != PPSMC_Result_OK)
1174                         return -EINVAL;
1175                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1176                                                                PPSMC_MSG_SetFanPwmMax,
1177                                                                adev->pm.dpm.fan.default_max_fan_pwm);
1178                 if (ret != PPSMC_Result_OK)
1179                         return -EINVAL;
1180         } else {
1181                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1182                                                                PPSMC_StartFanControl,
1183                                                                FAN_CONTROL_TABLE);
1184                 if (ret != PPSMC_Result_OK)
1185                         return -EINVAL;
1186         }
1187
1188         pi->fan_is_controlled_by_smc = true;
1189         return 0;
1190 }
1191
1192
1193 static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
1194 {
1195         PPSMC_Result ret;
1196         struct ci_power_info *pi = ci_get_pi(adev);
1197
1198         ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
1199         if (ret == PPSMC_Result_OK) {
1200                 pi->fan_is_controlled_by_smc = false;
1201                 return 0;
1202         } else {
1203                 return -EINVAL;
1204         }
1205 }
1206
1207 static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
1208                                         u32 *speed)
1209 {
1210         u32 duty, duty100;
1211         u64 tmp64;
1212
1213         if (adev->pm.no_fan)
1214                 return -ENOENT;
1215
1216         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1217                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1218         duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1219                 >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1220
1221         if (duty100 == 0)
1222                 return -EINVAL;
1223
1224         tmp64 = (u64)duty * 100;
1225         do_div(tmp64, duty100);
1226         *speed = (u32)tmp64;
1227
1228         if (*speed > 100)
1229                 *speed = 100;
1230
1231         return 0;
1232 }
1233
1234 static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
1235                                         u32 speed)
1236 {
1237         u32 tmp;
1238         u32 duty, duty100;
1239         u64 tmp64;
1240         struct ci_power_info *pi = ci_get_pi(adev);
1241
1242         if (adev->pm.no_fan)
1243                 return -ENOENT;
1244
1245         if (pi->fan_is_controlled_by_smc)
1246                 return -EINVAL;
1247
1248         if (speed > 100)
1249                 return -EINVAL;
1250
1251         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1252                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1253
1254         if (duty100 == 0)
1255                 return -EINVAL;
1256
1257         tmp64 = (u64)speed * duty100;
1258         do_div(tmp64, 100);
1259         duty = (u32)tmp64;
1260
1261         tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1262         tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1263         WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1264
1265         return 0;
1266 }
1267
1268 static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
1269 {
1270         switch (mode) {
1271         case AMD_FAN_CTRL_NONE:
1272                 if (adev->pm.dpm.fan.ucode_fan_control)
1273                         ci_fan_ctrl_stop_smc_fan_control(adev);
1274                 ci_dpm_set_fan_speed_percent(adev, 100);
1275                 break;
1276         case AMD_FAN_CTRL_MANUAL:
1277                 if (adev->pm.dpm.fan.ucode_fan_control)
1278                         ci_fan_ctrl_stop_smc_fan_control(adev);
1279                 break;
1280         case AMD_FAN_CTRL_AUTO:
1281                 if (adev->pm.dpm.fan.ucode_fan_control)
1282                         ci_thermal_start_smc_fan_control(adev);
1283                 break;
1284         default:
1285                 break;
1286         }
1287 }
1288
1289 static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
1290 {
1291         struct ci_power_info *pi = ci_get_pi(adev);
1292
1293         if (pi->fan_is_controlled_by_smc)
1294                 return AMD_FAN_CTRL_AUTO;
1295         else
1296                 return AMD_FAN_CTRL_MANUAL;
1297 }
1298
1299 #if 0
1300 static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
1301                                          u32 *speed)
1302 {
1303         u32 tach_period;
1304         u32 xclk = amdgpu_asic_get_xclk(adev);
1305
1306         if (adev->pm.no_fan)
1307                 return -ENOENT;
1308
1309         if (adev->pm.fan_pulses_per_revolution == 0)
1310                 return -ENOENT;
1311
1312         tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
1313                 >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
1314         if (tach_period == 0)
1315                 return -ENOENT;
1316
1317         *speed = 60 * xclk * 10000 / tach_period;
1318
1319         return 0;
1320 }
1321
1322 static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
1323                                          u32 speed)
1324 {
1325         u32 tach_period, tmp;
1326         u32 xclk = amdgpu_asic_get_xclk(adev);
1327
1328         if (adev->pm.no_fan)
1329                 return -ENOENT;
1330
1331         if (adev->pm.fan_pulses_per_revolution == 0)
1332                 return -ENOENT;
1333
1334         if ((speed < adev->pm.fan_min_rpm) ||
1335             (speed > adev->pm.fan_max_rpm))
1336                 return -EINVAL;
1337
1338         if (adev->pm.dpm.fan.ucode_fan_control)
1339                 ci_fan_ctrl_stop_smc_fan_control(adev);
1340
1341         tach_period = 60 * xclk * 10000 / (8 * speed);
1342         tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
1343         tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1344         WREG32_SMC(CG_TACH_CTRL, tmp);
1345
1346         ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1347
1348         return 0;
1349 }
1350 #endif
1351
/**
 * ci_fan_ctrl_set_default_mode - restore the fan controller defaults
 * @adev: amdgpu device
 *
 * Restores the PWM mode and TMIN values saved by
 * ci_fan_ctrl_set_static_mode(); no-op if already in default mode.
 */
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}
1368
1369 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1370 {
1371         if (adev->pm.dpm.fan.ucode_fan_control) {
1372                 ci_fan_ctrl_start_smc_fan_control(adev);
1373                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1374         }
1375 }
1376
/**
 * ci_thermal_initialize - static setup of the fan/tach control registers
 * @adev: amdgpu device
 *
 * Programs the tach edges-per-revolution (when a tach is present) and
 * a fixed TACH_PWM_RESP_RATE of 0x28.
 */
static void ci_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		/* hardware field is pulses-per-rev minus one */
		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32_SMC(ixCG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}
1392
1393 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1394 {
1395         int ret;
1396
1397         ci_thermal_initialize(adev);
1398         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1399         if (ret)
1400                 return ret;
1401         ret = ci_thermal_enable_alert(adev, true);
1402         if (ret)
1403                 return ret;
1404         if (adev->pm.dpm.fan.ucode_fan_control) {
1405                 ret = ci_thermal_setup_fan_table(adev);
1406                 if (ret)
1407                         return ret;
1408                 ci_thermal_start_smc_fan_control(adev);
1409         }
1410
1411         return 0;
1412 }
1413
1414 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1415 {
1416         if (!adev->pm.no_fan)
1417                 ci_fan_ctrl_set_default_mode(adev);
1418 }
1419
/**
 * ci_read_smc_soft_register - read a dword from the SMC soft-register area
 * @adev: amdgpu device
 * @reg_offset: byte offset from pi->soft_regs_start
 * @value: filled with the dword read
 *
 * Returns the result of amdgpu_ci_read_smc_sram_dword() (0 on success).
 */
static int ci_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_read_smc_sram_dword(adev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
1429
/**
 * ci_write_smc_soft_register - write a dword into the SMC soft-register area
 * @adev: amdgpu device
 * @reg_offset: byte offset from pi->soft_regs_start
 * @value: dword to write
 *
 * Returns the result of amdgpu_ci_write_smc_sram_dword() (0 on success).
 */
static int ci_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_write_smc_sram_dword(adev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}
1439
1440 static void ci_init_fps_limits(struct amdgpu_device *adev)
1441 {
1442         struct ci_power_info *pi = ci_get_pi(adev);
1443         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1444
1445         if (pi->caps_fps) {
1446                 u16 tmp;
1447
1448                 tmp = 45;
1449                 table->FpsHighT = cpu_to_be16(tmp);
1450
1451                 tmp = 30;
1452                 table->FpsLowT = cpu_to_be16(tmp);
1453         }
1454 }
1455
1456 static int ci_update_sclk_t(struct amdgpu_device *adev)
1457 {
1458         struct ci_power_info *pi = ci_get_pi(adev);
1459         int ret = 0;
1460         u32 low_sclk_interrupt_t = 0;
1461
1462         if (pi->caps_sclk_throttle_low_notification) {
1463                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1464
1465                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1466                                            pi->dpm_table_start +
1467                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1468                                            (u8 *)&low_sclk_interrupt_t,
1469                                            sizeof(u32), pi->sram_end);
1470
1471         }
1472
1473         return ret;
1474 }
1475
1476 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1477 {
1478         struct ci_power_info *pi = ci_get_pi(adev);
1479         u16 leakage_id, virtual_voltage_id;
1480         u16 vddc, vddci;
1481         int i;
1482
1483         pi->vddc_leakage.count = 0;
1484         pi->vddci_leakage.count = 0;
1485
1486         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1487                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1488                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1489                         if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1490                                 continue;
1491                         if (vddc != 0 && vddc != virtual_voltage_id) {
1492                                 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1493                                 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1494                                 pi->vddc_leakage.count++;
1495                         }
1496                 }
1497         } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1498                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1499                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1500                         if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1501                                                                                      virtual_voltage_id,
1502                                                                                      leakage_id) == 0) {
1503                                 if (vddc != 0 && vddc != virtual_voltage_id) {
1504                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1505                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1506                                         pi->vddc_leakage.count++;
1507                                 }
1508                                 if (vddci != 0 && vddci != virtual_voltage_id) {
1509                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1510                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1511                                         pi->vddci_leakage.count++;
1512                                 }
1513                         }
1514                 }
1515         }
1516 }
1517
1518 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1519 {
1520         struct ci_power_info *pi = ci_get_pi(adev);
1521         bool want_thermal_protection;
1522         enum amdgpu_dpm_event_src dpm_event_src;
1523         u32 tmp;
1524
1525         switch (sources) {
1526         case 0:
1527         default:
1528                 want_thermal_protection = false;
1529                 break;
1530         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1531                 want_thermal_protection = true;
1532                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1533                 break;
1534         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1535                 want_thermal_protection = true;
1536                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1537                 break;
1538         case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1539               (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1540                 want_thermal_protection = true;
1541                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1542                 break;
1543         }
1544
1545         if (want_thermal_protection) {
1546 #if 0
1547                 /* XXX: need to figure out how to handle this properly */
1548                 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1549                 tmp &= DPM_EVENT_SRC_MASK;
1550                 tmp |= DPM_EVENT_SRC(dpm_event_src);
1551                 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1552 #endif
1553
1554                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1555                 if (pi->thermal_protection)
1556                         tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1557                 else
1558                         tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1559                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1560         } else {
1561                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1562                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1563                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1564         }
1565 }
1566
1567 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1568                                            enum amdgpu_dpm_auto_throttle_src source,
1569                                            bool enable)
1570 {
1571         struct ci_power_info *pi = ci_get_pi(adev);
1572
1573         if (enable) {
1574                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1575                         pi->active_auto_throttle_sources |= 1 << source;
1576                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1577                 }
1578         } else {
1579                 if (pi->active_auto_throttle_sources & (1 << source)) {
1580                         pi->active_auto_throttle_sources &= ~(1 << source);
1581                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1582                 }
1583         }
1584 }
1585
1586 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1587 {
1588         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1589                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1590 }
1591
1592 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1593 {
1594         struct ci_power_info *pi = ci_get_pi(adev);
1595         PPSMC_Result smc_result;
1596
1597         if (!pi->need_update_smu7_dpm_table)
1598                 return 0;
1599
1600         if ((!pi->sclk_dpm_key_disabled) &&
1601             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1602                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1603                 if (smc_result != PPSMC_Result_OK)
1604                         return -EINVAL;
1605         }
1606
1607         if ((!pi->mclk_dpm_key_disabled) &&
1608             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1609                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1610                 if (smc_result != PPSMC_Result_OK)
1611                         return -EINVAL;
1612         }
1613
1614         pi->need_update_smu7_dpm_table = 0;
1615         return 0;
1616 }
1617
/*
 * Enable or disable the sclk and mclk DPM engines in the SMC.
 * On mclk enable, also arms the memory CAC (power measurement) blocks.
 * Returns 0 on success, -EINVAL if the SMC rejects a message.
 */
static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			/* turn on CAC measurement in the MC sequencer */
			WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
					~MC_SEQ_CNTL_3__CAC_EN_MASK);

			/* two-step LCAC bring-up; the magic values are an
			 * undocumented AMD-provided sequence — do not change */
			WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
1664
/*
 * Bring up dynamic power management: enable the global power management
 * and dynamic sclk control bits, then ask the SMC to enable voltage
 * control, sclk/mclk DPM and (if not key-disabled) PCIe DPM.
 * Returns 0 on success or a negative error code.
 */
static int ci_start_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	/* give the SMC a voltage-change timeout before enabling DPM */
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(adev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1700
1701 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1702 {
1703         struct ci_power_info *pi = ci_get_pi(adev);
1704         PPSMC_Result smc_result;
1705
1706         if (!pi->need_update_smu7_dpm_table)
1707                 return 0;
1708
1709         if ((!pi->sclk_dpm_key_disabled) &&
1710             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1711                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1712                 if (smc_result != PPSMC_Result_OK)
1713                         return -EINVAL;
1714         }
1715
1716         if ((!pi->mclk_dpm_key_disabled) &&
1717             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1718                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1719                 if (smc_result != PPSMC_Result_OK)
1720                         return -EINVAL;
1721         }
1722
1723         return 0;
1724 }
1725
/*
 * Tear down dynamic power management in the reverse order of
 * ci_start_dpm(): clear the global/dynamic PM bits, then disable
 * PCIe DPM, sclk/mclk DPM and voltage control in the SMC.
 * Returns 0 on success or a negative error code.
 */
static int ci_stop_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(adev, false);
	if (ret)
		return ret;

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
1757
1758 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1759 {
1760         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1761
1762         if (enable)
1763                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1764         else
1765                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1766         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1767 }
1768
#if 0
/*
 * Update the SMC power limit for an AC <-> DC power source change.
 * Currently compiled out; kept for reference.
 */
static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	/* * 256: presumably converts watts to the SMC's fixed-point
	 * format — TODO confirm against ci_set_power_limit() callers */
	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(adev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
		else
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
1795
/* Send a message to the SMC with a single u32 argument; the argument
 * is written to SMC_MSG_ARG_0 before the message is issued. */
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(mmSMC_MSG_ARG_0, parameter);
	return amdgpu_ci_send_msg_to_smc(adev, msg);
}
1802
1803 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1804                                                         PPSMC_Msg msg, u32 *parameter)
1805 {
1806         PPSMC_Result smc_result;
1807
1808         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1809
1810         if ((smc_result == PPSMC_Result_OK) && parameter)
1811                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1812
1813         return smc_result;
1814 }
1815
1816 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1817 {
1818         struct ci_power_info *pi = ci_get_pi(adev);
1819
1820         if (!pi->sclk_dpm_key_disabled) {
1821                 PPSMC_Result smc_result =
1822                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1823                 if (smc_result != PPSMC_Result_OK)
1824                         return -EINVAL;
1825         }
1826
1827         return 0;
1828 }
1829
1830 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1831 {
1832         struct ci_power_info *pi = ci_get_pi(adev);
1833
1834         if (!pi->mclk_dpm_key_disabled) {
1835                 PPSMC_Result smc_result =
1836                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1837                 if (smc_result != PPSMC_Result_OK)
1838                         return -EINVAL;
1839         }
1840
1841         return 0;
1842 }
1843
1844 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1845 {
1846         struct ci_power_info *pi = ci_get_pi(adev);
1847
1848         if (!pi->pcie_dpm_key_disabled) {
1849                 PPSMC_Result smc_result =
1850                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1851                 if (smc_result != PPSMC_Result_OK)
1852                         return -EINVAL;
1853         }
1854
1855         return 0;
1856 }
1857
1858 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1859 {
1860         struct ci_power_info *pi = ci_get_pi(adev);
1861
1862         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1863                 PPSMC_Result smc_result =
1864                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1865                 if (smc_result != PPSMC_Result_OK)
1866                         return -EINVAL;
1867         }
1868
1869         return 0;
1870 }
1871
1872 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1873                                        u32 target_tdp)
1874 {
1875         PPSMC_Result smc_result =
1876                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1877         if (smc_result != PPSMC_Result_OK)
1878                 return -EINVAL;
1879         return 0;
1880 }
1881
#if 0
/* Drop back to the boot state by disabling sclk/mclk DPM.
 * Currently compiled out; kept for reference. */
static int ci_set_boot_state(struct amdgpu_device *adev)
{
	return ci_enable_sclk_mclk_dpm(adev, false);
}
#endif
1888
1889 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1890 {
1891         u32 sclk_freq;
1892         PPSMC_Result smc_result =
1893                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1894                                                     PPSMC_MSG_API_GetSclkFrequency,
1895                                                     &sclk_freq);
1896         if (smc_result != PPSMC_Result_OK)
1897                 sclk_freq = 0;
1898
1899         return sclk_freq;
1900 }
1901
1902 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1903 {
1904         u32 mclk_freq;
1905         PPSMC_Result smc_result =
1906                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1907                                                     PPSMC_MSG_API_GetMclkFrequency,
1908                                                     &mclk_freq);
1909         if (smc_result != PPSMC_Result_OK)
1910                 mclk_freq = 0;
1911
1912         return mclk_freq;
1913 }
1914
/*
 * Start the SMC microcontroller and busy-wait (up to usec_timeout
 * iterations) for the firmware to report that its interrupts are
 * enabled. A timeout is not treated as an error here.
 */
static void ci_dpm_start_smc(struct amdgpu_device *adev)
{
	int i;

	amdgpu_ci_program_jump_on_start(adev);
	amdgpu_ci_start_smc_clock(adev);
	amdgpu_ci_start_smc(adev);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
			break;
	}
}
1927
/* Halt the SMC by resetting it and gating its clock. */
static void ci_dpm_stop_smc(struct amdgpu_device *adev)
{
	amdgpu_ci_reset_smc(adev);
	amdgpu_ci_stop_smc_clock(adev);
}
1933
1934 static int ci_process_firmware_header(struct amdgpu_device *adev)
1935 {
1936         struct ci_power_info *pi = ci_get_pi(adev);
1937         u32 tmp;
1938         int ret;
1939
1940         ret = amdgpu_ci_read_smc_sram_dword(adev,
1941                                      SMU7_FIRMWARE_HEADER_LOCATION +
1942                                      offsetof(SMU7_Firmware_Header, DpmTable),
1943                                      &tmp, pi->sram_end);
1944         if (ret)
1945                 return ret;
1946
1947         pi->dpm_table_start = tmp;
1948
1949         ret = amdgpu_ci_read_smc_sram_dword(adev,
1950                                      SMU7_FIRMWARE_HEADER_LOCATION +
1951                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1952                                      &tmp, pi->sram_end);
1953         if (ret)
1954                 return ret;
1955
1956         pi->soft_regs_start = tmp;
1957
1958         ret = amdgpu_ci_read_smc_sram_dword(adev,
1959                                      SMU7_FIRMWARE_HEADER_LOCATION +
1960                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1961                                      &tmp, pi->sram_end);
1962         if (ret)
1963                 return ret;
1964
1965         pi->mc_reg_table_start = tmp;
1966
1967         ret = amdgpu_ci_read_smc_sram_dword(adev,
1968                                      SMU7_FIRMWARE_HEADER_LOCATION +
1969                                      offsetof(SMU7_Firmware_Header, FanTable),
1970                                      &tmp, pi->sram_end);
1971         if (ret)
1972                 return ret;
1973
1974         pi->fan_table_start = tmp;
1975
1976         ret = amdgpu_ci_read_smc_sram_dword(adev,
1977                                      SMU7_FIRMWARE_HEADER_LOCATION +
1978                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1979                                      &tmp, pi->sram_end);
1980         if (ret)
1981                 return ret;
1982
1983         pi->arb_table_start = tmp;
1984
1985         return 0;
1986 }
1987
/*
 * Snapshot the SPLL (sclk) and MPLL (mclk) clock registers into
 * pi->clock_registers so later clock programming can start from the
 * current hardware state.
 */
static void ci_read_clock_registers(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
}
2014
/* Reset the low-sclk interrupt threshold to its initial value. */
static void ci_init_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}
2021
2022 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2023                                          bool enable)
2024 {
2025         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2026
2027         if (enable)
2028                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2029         else
2030                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2031         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2032 }
2033
2034 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2035 {
2036         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2037
2038         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2039
2040         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2041 }
2042
2043 #if 0
/* Ask the SMC to switch to its minimum power (ULP) state.
 * Compiled out; no handshake is performed, only a fixed delay. */
static int ci_enter_ulp_state(struct amdgpu_device *adev)
{

	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	/* give the SMC time to act on the message */
	udelay(25000);

	return 0;
}
2053
/* Ask the SMC to leave its minimum power state, then poll for its
 * acknowledgement. Compiled out; a timeout is not reported. */
static int ci_exit_ulp_state(struct amdgpu_device *adev)
{
	int i;

	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	/* wait for the SMC to acknowledge the resume message */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmSMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
2070 #endif
2071
2072 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2073                                         bool has_display)
2074 {
2075         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2076
2077         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2078 }
2079
2080 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2081                                       bool enable)
2082 {
2083         struct ci_power_info *pi = ci_get_pi(adev);
2084
2085         if (enable) {
2086                 if (pi->caps_sclk_ds) {
2087                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2088                                 return -EINVAL;
2089                 } else {
2090                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2091                                 return -EINVAL;
2092                 }
2093         } else {
2094                 if (pi->caps_sclk_ds) {
2095                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2096                                 return -EINVAL;
2097                 }
2098         }
2099
2100         return 0;
2101 }
2102
/*
 * Program the display gap control for the current crtc configuration
 * and tell the SMC how long before vblank the display is active.
 */
static void ci_program_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = adev->clock.spll.reference_freq;
	u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);

	tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
	if (adev->pm.dpm.new_active_crtc_count > 0)
		tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	else
		tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);

	/* fall back to sane defaults when the display info is unknown */
	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	/* NOTE(review): this u32 subtraction underflows if vblank_time >
	 * frame_time_in_us - 200 — confirm the callers bound vblank_time */
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	/* convert to reference-clock ticks (ref_clock is in 10 kHz units) */
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));

}
2136
2137 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2138 {
2139         struct ci_power_info *pi = ci_get_pi(adev);
2140         u32 tmp;
2141
2142         if (enable) {
2143                 if (pi->caps_sclk_ss_support) {
2144                         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2145                         tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2146                         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2147                 }
2148         } else {
2149                 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2150                 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2151                 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2152
2153                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2154                 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2155                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2156         }
2157 }
2158
2159 static void ci_program_sstp(struct amdgpu_device *adev)
2160 {
2161         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2162         ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2163          (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2164 }
2165
2166 static void ci_enable_display_gap(struct amdgpu_device *adev)
2167 {
2168         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2169
2170         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2171                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2172         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2173                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2174
2175         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2176 }
2177
/*
 * Program the default frequency-transition voting clients and let the
 * sclk/busy counters run (clear the reset bits).
 */
static void ci_program_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
}
2195
/*
 * Clear all frequency-transition voting clients and hold the sclk/busy
 * counters in reset — the inverse of ci_program_vc().
 */
static void ci_clear_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
}
2213
2214 static int ci_upload_firmware(struct amdgpu_device *adev)
2215 {
2216         int i, ret;
2217
2218         if (amdgpu_ci_is_smc_running(adev)) {
2219                 DRM_INFO("smc is running, no need to load smc firmware\n");
2220                 return 0;
2221         }
2222
2223         for (i = 0; i < adev->usec_timeout; i++) {
2224                 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2225                         break;
2226         }
2227         WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2228
2229         amdgpu_ci_stop_smc_clock(adev);
2230         amdgpu_ci_reset_smc(adev);
2231
2232         ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
2233
2234         return ret;
2235
2236 }
2237
2238 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2239                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2240                                      struct atom_voltage_table *voltage_table)
2241 {
2242         u32 i;
2243
2244         if (voltage_dependency_table == NULL)
2245                 return -EINVAL;
2246
2247         voltage_table->mask_low = 0;
2248         voltage_table->phase_delay = 0;
2249
2250         voltage_table->count = voltage_dependency_table->count;
2251         for (i = 0; i < voltage_table->count; i++) {
2252                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2253                 voltage_table->entries[i].smio_low = 0;
2254         }
2255
2256         return 0;
2257 }
2258
2259 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2260 {
2261         struct ci_power_info *pi = ci_get_pi(adev);
2262         int ret;
2263
2264         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2265                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2266                                                         VOLTAGE_OBJ_GPIO_LUT,
2267                                                         &pi->vddc_voltage_table);
2268                 if (ret)
2269                         return ret;
2270         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2271                 ret = ci_get_svi2_voltage_table(adev,
2272                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2273                                                 &pi->vddc_voltage_table);
2274                 if (ret)
2275                         return ret;
2276         }
2277
2278         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2279                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2280                                                          &pi->vddc_voltage_table);
2281
2282         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2283                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2284                                                         VOLTAGE_OBJ_GPIO_LUT,
2285                                                         &pi->vddci_voltage_table);
2286                 if (ret)
2287                         return ret;
2288         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2289                 ret = ci_get_svi2_voltage_table(adev,
2290                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2291                                                 &pi->vddci_voltage_table);
2292                 if (ret)
2293                         return ret;
2294         }
2295
2296         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2297                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2298                                                          &pi->vddci_voltage_table);
2299
2300         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2301                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2302                                                         VOLTAGE_OBJ_GPIO_LUT,
2303                                                         &pi->mvdd_voltage_table);
2304                 if (ret)
2305                         return ret;
2306         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2307                 ret = ci_get_svi2_voltage_table(adev,
2308                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2309                                                 &pi->mvdd_voltage_table);
2310                 if (ret)
2311                         return ret;
2312         }
2313
2314         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2315                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2316                                                          &pi->mvdd_voltage_table);
2317
2318         return 0;
2319 }
2320
2321 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2322                                           struct atom_voltage_table_entry *voltage_table,
2323                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2324 {
2325         int ret;
2326
2327         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2328                                             &smc_voltage_table->StdVoltageHiSidd,
2329                                             &smc_voltage_table->StdVoltageLoSidd);
2330
2331         if (ret) {
2332                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2333                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2334         }
2335
2336         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2337         smc_voltage_table->StdVoltageHiSidd =
2338                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2339         smc_voltage_table->StdVoltageLoSidd =
2340                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2341 }
2342
2343 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2344                                       SMU7_Discrete_DpmTable *table)
2345 {
2346         struct ci_power_info *pi = ci_get_pi(adev);
2347         unsigned int count;
2348
2349         table->VddcLevelCount = pi->vddc_voltage_table.count;
2350         for (count = 0; count < table->VddcLevelCount; count++) {
2351                 ci_populate_smc_voltage_table(adev,
2352                                               &pi->vddc_voltage_table.entries[count],
2353                                               &table->VddcLevel[count]);
2354
2355                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2356                         table->VddcLevel[count].Smio |=
2357                                 pi->vddc_voltage_table.entries[count].smio_low;
2358                 else
2359                         table->VddcLevel[count].Smio = 0;
2360         }
2361         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2362
2363         return 0;
2364 }
2365
2366 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2367                                        SMU7_Discrete_DpmTable *table)
2368 {
2369         unsigned int count;
2370         struct ci_power_info *pi = ci_get_pi(adev);
2371
2372         table->VddciLevelCount = pi->vddci_voltage_table.count;
2373         for (count = 0; count < table->VddciLevelCount; count++) {
2374                 ci_populate_smc_voltage_table(adev,
2375                                               &pi->vddci_voltage_table.entries[count],
2376                                               &table->VddciLevel[count]);
2377
2378                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2379                         table->VddciLevel[count].Smio |=
2380                                 pi->vddci_voltage_table.entries[count].smio_low;
2381                 else
2382                         table->VddciLevel[count].Smio = 0;
2383         }
2384         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2385
2386         return 0;
2387 }
2388
2389 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2390                                       SMU7_Discrete_DpmTable *table)
2391 {
2392         struct ci_power_info *pi = ci_get_pi(adev);
2393         unsigned int count;
2394
2395         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2396         for (count = 0; count < table->MvddLevelCount; count++) {
2397                 ci_populate_smc_voltage_table(adev,
2398                                               &pi->mvdd_voltage_table.entries[count],
2399                                               &table->MvddLevel[count]);
2400
2401                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2402                         table->MvddLevel[count].Smio |=
2403                                 pi->mvdd_voltage_table.entries[count].smio_low;
2404                 else
2405                         table->MvddLevel[count].Smio = 0;
2406         }
2407         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2408
2409         return 0;
2410 }
2411
2412 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2413                                           SMU7_Discrete_DpmTable *table)
2414 {
2415         int ret;
2416
2417         ret = ci_populate_smc_vddc_table(adev, table);
2418         if (ret)
2419                 return ret;
2420
2421         ret = ci_populate_smc_vddci_table(adev, table);
2422         if (ret)
2423                 return ret;
2424
2425         ret = ci_populate_smc_mvdd_table(adev, table);
2426         if (ret)
2427                 return ret;
2428
2429         return 0;
2430 }
2431
2432 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2433                                   SMU7_Discrete_VoltageLevel *voltage)
2434 {
2435         struct ci_power_info *pi = ci_get_pi(adev);
2436         u32 i = 0;
2437
2438         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2439                 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2440                         if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2441                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2442                                 break;
2443                         }
2444                 }
2445
2446                 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2447                         return -EINVAL;
2448         }
2449
2450         return -EINVAL;
2451 }
2452
2453 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2454                                          struct atom_voltage_table_entry *voltage_table,
2455                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2456 {
2457         u16 v_index, idx;
2458         bool voltage_found = false;
2459         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2460         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2461
2462         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2463                 return -EINVAL;
2464
2465         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2466                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2467                         if (voltage_table->value ==
2468                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2469                                 voltage_found = true;
2470                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2471                                         idx = v_index;
2472                                 else
2473                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2474                                 *std_voltage_lo_sidd =
2475                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2476                                 *std_voltage_hi_sidd =
2477                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2478                                 break;
2479                         }
2480                 }
2481
2482                 if (!voltage_found) {
2483                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2484                                 if (voltage_table->value <=
2485                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2486                                         voltage_found = true;
2487                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2488                                                 idx = v_index;
2489                                         else
2490                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2491                                         *std_voltage_lo_sidd =
2492                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2493                                         *std_voltage_hi_sidd =
2494                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2495                                         break;
2496                                 }
2497                         }
2498                 }
2499         }
2500
2501         return 0;
2502 }
2503
2504 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2505                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2506                                                   u32 sclk,
2507                                                   u32 *phase_shedding)
2508 {
2509         unsigned int i;
2510
2511         *phase_shedding = 1;
2512
2513         for (i = 0; i < limits->count; i++) {
2514                 if (sclk < limits->entries[i].sclk) {
2515                         *phase_shedding = i;
2516                         break;
2517                 }
2518         }
2519 }
2520
2521 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2522                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2523                                                   u32 mclk,
2524                                                   u32 *phase_shedding)
2525 {
2526         unsigned int i;
2527
2528         *phase_shedding = 1;
2529
2530         for (i = 0; i < limits->count; i++) {
2531                 if (mclk < limits->entries[i].mclk) {
2532                         *phase_shedding = i;
2533                         break;
2534                 }
2535         }
2536 }
2537
2538 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2539 {
2540         struct ci_power_info *pi = ci_get_pi(adev);
2541         u32 tmp;
2542         int ret;
2543
2544         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2545                                      &tmp, pi->sram_end);
2546         if (ret)
2547                 return ret;
2548
2549         tmp &= 0x00FFFFFF;
2550         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2551
2552         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2553                                        tmp, pi->sram_end);
2554 }
2555
2556 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2557                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2558                                          u32 clock, u32 *voltage)
2559 {
2560         u32 i = 0;
2561
2562         if (allowed_clock_voltage_table->count == 0)
2563                 return -EINVAL;
2564
2565         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2566                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2567                         *voltage = allowed_clock_voltage_table->entries[i].v;
2568                         return 0;
2569                 }
2570         }
2571
2572         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2573
2574         return 0;
2575 }
2576
2577 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2578 {
2579         u32 i;
2580         u32 tmp;
2581         u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2582
2583         if (sclk < min)
2584                 return 0;
2585
2586         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2587                 tmp = sclk >> i;
2588                 if (tmp >= min || i == 0)
2589                         break;
2590         }
2591
2592         return (u8)i;
2593 }
2594
/* Copy MC arbiter register set F0 into F1 and make F1 active via
 * ci_copy_and_switch_arb_sets().  The name suggests this is the one-time
 * initial handoff done when bringing up DPM -- caller not visible here.
 */
static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
	return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
2599
2600 static int ci_reset_to_default(struct amdgpu_device *adev)
2601 {
2602         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2603                 0 : -EINVAL;
2604 }
2605
2606 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2607 {
2608         u32 tmp;
2609
2610         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2611
2612         if (tmp == MC_CG_ARB_FREQ_F0)
2613                 return 0;
2614
2615         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2616 }
2617
2618 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2619                                         const u32 engine_clock,
2620                                         const u32 memory_clock,
2621                                         u32 *dram_timimg2)
2622 {
2623         bool patch;
2624         u32 tmp, tmp2;
2625
2626         tmp = RREG32(mmMC_SEQ_MISC0);
2627         patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2628
2629         if (patch &&
2630             ((adev->pdev->device == 0x67B0) ||
2631              (adev->pdev->device == 0x67B1))) {
2632                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2633                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2634                         *dram_timimg2 &= ~0x00ff0000;
2635                         *dram_timimg2 |= tmp2 << 16;
2636                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2637                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2638                         *dram_timimg2 &= ~0x00ff0000;
2639                         *dram_timimg2 |= tmp2 << 16;
2640                 }
2641         }
2642 }
2643
2644 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2645                                                 u32 sclk,
2646                                                 u32 mclk,
2647                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2648 {
2649         u32 dram_timing;
2650         u32 dram_timing2;
2651         u32 burst_time;
2652
2653         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2654
2655         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2656         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2657         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2658
2659         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2660
2661         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2662         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2663         arb_regs->McArbBurstTime = (u8)burst_time;
2664
2665         return 0;
2666 }
2667
2668 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2669 {
2670         struct ci_power_info *pi = ci_get_pi(adev);
2671         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2672         u32 i, j;
2673         int ret =  0;
2674
2675         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2676
2677         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2678                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2679                         ret = ci_populate_memory_timing_parameters(adev,
2680                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2681                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2682                                                                    &arb_regs.entries[i][j]);
2683                         if (ret)
2684                                 break;
2685                 }
2686         }
2687
2688         if (ret == 0)
2689                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2690                                            pi->arb_table_start,
2691                                            (u8 *)&arb_regs,
2692                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2693                                            pi->sram_end);
2694
2695         return ret;
2696 }
2697
2698 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2699 {
2700         struct ci_power_info *pi = ci_get_pi(adev);
2701
2702         if (pi->need_update_smu7_dpm_table == 0)
2703                 return 0;
2704
2705         return ci_do_program_memory_timing_parameters(adev);
2706 }
2707
2708 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2709                                           struct amdgpu_ps *amdgpu_boot_state)
2710 {
2711         struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2712         struct ci_power_info *pi = ci_get_pi(adev);
2713         u32 level = 0;
2714
2715         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2716                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2717                     boot_state->performance_levels[0].sclk) {
2718                         pi->smc_state_table.GraphicsBootLevel = level;
2719                         break;
2720                 }
2721         }
2722
2723         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2724                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2725                     boot_state->performance_levels[0].mclk) {
2726                         pi->smc_state_table.MemoryBootLevel = level;
2727                         break;
2728                 }
2729         }
2730 }
2731
2732 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2733 {
2734         u32 i;
2735         u32 mask_value = 0;
2736
2737         for (i = dpm_table->count; i > 0; i--) {
2738                 mask_value = mask_value << 1;
2739                 if (dpm_table->dpm_levels[i-1].enabled)
2740                         mask_value |= 0x1;
2741                 else
2742                         mask_value &= 0xFFFFFFFE;
2743         }
2744
2745         return mask_value;
2746 }
2747
2748 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2749                                        SMU7_Discrete_DpmTable *table)
2750 {
2751         struct ci_power_info *pi = ci_get_pi(adev);
2752         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2753         u32 i;
2754
2755         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2756                 table->LinkLevel[i].PcieGenSpeed =
2757                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2758                 table->LinkLevel[i].PcieLaneCount =
2759                         amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2760                 table->LinkLevel[i].EnabledForActivity = 1;
2761                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2762                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2763         }
2764
2765         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2766         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2767                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2768 }
2769
2770 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2771                                      SMU7_Discrete_DpmTable *table)
2772 {
2773         u32 count;
2774         struct atom_clock_dividers dividers;
2775         int ret = -EINVAL;
2776
2777         table->UvdLevelCount =
2778                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2779
2780         for (count = 0; count < table->UvdLevelCount; count++) {
2781                 table->UvdLevel[count].VclkFrequency =
2782                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2783                 table->UvdLevel[count].DclkFrequency =
2784                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2785                 table->UvdLevel[count].MinVddc =
2786                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2787                 table->UvdLevel[count].MinVddcPhases = 1;
2788
2789                 ret = amdgpu_atombios_get_clock_dividers(adev,
2790                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2791                                                          table->UvdLevel[count].VclkFrequency, false, &dividers);
2792                 if (ret)
2793                         return ret;
2794
2795                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2796
2797                 ret = amdgpu_atombios_get_clock_dividers(adev,
2798                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2799                                                          table->UvdLevel[count].DclkFrequency, false, &dividers);
2800                 if (ret)
2801                         return ret;
2802
2803                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2804
2805                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2806                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2807                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2808         }
2809
2810         return ret;
2811 }
2812
2813 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2814                                      SMU7_Discrete_DpmTable *table)
2815 {
2816         u32 count;
2817         struct atom_clock_dividers dividers;
2818         int ret = -EINVAL;
2819
2820         table->VceLevelCount =
2821                 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2822
2823         for (count = 0; count < table->VceLevelCount; count++) {
2824                 table->VceLevel[count].Frequency =
2825                         adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2826                 table->VceLevel[count].MinVoltage =
2827                         (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2828                 table->VceLevel[count].MinPhases = 1;
2829
2830                 ret = amdgpu_atombios_get_clock_dividers(adev,
2831                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2832                                                          table->VceLevel[count].Frequency, false, &dividers);
2833                 if (ret)
2834                         return ret;
2835
2836                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2837
2838                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2839                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2840         }
2841
2842         return ret;
2843
2844 }
2845
2846 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2847                                      SMU7_Discrete_DpmTable *table)
2848 {
2849         u32 count;
2850         struct atom_clock_dividers dividers;
2851         int ret = -EINVAL;
2852
2853         table->AcpLevelCount = (u8)
2854                 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2855
2856         for (count = 0; count < table->AcpLevelCount; count++) {
2857                 table->AcpLevel[count].Frequency =
2858                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2859                 table->AcpLevel[count].MinVoltage =
2860                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2861                 table->AcpLevel[count].MinPhases = 1;
2862
2863                 ret = amdgpu_atombios_get_clock_dividers(adev,
2864                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2865                                                          table->AcpLevel[count].Frequency, false, &dividers);
2866                 if (ret)
2867                         return ret;
2868
2869                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2870
2871                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2872                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2873         }
2874
2875         return ret;
2876 }
2877
2878 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2879                                       SMU7_Discrete_DpmTable *table)
2880 {
2881         u32 count;
2882         struct atom_clock_dividers dividers;
2883         int ret = -EINVAL;
2884
2885         table->SamuLevelCount =
2886                 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2887
2888         for (count = 0; count < table->SamuLevelCount; count++) {
2889                 table->SamuLevel[count].Frequency =
2890                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2891                 table->SamuLevel[count].MinVoltage =
2892                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2893                 table->SamuLevel[count].MinPhases = 1;
2894
2895                 ret = amdgpu_atombios_get_clock_dividers(adev,
2896                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2897                                                          table->SamuLevel[count].Frequency, false, &dividers);
2898                 if (ret)
2899                         return ret;
2900
2901                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2902
2903                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2904                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2905         }
2906
2907         return ret;
2908 }
2909
/* Compute the full set of MPLL/MC register values for @memory_clock and
 * store them (CPU byte order) in @mclk.  Starts from the saved boot-time
 * clock registers in pi->clock_registers, asks atombios for the MPLL
 * dividers, then patches in feedback/post dividers, optional GDDR5 DQ
 * settings, optional memory spread-spectrum, and the DLL state.
 * Returns 0 on success or the atombios divider-query error.
 */
static int ci_calculate_mclk_params(struct amdgpu_device *adev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	/* Boot-time register snapshots used as the starting point. */
	u32  dll_cntl = pi->clock_registers.dll_cntl;
	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	/* Bandwidth control from the atombios divider result. */
	mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
	mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);

	/* Feedback divider (integer + fractional) and VCO mode. */
	mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
			MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
	mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
		(mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
		(mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);

	mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		/* GDDR5 only: DQ path gets its own yclk select/post divider.
		 * The AD_FUNC_CNTL mask is reused here -- presumably the DQ
		 * register shares the same bit layout (TODO confirm). */
		mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
				MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
				(mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
	}

	if (pi->caps_mclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		/* Nominal VCO frequency: QDR doubles the DDR multiplier. */
		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		/* tmp = (freq_nom / refclk)^2, used in the CLKV formula. */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			/* Program spread-spectrum step (CLKV) and span (CLKS). */
			mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
			mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);

			mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
			mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
		}
	}

	mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);

	/* Power the memory read DLLs up or down as requested. */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
	else
		mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	/* Hand the computed register image back (CPU byte order; any
	 * endian conversion for the SMC happens in the caller). */
	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
3001
3002 static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3003                                            u32 memory_clock,
3004                                            SMU7_Discrete_MemoryLevel *memory_level)
3005 {
3006         struct ci_power_info *pi = ci_get_pi(adev);
3007         int ret;
3008         bool dll_state_on;
3009
3010         if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3011                 ret = ci_get_dependency_volt_by_clk(adev,
3012                                                     &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3013                                                     memory_clock, &memory_level->MinVddc);
3014                 if (ret)
3015                         return ret;
3016         }
3017
3018         if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3019                 ret = ci_get_dependency_volt_by_clk(adev,
3020                                                     &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3021                                                     memory_clock, &memory_level->MinVddci);
3022                 if (ret)
3023                         return ret;
3024         }
3025
3026         if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3027                 ret = ci_get_dependency_volt_by_clk(adev,
3028                                                     &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3029                                                     memory_clock, &memory_level->MinMvdd);
3030                 if (ret)
3031                         return ret;
3032         }
3033
3034         memory_level->MinVddcPhases = 1;