Merge tag 'wireless-drivers-for-davem-2017-06-20' of git://git.kernel.org/pub/scm...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / amd / amdgpu / ci_dpm.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include "drmP.h"
26 #include "amdgpu.h"
27 #include "amdgpu_pm.h"
28 #include "amdgpu_ucode.h"
29 #include "cikd.h"
30 #include "amdgpu_dpm.h"
31 #include "ci_dpm.h"
32 #include "gfx_v7_0.h"
33 #include "atom.h"
34 #include "amd_pcie.h"
35 #include <linux/seq_file.h>
36
37 #include "smu/smu_7_0_1_d.h"
38 #include "smu/smu_7_0_1_sh_mask.h"
39
40 #include "dce/dce_8_0_d.h"
41 #include "dce/dce_8_0_sh_mask.h"
42
43 #include "bif/bif_4_1_d.h"
44 #include "bif/bif_4_1_sh_mask.h"
45
46 #include "gca/gfx_7_2_d.h"
47 #include "gca/gfx_7_2_sh_mask.h"
48
49 #include "gmc/gmc_7_1_d.h"
50 #include "gmc/gmc_7_1_sh_mask.h"
51
52 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
53 MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
54 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
55 MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
56
57 #define MC_CG_ARB_FREQ_F0           0x0a
58 #define MC_CG_ARB_FREQ_F1           0x0b
59 #define MC_CG_ARB_FREQ_F2           0x0c
60 #define MC_CG_ARB_FREQ_F3           0x0d
61
62 #define SMC_RAM_END 0x40000
63
64 #define VOLTAGE_SCALE               4
65 #define VOLTAGE_VID_OFFSET_SCALE1    625
66 #define VOLTAGE_VID_OFFSET_SCALE2    100
67
68 static const struct ci_pt_defaults defaults_hawaii_xt =
69 {
70         1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
71         { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
72         { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
73 };
74
75 static const struct ci_pt_defaults defaults_hawaii_pro =
76 {
77         1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
78         { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
79         { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
80 };
81
82 static const struct ci_pt_defaults defaults_bonaire_xt =
83 {
84         1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
85         { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
86         { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
87 };
88
89 #if 0
90 static const struct ci_pt_defaults defaults_bonaire_pro =
91 {
92         1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
93         { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
94         { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
95 };
96 #endif
97
98 static const struct ci_pt_defaults defaults_saturn_xt =
99 {
100         1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
101         { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
102         { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
103 };
104
105 #if 0
106 static const struct ci_pt_defaults defaults_saturn_pro =
107 {
108         1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
109         { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
110         { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
111 };
112 #endif
113
114 static const struct ci_pt_config_reg didt_config_ci[] =
115 {
116         { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117         { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118         { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119         { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120         { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121         { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
122         { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
123         { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
124         { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
125         { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
126         { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127         { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128         { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
129         { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
130         { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
131         { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
132         { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
133         { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134         { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135         { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136         { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137         { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138         { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139         { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
140         { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
141         { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
142         { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
143         { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
144         { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145         { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146         { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
147         { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
148         { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
149         { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
150         { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
151         { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
152         { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
153         { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154         { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155         { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
156         { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
157         { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
158         { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
159         { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
160         { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
161         { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
162         { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
163         { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
164         { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
165         { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
166         { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
167         { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
168         { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
169         { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
170         { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
171         { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
172         { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
173         { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
174         { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
175         { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
176         { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
177         { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
178         { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
179         { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
180         { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
181         { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
182         { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
183         { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
184         { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
185         { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
186         { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
187         { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
188         { 0xFFFFFFFF }
189 };
190
191 static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
192 {
193         return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
194 }
195
/* NOTE(review): these MC_CG_ARB_FREQ_F* values duplicate the identical
 * definitions near the top of this file.  Identical redefinition is
 * benign in C, but one copy could be dropped. */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d
200
/*
 * Copy one MC arbiter register set (DRAM timings + burst time) from
 * arb_freq_src to arb_freq_dest, then request that the memory controller
 * switch to the destination set.  Only the F0 and F1 sets are handled;
 * any other src/dest value returns -EINVAL.
 */
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	/* Read the source set's timings and its STATEx burst-time field. */
	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			 MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			 MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	/* Write the copied values into the destination set. */
	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* 0x0000000F presumably enables CG for all arbiter sets — confirm
	 * against MC register documentation.  The final write requests the
	 * switch to the destination set. */
	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}
250
/*
 * Map a DDR3 memory clock (10 kHz units) onto the 4-bit frequency-ratio
 * index used by the MC parameter tables.  Clamped to [0, 0x0f].
 */
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (u8)((memory_clock - 10000) / 5000 + 1);
}
263
/*
 * Map a memory clock (10 kHz units) onto the 4-bit frequency-ratio index,
 * using the strobe-mode or normal-mode thresholds.  Clamped to [0, 0x0f].
 */
static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u32 low, high, base, step;

	if (strobe_mode) {
		low = 12500;
		high = 47500;
		base = 10000;
		step = 2500;
	} else {
		low = 65000;
		high = 135000;
		base = 60000;
		step = 5000;
	}

	if (memory_clock < low)
		return 0x00;
	if (memory_clock > high)
		return 0x0f;

	return (u8)((memory_clock - base) / step);
}
285
286 static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
287                                                      u32 max_voltage_steps,
288                                                      struct atom_voltage_table *voltage_table)
289 {
290         unsigned int i, diff;
291
292         if (voltage_table->count <= max_voltage_steps)
293                 return;
294
295         diff = voltage_table->count - max_voltage_steps;
296
297         for (i = 0; i < max_voltage_steps; i++)
298                 voltage_table->entries[i] = voltage_table->entries[i + diff];
299
300         voltage_table->count = max_voltage_steps;
301 }
302
303 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
304                                          struct atom_voltage_table_entry *voltage_table,
305                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
306 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
307 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
308                                        u32 target_tdp);
309 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
310 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
311 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
312
313 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
314                                                              PPSMC_Msg msg, u32 parameter);
315 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
316 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
317
318 static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
319 {
320         struct ci_power_info *pi = adev->pm.dpm.priv;
321
322         return pi;
323 }
324
325 static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
326 {
327         struct ci_ps *ps = rps->ps_priv;
328
329         return ps;
330 }
331
/*
 * Select the per-ASIC powertune defaults table by PCI device ID and
 * initialize the powertune capability flags.
 */
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	/* NOTE: 'default' is deliberately placed inside the first case
	 * group so that unrecognized device IDs fall back to the
	 * Bonaire XT defaults. */
	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	/* Baseline capabilities: power containment on; CAC and all DIDT
	 * ramping features off by default. */
	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		/* BAPM is kept off on Hawaii, on everywhere else. */
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}
391
/* Convert a VDDC value (in VOLTAGE_SCALE units) into an SMC VID code. */
static u8 ci_convert_to_vid(u16 vddc)
{
	int offset = 6200 - vddc * VOLTAGE_SCALE;

	return offset / 25;
}
396
397 static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
398 {
399         struct ci_power_info *pi = ci_get_pi(adev);
400         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
401         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
402         u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
403         u32 i;
404
405         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
406                 return -EINVAL;
407         if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
408                 return -EINVAL;
409         if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
410             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
411                 return -EINVAL;
412
413         for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
414                 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
415                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
416                         hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
417                         hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
418                 } else {
419                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
420                         hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
421                 }
422         }
423         return 0;
424 }
425
426 static int ci_populate_vddc_vid(struct amdgpu_device *adev)
427 {
428         struct ci_power_info *pi = ci_get_pi(adev);
429         u8 *vid = pi->smc_powertune_table.VddCVid;
430         u32 i;
431
432         if (pi->vddc_voltage_table.count > 8)
433                 return -EINVAL;
434
435         for (i = 0; i < pi->vddc_voltage_table.count; i++)
436                 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
437
438         return 0;
439 }
440
/*
 * Program the SVI load-line parameters into the SMC powertune table.
 * Enable flag and VddC slope come from the per-ASIC defaults; the trim
 * and offset values are fixed (3 and 0).
 */
static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}
453
454 static int ci_populate_tdc_limit(struct amdgpu_device *adev)
455 {
456         struct ci_power_info *pi = ci_get_pi(adev);
457         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
458         u16 tdc_limit;
459
460         tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
461         pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
462         pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
463                 pt_defaults->tdc_vddc_throttle_release_limit_perc;
464         pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
465
466         return 0;
467 }
468
/*
 * Populate the TdcWaterfallCtl fuse in the SMC powertune table.
 *
 * NOTE(review): the SRAM read fetches the current fuse value, but on
 * success it is immediately overwritten with the per-ASIC default —
 * effectively the read only verifies the PmFuseTable location is
 * reachable.  This matches the original radeon code, so it appears
 * intentional, but worth confirming.
 */
static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}
488
489 static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
490 {
491         struct ci_power_info *pi = ci_get_pi(adev);
492
493         if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
494             (adev->pm.dpm.fan.fan_output_sensitivity == 0))
495                 adev->pm.dpm.fan.fan_output_sensitivity =
496                         adev->pm.dpm.fan.default_fan_output_sensitivity;
497
498         pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
499                 cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
500
501         return 0;
502 }
503
504 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
505 {
506         struct ci_power_info *pi = ci_get_pi(adev);
507         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
508         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
509         int i, min, max;
510
511         min = max = hi_vid[0];
512         for (i = 0; i < 8; i++) {
513                 if (0 != hi_vid[i]) {
514                         if (min > hi_vid[i])
515                                 min = hi_vid[i];
516                         if (max < hi_vid[i])
517                                 max = hi_vid[i];
518                 }
519
520                 if (0 != lo_vid[i]) {
521                         if (min > lo_vid[i])
522                                 min = lo_vid[i];
523                         if (max < lo_vid[i])
524                                 max = lo_vid[i];
525                 }
526         }
527
528         if ((min == 0) || (max == 0))
529                 return -EINVAL;
530         pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
531         pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
532
533         return 0;
534 }
535
536 static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
537 {
538         struct ci_power_info *pi = ci_get_pi(adev);
539         u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
540         u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
541         struct amdgpu_cac_tdp_table *cac_tdp_table =
542                 adev->pm.dpm.dyn_state.cac_tdp_table;
543
544         hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
545         lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
546
547         pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
548         pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
549
550         return 0;
551 }
552
/*
 * Fill the BAPM (power-budget) section of the SMC DPM table: TDP
 * targets, thermal limits, the PPM package power limit, and the BAPMTI
 * R/RC coefficient matrices.  Multi-byte SMC fields are big-endian,
 * hence the cpu_to_be*() conversions.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	/* TDP values are scaled by 256 for the SMC. */
	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	/* temperature_high is in millidegrees; the SMC wants degrees. */
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	/* PPM limits are only meaningful when the platform supplies a
	 * PPM table; otherwise both limits are zeroed. */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	/* The flat bapmti_r/_rc arrays are consumed in
	 * [iterations][sources][sinks] order. */
	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
600
601 static int ci_populate_pm_base(struct amdgpu_device *adev)
602 {
603         struct ci_power_info *pi = ci_get_pi(adev);
604         u32 pm_fuse_table_offset;
605         int ret;
606
607         if (pi->caps_power_containment) {
608                 ret = amdgpu_ci_read_smc_sram_dword(adev,
609                                              SMU7_FIRMWARE_HEADER_LOCATION +
610                                              offsetof(SMU7_Firmware_Header, PmFuseTable),
611                                              &pm_fuse_table_offset, pi->sram_end);
612                 if (ret)
613                         return ret;
614                 ret = ci_populate_bapm_vddc_vid_sidd(adev);
615                 if (ret)
616                         return ret;
617                 ret = ci_populate_vddc_vid(adev);
618                 if (ret)
619                         return ret;
620                 ret = ci_populate_svi_load_line(adev);
621                 if (ret)
622                         return ret;
623                 ret = ci_populate_tdc_limit(adev);
624                 if (ret)
625                         return ret;
626                 ret = ci_populate_dw8(adev);
627                 if (ret)
628                         return ret;
629                 ret = ci_populate_fuzzy_fan(adev);
630                 if (ret)
631                         return ret;
632                 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
633                 if (ret)
634                         return ret;
635                 ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
636                 if (ret)
637                         return ret;
638                 ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
639                                            (u8 *)&pi->smc_powertune_table,
640                                            sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
641                 if (ret)
642                         return ret;
643         }
644
645         return 0;
646 }
647
648 static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
649 {
650         struct ci_power_info *pi = ci_get_pi(adev);
651         u32 data;
652
653         if (pi->caps_sq_ramping) {
654                 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
655                 if (enable)
656                         data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
657                 else
658                         data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
659                 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
660         }
661
662         if (pi->caps_db_ramping) {
663                 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
664                 if (enable)
665                         data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
666                 else
667                         data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
668                 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
669         }
670
671         if (pi->caps_td_ramping) {
672                 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
673                 if (enable)
674                         data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
675                 else
676                         data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
677                 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
678         }
679
680         if (pi->caps_tcp_ramping) {
681                 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
682                 if (enable)
683                         data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
684                 else
685                         data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
686                 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
687         }
688 }
689
/*
 * Walk a 0xFFFFFFFF-terminated table of ci_pt_config_reg entries and
 * apply each via read-modify-write to the matching register space
 * (SMC-indirect, DIDT-indirect, or plain MMIO).
 *
 * CISLANDS_CONFIGREG_CACHE entries do not write a register; they
 * accumulate field values into 'cache', which is OR-ed into the next
 * non-cache entry's write and then cleared.
 */
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			/* Replace this entry's field, then merge any
			 * previously cached fields into the same write. */
			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}
737
738 static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
739 {
740         struct ci_power_info *pi = ci_get_pi(adev);
741         int ret;
742
743         if (pi->caps_sq_ramping || pi->caps_db_ramping ||
744             pi->caps_td_ramping || pi->caps_tcp_ramping) {
745                 adev->gfx.rlc.funcs->enter_safe_mode(adev);
746
747                 if (enable) {
748                         ret = ci_program_pt_config_registers(adev, didt_config_ci);
749                         if (ret) {
750                                 adev->gfx.rlc.funcs->exit_safe_mode(adev);
751                                 return ret;
752                         }
753                 }
754
755                 ci_do_enable_didt(adev, enable);
756
757                 adev->gfx.rlc.funcs->exit_safe_mode(adev);
758         }
759
760         return 0;
761 }
762
/*
 * ci_enable_power_containment - toggle SMC power-containment features
 * @adev: amdgpu device handle
 * @enable: true to enable BAPM/TDC-limit/pkg-power-limit, false to disable
 *
 * On enable, each capability flag in ci_power_info gates an SMC message;
 * every feature that the SMC acknowledges is recorded in
 * pi->power_containment_features.  A failed message sets -EINVAL but the
 * remaining features are still attempted.  On disable, only the features
 * previously recorded as enabled are torn down.
 *
 * Returns 0 if all requested SMC messages succeeded, -EINVAL otherwise.
 */
static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					/* NOTE(review): cac_tdp_table is dereferenced without a
					 * NULL check — presumably guaranteed non-NULL by the
					 * powerplay table parser when this feature is set; confirm.
					 */
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					/* limit is in units of 1/256 W (8.8 fixed point) */
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			/* disable only what was actually enabled */
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}
820
821 static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
822 {
823         struct ci_power_info *pi = ci_get_pi(adev);
824         PPSMC_Result smc_result;
825         int ret = 0;
826
827         if (pi->caps_cac) {
828                 if (enable) {
829                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
830                         if (smc_result != PPSMC_Result_OK) {
831                                 ret = -EINVAL;
832                                 pi->cac_enabled = false;
833                         } else {
834                                 pi->cac_enabled = true;
835                         }
836                 } else if (pi->cac_enabled) {
837                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
838                         pi->cac_enabled = false;
839                 }
840         }
841
842         return ret;
843 }
844
845 static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
846                                             bool enable)
847 {
848         struct ci_power_info *pi = ci_get_pi(adev);
849         PPSMC_Result smc_result = PPSMC_Result_OK;
850
851         if (pi->thermal_sclk_dpm_enabled) {
852                 if (enable)
853                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
854                 else
855                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
856         }
857
858         if (smc_result == PPSMC_Result_OK)
859                 return 0;
860         else
861                 return -EINVAL;
862 }
863
864 static int ci_power_control_set_level(struct amdgpu_device *adev)
865 {
866         struct ci_power_info *pi = ci_get_pi(adev);
867         struct amdgpu_cac_tdp_table *cac_tdp_table =
868                 adev->pm.dpm.dyn_state.cac_tdp_table;
869         s32 adjust_percent;
870         s32 target_tdp;
871         int ret = 0;
872         bool adjust_polarity = false; /* ??? */
873
874         if (pi->caps_power_containment) {
875                 adjust_percent = adjust_polarity ?
876                         adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
877                 target_tdp = ((100 + adjust_percent) *
878                               (s32)cac_tdp_table->configurable_tdp) / 100;
879
880                 ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
881         }
882
883         return ret;
884 }
885
886 static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
887 {
888         struct ci_power_info *pi = ci_get_pi(adev);
889
890         pi->uvd_power_gated = gate;
891
892         if (gate) {
893                 /* stop the UVD block */
894                 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
895                                                         AMD_PG_STATE_GATE);
896                 ci_update_uvd_dpm(adev, gate);
897         } else {
898                 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
899                                                         AMD_PG_STATE_UNGATE);
900                 ci_update_uvd_dpm(adev, gate);
901         }
902 }
903
904 static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
905 {
906         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
907         u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
908
909         /* disable mclk switching if the refresh is >120Hz, even if the
910          * blanking period would allow it
911          */
912         if (amdgpu_dpm_get_vrefresh(adev) > 120)
913                 return true;
914
915         if (vblank_time < switch_limit)
916                 return true;
917         else
918                 return false;
919
920 }
921
/*
 * ci_apply_state_adjust_rules - clamp a requested power state to safe values
 * @adev: amdgpu device handle
 * @rps: requested power state (its ci_ps levels are modified in place)
 *
 * Adjusts the state's performance levels for the current display and power
 * situation: picks VCE clocks when VCE is active, clamps levels to the DC
 * limits on battery, forces the highest mclk when memory clock switching is
 * unsafe, and honors display minimum clock requirements.  Finally enforces
 * monotonic sclk/mclk ordering between levels 0 and 1.
 */
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	/* VCE needs its own evclk/ecclk from the active VCE state */
	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* mclk switching is unsafe with multiple displays or short vblanks */
	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on battery, cap every level at the DC limits */
	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	/* with switching disabled, pin mclk at the state's highest level */
	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	/* honor display-required minimum clocks */
	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
		sclk = adev->pm.pm_display_cfg.min_core_set_clock;

	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

	/* raise clocks to the VCE state's minimums when VCE is active */
	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	/* keep level 1 sclk >= level 0 sclk */
	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		/* no switching: level 0 mclk must reach level 1's */
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		/* switching allowed: keep level 1 mclk >= level 0 mclk */
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
1002
/*
 * ci_thermal_set_temperature_range - program the thermal interrupt window
 * @adev: amdgpu device handle
 * @min_temp: lower trip point in millidegrees Celsius
 * @max_temp: upper trip point in millidegrees Celsius
 *
 * Clamps the requested range into [0, 255] degrees C, programs the high/low
 * interrupt thresholds (the register takes whole degrees, hence the /1000),
 * and records the resulting range in adev->pm.dpm.thermal.
 *
 * Returns 0 on success, -EINVAL if the clamped range is inverted.
 */
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}
1037
1038 static int ci_thermal_enable_alert(struct amdgpu_device *adev,
1039                                    bool enable)
1040 {
1041         u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
1042         PPSMC_Result result;
1043
1044         if (enable) {
1045                 thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1046                                  CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1047                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1048                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
1049                 if (result != PPSMC_Result_OK) {
1050                         DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1051                         return -EINVAL;
1052                 }
1053         } else {
1054                 thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1055                         CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
1056                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1057                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
1058                 if (result != PPSMC_Result_OK) {
1059                         DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
1060                         return -EINVAL;
1061                 }
1062         }
1063
1064         return 0;
1065 }
1066
/*
 * ci_fan_ctrl_set_static_mode - switch the fan controller to a static mode
 * @adev: amdgpu device handle
 * @mode: FDO PWM mode value to program
 *
 * On the first call, saves the current PWM mode and TMIN from CG_FDO_CTRL2
 * so ci_fan_ctrl_set_default_mode() can restore them later.  Then forces
 * TMIN to 0 and programs the requested PWM mode.
 */
static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		/* capture hardware defaults exactly once */
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	/* zero TMIN so the static duty cycle applies from 0 degrees */
	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}
1090
/*
 * ci_thermal_setup_fan_table - build and upload the SMC fan control table
 * @adev: amdgpu device handle
 *
 * Converts the fan profile from adev->pm.dpm.fan (temperatures in
 * centidegrees, PWM in 0.01% units) into the big-endian
 * SMU7_Discrete_FanTable and copies it to SMC RAM at pi->fan_table_start.
 *
 * Always returns 0: on any failure (no table address, zero duty range, or
 * SMC copy error) it falls back by clearing ucode_fan_control so the
 * driver uses manual fan control instead.
 */
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* maximum duty cycle value corresponding to 100% PWM */
	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* pwm_min is in 0.01% units, hence the divide by 10000 */
	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	/* NOTE(review): the slope divisions below assume t_med > t_min and
	 * t_high > t_med (non-zero divisors) — presumably guaranteed by the
	 * fan profile parsed from the vbios; confirm.
	 */
	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	/* fixed-point slopes with rounding (+50 before /100) */
	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* temperatures: centidegrees -> degrees, rounded */
	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	/* temperature source currently selected in hardware */
	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		/* deliberate fallback: report success but use manual control */
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}
1169
1170 static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
1171 {
1172         struct ci_power_info *pi = ci_get_pi(adev);
1173         PPSMC_Result ret;
1174
1175         if (pi->caps_od_fuzzy_fan_control_support) {
1176                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1177                                                                PPSMC_StartFanControl,
1178                                                                FAN_CONTROL_FUZZY);
1179                 if (ret != PPSMC_Result_OK)
1180                         return -EINVAL;
1181                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1182                                                                PPSMC_MSG_SetFanPwmMax,
1183                                                                adev->pm.dpm.fan.default_max_fan_pwm);
1184                 if (ret != PPSMC_Result_OK)
1185                         return -EINVAL;
1186         } else {
1187                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1188                                                                PPSMC_StartFanControl,
1189                                                                FAN_CONTROL_TABLE);
1190                 if (ret != PPSMC_Result_OK)
1191                         return -EINVAL;
1192         }
1193
1194         pi->fan_is_controlled_by_smc = true;
1195         return 0;
1196 }
1197
1198
1199 static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
1200 {
1201         PPSMC_Result ret;
1202         struct ci_power_info *pi = ci_get_pi(adev);
1203
1204         ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
1205         if (ret == PPSMC_Result_OK) {
1206                 pi->fan_is_controlled_by_smc = false;
1207                 return 0;
1208         } else {
1209                 return -EINVAL;
1210         }
1211 }
1212
1213 static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
1214                                         u32 *speed)
1215 {
1216         u32 duty, duty100;
1217         u64 tmp64;
1218
1219         if (adev->pm.no_fan)
1220                 return -ENOENT;
1221
1222         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1223                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1224         duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1225                 >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1226
1227         if (duty100 == 0)
1228                 return -EINVAL;
1229
1230         tmp64 = (u64)duty * 100;
1231         do_div(tmp64, duty100);
1232         *speed = (u32)tmp64;
1233
1234         if (*speed > 100)
1235                 *speed = 100;
1236
1237         return 0;
1238 }
1239
1240 static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
1241                                         u32 speed)
1242 {
1243         u32 tmp;
1244         u32 duty, duty100;
1245         u64 tmp64;
1246         struct ci_power_info *pi = ci_get_pi(adev);
1247
1248         if (adev->pm.no_fan)
1249                 return -ENOENT;
1250
1251         if (pi->fan_is_controlled_by_smc)
1252                 return -EINVAL;
1253
1254         if (speed > 100)
1255                 return -EINVAL;
1256
1257         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1258                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1259
1260         if (duty100 == 0)
1261                 return -EINVAL;
1262
1263         tmp64 = (u64)speed * duty100;
1264         do_div(tmp64, 100);
1265         duty = (u32)tmp64;
1266
1267         tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1268         tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1269         WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1270
1271         return 0;
1272 }
1273
1274 static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
1275 {
1276         switch (mode) {
1277         case AMD_FAN_CTRL_NONE:
1278                 if (adev->pm.dpm.fan.ucode_fan_control)
1279                         ci_fan_ctrl_stop_smc_fan_control(adev);
1280                 ci_dpm_set_fan_speed_percent(adev, 100);
1281                 break;
1282         case AMD_FAN_CTRL_MANUAL:
1283                 if (adev->pm.dpm.fan.ucode_fan_control)
1284                         ci_fan_ctrl_stop_smc_fan_control(adev);
1285                 break;
1286         case AMD_FAN_CTRL_AUTO:
1287                 if (adev->pm.dpm.fan.ucode_fan_control)
1288                         ci_thermal_start_smc_fan_control(adev);
1289                 break;
1290         default:
1291                 break;
1292         }
1293 }
1294
1295 static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
1296 {
1297         struct ci_power_info *pi = ci_get_pi(adev);
1298
1299         if (pi->fan_is_controlled_by_smc)
1300                 return AMD_FAN_CTRL_AUTO;
1301         else
1302                 return AMD_FAN_CTRL_MANUAL;
1303 }
1304
#if 0
/*
 * ci_fan_ctrl_get_fan_speed_rpm - read the fan speed in RPM from the tach
 * @adev: amdgpu device handle
 * @speed: output, fan speed in RPM
 *
 * Returns 0 on success, -ENOENT if there is no fan/tach or the measured
 * tach period is zero.
 */
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

/*
 * ci_fan_ctrl_set_fan_speed_rpm - force a static fan speed in RPM
 * @adev: amdgpu device handle
 * @speed: requested fan speed in RPM (must be within fan_min/max_rpm)
 *
 * Stops SMC fan control if active, programs the target tach period, and
 * switches the controller into static-RPM mode.
 *
 * Returns 0 on success, -ENOENT if there is no fan/tach, -EINVAL if the
 * requested RPM is out of range.
 */
static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	/* fixed: the write must target the SMC-indirect register (ixCG_TACH_CTRL),
	 * matching the read above — the original used the bare CG_TACH_CTRL name */
	WREG32_SMC(ixCG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif
1357
1358 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1359 {
1360         struct ci_power_info *pi = ci_get_pi(adev);
1361         u32 tmp;
1362
1363         if (!pi->fan_ctrl_is_in_default_mode) {
1364                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1365                 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1366                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1367
1368                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1369                 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1370                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1371                 pi->fan_ctrl_is_in_default_mode = true;
1372         }
1373 }
1374
1375 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1376 {
1377         if (adev->pm.dpm.fan.ucode_fan_control) {
1378                 ci_fan_ctrl_start_smc_fan_control(adev);
1379                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1380         }
1381 }
1382
1383 static void ci_thermal_initialize(struct amdgpu_device *adev)
1384 {
1385         u32 tmp;
1386
1387         if (adev->pm.fan_pulses_per_revolution) {
1388                 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1389                 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1390                         << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1391                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1392         }
1393
1394         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1395         tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1396         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1397 }
1398
/*
 * ci_thermal_start_thermal_controller - bring up the thermal subsystem
 * @adev: amdgpu device handle
 *
 * Initializes the fan/tach hardware, programs the default thermal interrupt
 * range, enables thermal alerts, and — when firmware fan control is
 * configured — uploads the fan table and starts SMC fan control.
 *
 * Returns 0 on success or the first error from a setup step.
 */
static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	ci_thermal_initialize(adev);
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(adev);
	}

	return 0;
}
1419
1420 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1421 {
1422         if (!adev->pm.no_fan)
1423                 ci_fan_ctrl_set_default_mode(adev);
1424 }
1425
1426 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1427                                      u16 reg_offset, u32 *value)
1428 {
1429         struct ci_power_info *pi = ci_get_pi(adev);
1430
1431         return amdgpu_ci_read_smc_sram_dword(adev,
1432                                       pi->soft_regs_start + reg_offset,
1433                                       value, pi->sram_end);
1434 }
1435
1436 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1437                                       u16 reg_offset, u32 value)
1438 {
1439         struct ci_power_info *pi = ci_get_pi(adev);
1440
1441         return amdgpu_ci_write_smc_sram_dword(adev,
1442                                        pi->soft_regs_start + reg_offset,
1443                                        value, pi->sram_end);
1444 }
1445
1446 static void ci_init_fps_limits(struct amdgpu_device *adev)
1447 {
1448         struct ci_power_info *pi = ci_get_pi(adev);
1449         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1450
1451         if (pi->caps_fps) {
1452                 u16 tmp;
1453
1454                 tmp = 45;
1455                 table->FpsHighT = cpu_to_be16(tmp);
1456
1457                 tmp = 30;
1458                 table->FpsLowT = cpu_to_be16(tmp);
1459         }
1460 }
1461
1462 static int ci_update_sclk_t(struct amdgpu_device *adev)
1463 {
1464         struct ci_power_info *pi = ci_get_pi(adev);
1465         int ret = 0;
1466         u32 low_sclk_interrupt_t = 0;
1467
1468         if (pi->caps_sclk_throttle_low_notification) {
1469                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1470
1471                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1472                                            pi->dpm_table_start +
1473                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1474                                            (u8 *)&low_sclk_interrupt_t,
1475                                            sizeof(u32), pi->sram_end);
1476
1477         }
1478
1479         return ret;
1480 }
1481
/*
 * ci_get_leakage_voltages - build the leakage-voltage translation tables
 * @adev: amdgpu device handle
 *
 * Populates pi->vddc_leakage / pi->vddci_leakage with mappings from virtual
 * voltage IDs (ATOM_VIRTUAL_VOLTAGE_ID0 + i) to the real leakage voltages
 * reported by the vbios.  Uses the EVV method when the platform advertises
 * it, otherwise falls back to the leakage-ID based lookup.  Entries whose
 * reported voltage is zero or merely echoes the virtual ID are skipped.
 */
static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		/* EVV path: query each virtual ID directly */
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
		/* fallback: leakage-ID based lookup yields both vddc and vddci */
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
										     virtual_voltage_id,
										     leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
1523
/*
 * ci_set_dpm_event_sources - configure thermal-protection event sources.
 *
 * @sources: bitmask of AMDGPU_DPM_AUTO_THROTTLE_SRC_* bits.
 *
 * A recognized source enables thermal protection (gated by
 * pi->thermal_protection); an empty or unrecognized mask disables it by
 * setting THERMAL_PROTECTION_DIS in GENERAL_PWRMGT.
 */
static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	bool want_thermal_protection;
	/* only consumed by the #if 0 block below, so currently unused */
	enum amdgpu_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		/* THERMAL_PROTECTION_DIS is active-high "disable" */
		if (pi->thermal_protection)
			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		else
			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}
1572
1573 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1574                                            enum amdgpu_dpm_auto_throttle_src source,
1575                                            bool enable)
1576 {
1577         struct ci_power_info *pi = ci_get_pi(adev);
1578
1579         if (enable) {
1580                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1581                         pi->active_auto_throttle_sources |= 1 << source;
1582                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1583                 }
1584         } else {
1585                 if (pi->active_auto_throttle_sources & (1 << source)) {
1586                         pi->active_auto_throttle_sources &= ~(1 << source);
1587                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1588                 }
1589         }
1590 }
1591
1592 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1593 {
1594         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1595                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1596 }
1597
/*
 * ci_unfreeze_sclk_mclk_dpm - let the SMC switch DPM levels again.
 *
 * Counterpart to ci_freeze_sclk_mclk_dpm(): after updated SCLK/MCLK
 * tables have been uploaded, unfreeze the affected domains and clear the
 * pending-update flags.
 * Returns 0 on success, -EINVAL if the SMC rejects a message.
 */
static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}
1623
/*
 * ci_enable_sclk_mclk_dpm - turn SCLK and MCLK DPM on or off in the SMC.
 *
 * When enabling MCLK DPM this also enables memory CAC measurement and
 * programs the LCAC controllers for both MC channels and the CPL block.
 * Returns 0 on success, -EINVAL if the SMC rejects a message.
 */
static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			/* enable memory CAC measurement */
			WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
					~MC_SEQ_CNTL_3__CAC_EN_MASK);

			/*
			 * NOTE(review): LCAC values are magic numbers from
			 * the vendor init sequence; meaning undocumented.
			 */
			WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
1670
/*
 * ci_start_dpm - enable dynamic power management.
 *
 * Sets the global and dynamic PM enable bits, programs the voltage-change
 * timeout soft register, clears the BIF link counter reset, then enables
 * voltage control, SCLK/MCLK DPM and (unless disabled) PCIe DPM via SMC
 * messages, in that order.
 * Returns 0 on success or a negative error code.
 */
static int ci_start_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(adev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1706
/*
 * ci_freeze_sclk_mclk_dpm - hold DPM levels while tables are updated.
 *
 * Freezes the SCLK and/or MCLK DPM domains that have pending table
 * updates (per pi->need_update_smu7_dpm_table) so the SMC does not
 * switch levels mid-update; ci_unfreeze_sclk_mclk_dpm() releases them.
 * Returns 0 on success, -EINVAL if the SMC rejects a message.
 */
static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1731
/*
 * ci_stop_dpm - disable dynamic power management.
 *
 * Mirror of ci_start_dpm(): clears the global/dynamic PM enable bits,
 * then disables PCIe DPM, SCLK/MCLK DPM and finally voltage control in
 * the SMC.  Returns 0 on success or a negative error code.
 */
static int ci_stop_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(adev, false);
	if (ret)
		return ret;

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
1763
1764 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1765 {
1766         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1767
1768         if (enable)
1769                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1770         else
1771                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1772         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1773 }
1774
#if 0
/*
 * Currently unused: would reprogram the SMC power limit for AC vs.
 * battery operation and, when automatic DC transitions are supported,
 * inform the SMC of the active power source.
 */
static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	/* limits are stored in 1/256 W units */
	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(adev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
		else
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
1801
/*
 * amdgpu_ci_send_msg_to_smc_with_parameter - send an SMC message that
 * takes one 32-bit argument.  The argument register must be populated
 * before the message is issued.  Returns the SMC result code.
 */
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(mmSMC_MSG_ARG_0, parameter);
	return amdgpu_ci_send_msg_to_smc(adev, msg);
}
1808
1809 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1810                                                         PPSMC_Msg msg, u32 *parameter)
1811 {
1812         PPSMC_Result smc_result;
1813
1814         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1815
1816         if ((smc_result == PPSMC_Result_OK) && parameter)
1817                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1818
1819         return smc_result;
1820 }
1821
1822 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1823 {
1824         struct ci_power_info *pi = ci_get_pi(adev);
1825
1826         if (!pi->sclk_dpm_key_disabled) {
1827                 PPSMC_Result smc_result =
1828                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1829                 if (smc_result != PPSMC_Result_OK)
1830                         return -EINVAL;
1831         }
1832
1833         return 0;
1834 }
1835
1836 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1837 {
1838         struct ci_power_info *pi = ci_get_pi(adev);
1839
1840         if (!pi->mclk_dpm_key_disabled) {
1841                 PPSMC_Result smc_result =
1842                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1843                 if (smc_result != PPSMC_Result_OK)
1844                         return -EINVAL;
1845         }
1846
1847         return 0;
1848 }
1849
1850 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1851 {
1852         struct ci_power_info *pi = ci_get_pi(adev);
1853
1854         if (!pi->pcie_dpm_key_disabled) {
1855                 PPSMC_Result smc_result =
1856                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1857                 if (smc_result != PPSMC_Result_OK)
1858                         return -EINVAL;
1859         }
1860
1861         return 0;
1862 }
1863
1864 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1865 {
1866         struct ci_power_info *pi = ci_get_pi(adev);
1867
1868         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1869                 PPSMC_Result smc_result =
1870                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1871                 if (smc_result != PPSMC_Result_OK)
1872                         return -EINVAL;
1873         }
1874
1875         return 0;
1876 }
1877
1878 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1879                                        u32 target_tdp)
1880 {
1881         PPSMC_Result smc_result =
1882                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1883         if (smc_result != PPSMC_Result_OK)
1884                 return -EINVAL;
1885         return 0;
1886 }
1887
#if 0
/* Currently unused: would fall back to the boot state by disabling DPM. */
static int ci_set_boot_state(struct amdgpu_device *adev)
{
	return ci_enable_sclk_mclk_dpm(adev, false);
}
#endif
1894
1895 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1896 {
1897         u32 sclk_freq;
1898         PPSMC_Result smc_result =
1899                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1900                                                     PPSMC_MSG_API_GetSclkFrequency,
1901                                                     &sclk_freq);
1902         if (smc_result != PPSMC_Result_OK)
1903                 sclk_freq = 0;
1904
1905         return sclk_freq;
1906 }
1907
1908 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1909 {
1910         u32 mclk_freq;
1911         PPSMC_Result smc_result =
1912                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1913                                                     PPSMC_MSG_API_GetMclkFrequency,
1914                                                     &mclk_freq);
1915         if (smc_result != PPSMC_Result_OK)
1916                 mclk_freq = 0;
1917
1918         return mclk_freq;
1919 }
1920
/*
 * ci_dpm_start_smc - release and boot the SMC.
 *
 * Programs the jump-on-start vector, starts the SMC clock and core, then
 * busy-waits (bounded by adev->usec_timeout iterations) until the
 * firmware flags report interrupts enabled.
 * NOTE(review): a timeout here is silent; no error reaches the caller.
 */
static void ci_dpm_start_smc(struct amdgpu_device *adev)
{
	int i;

	amdgpu_ci_program_jump_on_start(adev);
	amdgpu_ci_start_smc_clock(adev);
	amdgpu_ci_start_smc(adev);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
			break;
	}
}
1933
/*
 * ci_dpm_stop_smc - halt the SMC by resetting it and gating its clock.
 */
static void ci_dpm_stop_smc(struct amdgpu_device *adev)
{
	amdgpu_ci_reset_smc(adev);
	amdgpu_ci_stop_smc_clock(adev);
}
1939
1940 static int ci_process_firmware_header(struct amdgpu_device *adev)
1941 {
1942         struct ci_power_info *pi = ci_get_pi(adev);
1943         u32 tmp;
1944         int ret;
1945
1946         ret = amdgpu_ci_read_smc_sram_dword(adev,
1947                                      SMU7_FIRMWARE_HEADER_LOCATION +
1948                                      offsetof(SMU7_Firmware_Header, DpmTable),
1949                                      &tmp, pi->sram_end);
1950         if (ret)
1951                 return ret;
1952
1953         pi->dpm_table_start = tmp;
1954
1955         ret = amdgpu_ci_read_smc_sram_dword(adev,
1956                                      SMU7_FIRMWARE_HEADER_LOCATION +
1957                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1958                                      &tmp, pi->sram_end);
1959         if (ret)
1960                 return ret;
1961
1962         pi->soft_regs_start = tmp;
1963
1964         ret = amdgpu_ci_read_smc_sram_dword(adev,
1965                                      SMU7_FIRMWARE_HEADER_LOCATION +
1966                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1967                                      &tmp, pi->sram_end);
1968         if (ret)
1969                 return ret;
1970
1971         pi->mc_reg_table_start = tmp;
1972
1973         ret = amdgpu_ci_read_smc_sram_dword(adev,
1974                                      SMU7_FIRMWARE_HEADER_LOCATION +
1975                                      offsetof(SMU7_Firmware_Header, FanTable),
1976                                      &tmp, pi->sram_end);
1977         if (ret)
1978                 return ret;
1979
1980         pi->fan_table_start = tmp;
1981
1982         ret = amdgpu_ci_read_smc_sram_dword(adev,
1983                                      SMU7_FIRMWARE_HEADER_LOCATION +
1984                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1985                                      &tmp, pi->sram_end);
1986         if (ret)
1987                 return ret;
1988
1989         pi->arb_table_start = tmp;
1990
1991         return 0;
1992 }
1993
/*
 * ci_read_clock_registers - snapshot the SPLL/MPLL clock registers.
 *
 * Caches the current engine-PLL (SPLL) and memory-PLL (MPLL) register
 * values in pi->clock_registers so later clock-level programming can
 * start from the hardware state.
 */
static void ci_read_clock_registers(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
}
2020
2021 static void ci_init_sclk_t(struct amdgpu_device *adev)
2022 {
2023         struct ci_power_info *pi = ci_get_pi(adev);
2024
2025         pi->low_sclk_interrupt_t = 0;
2026 }
2027
2028 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2029                                          bool enable)
2030 {
2031         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2032
2033         if (enable)
2034                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2035         else
2036                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2037         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2038 }
2039
2040 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2041 {
2042         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2043
2044         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2045
2046         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2047 }
2048
#if 0
/*
 * Currently unused: ULP (ultra-low-power) entry/exit via direct writes
 * to the SMC message register with fixed settle delays.
 */
static int ci_enter_ulp_state(struct amdgpu_device *adev)
{

	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct amdgpu_device *adev)
{
	int i;

	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	/* poll for the SMC acknowledgment, bounded by the device timeout */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmSMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif
2077
2078 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2079                                         bool has_display)
2080 {
2081         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2082
2083         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2084 }
2085
2086 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2087                                       bool enable)
2088 {
2089         struct ci_power_info *pi = ci_get_pi(adev);
2090
2091         if (enable) {
2092                 if (pi->caps_sclk_ds) {
2093                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2094                                 return -EINVAL;
2095                 } else {
2096                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2097                                 return -EINVAL;
2098                 }
2099         } else {
2100                 if (pi->caps_sclk_ds) {
2101                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2102                                 return -EINVAL;
2103                 }
2104         }
2105
2106         return 0;
2107 }
2108
/*
 * ci_program_display_gap - program display-gap based power management.
 *
 * Selects the display-gap source (vblank-or-watermark when any CRTC is
 * active, ignore otherwise), computes the pre-vblank interval from the
 * refresh rate and vblank time, programs it (in reference-clock units)
 * into CG_DISPLAY_GAP_CNTL2, and finally tells the SMC whether exactly
 * one display is active.
 */
static void ci_program_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = adev->clock.spll.reference_freq;
	u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);

	tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
	if (adev->pm.dpm.new_active_crtc_count > 0)
		tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	else
		tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);

	/* fall back to sane defaults when no display timing is available */
	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	/*
	 * NOTE(review): this subtraction can underflow when
	 * vblank_time > frame_time_in_us - 200 -- confirm callers bound
	 * the vblank time appropriately.
	 */
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));

}
2142
/*
 * ci_enable_spread_spectrum - toggle SCLK spread spectrum.
 *
 * Enabling only sets the dynamic spread-spectrum bit, and only when the
 * board supports SCLK SS; disabling additionally clears the SPLL SSEN
 * bit regardless of the capability flag.
 */
static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(ixGENERAL_PWRMGT);
			tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
			WREG32_SMC(ixGENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
		WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}
2164
2165 static void ci_program_sstp(struct amdgpu_device *adev)
2166 {
2167         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2168         ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2169          (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2170 }
2171
2172 static void ci_enable_display_gap(struct amdgpu_device *adev)
2173 {
2174         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2175
2176         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2177                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2178         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2179                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2180
2181         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2182 }
2183
/*
 * ci_program_vc - program the frequency-transition voting clients.
 *
 * Releases the SCLK/busy counter resets, then loads the default voting
 * client masks; ci_clear_vc() undoes this.
 */
static void ci_program_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
}
2201
/*
 * ci_clear_vc - clear the frequency-transition voting clients.
 *
 * Inverse of ci_program_vc(): holds the SCLK/busy counters in reset and
 * zeroes all voting client masks.
 */
static void ci_clear_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
}
2219
/*
 * ci_upload_firmware - load the SMC microcode if it is not running yet.
 *
 * Waits for the SMC boot sequence to complete (busy-wait bounded by
 * adev->usec_timeout; a timeout is not reported), then holds the SMC in
 * reset with its clock stopped and uploads the ucode image.
 * Returns 0 on success or the load error code.
 */
static int ci_upload_firmware(struct amdgpu_device *adev)
{
	int i, ret;

	if (amdgpu_ci_is_smc_running(adev)) {
		DRM_INFO("smc is running, no need to load smc firmware\n");
		return 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
			break;
	}
	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);

	amdgpu_ci_stop_smc_clock(adev);
	amdgpu_ci_reset_smc(adev);

	ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);

	return ret;

}
2243
2244 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2245                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2246                                      struct atom_voltage_table *voltage_table)
2247 {
2248         u32 i;
2249
2250         if (voltage_dependency_table == NULL)
2251                 return -EINVAL;
2252
2253         voltage_table->mask_low = 0;
2254         voltage_table->phase_delay = 0;
2255
2256         voltage_table->count = voltage_dependency_table->count;
2257         for (i = 0; i < voltage_table->count; i++) {
2258                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2259                 voltage_table->entries[i].smio_low = 0;
2260         }
2261
2262         return 0;
2263 }
2264
2265 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2266 {
2267         struct ci_power_info *pi = ci_get_pi(adev);
2268         int ret;
2269
2270         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2271                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2272                                                         VOLTAGE_OBJ_GPIO_LUT,
2273                                                         &pi->vddc_voltage_table);
2274                 if (ret)
2275                         return ret;
2276         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2277                 ret = ci_get_svi2_voltage_table(adev,
2278                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2279                                                 &pi->vddc_voltage_table);
2280                 if (ret)
2281                         return ret;
2282         }
2283
2284         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2285                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2286                                                          &pi->vddc_voltage_table);
2287
2288         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2289                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2290                                                         VOLTAGE_OBJ_GPIO_LUT,
2291                                                         &pi->vddci_voltage_table);
2292                 if (ret)
2293                         return ret;
2294         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2295                 ret = ci_get_svi2_voltage_table(adev,
2296                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2297                                                 &pi->vddci_voltage_table);
2298                 if (ret)
2299                         return ret;
2300         }
2301
2302         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2303                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2304                                                          &pi->vddci_voltage_table);
2305
2306         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2307                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2308                                                         VOLTAGE_OBJ_GPIO_LUT,
2309                                                         &pi->mvdd_voltage_table);
2310                 if (ret)
2311                         return ret;
2312         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2313                 ret = ci_get_svi2_voltage_table(adev,
2314                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2315                                                 &pi->mvdd_voltage_table);
2316                 if (ret)
2317                         return ret;
2318         }
2319
2320         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2321                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2322                                                          &pi->mvdd_voltage_table);
2323
2324         return 0;
2325 }
2326
2327 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2328                                           struct atom_voltage_table_entry *voltage_table,
2329                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2330 {
2331         int ret;
2332
2333         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2334                                             &smc_voltage_table->StdVoltageHiSidd,
2335                                             &smc_voltage_table->StdVoltageLoSidd);
2336
2337         if (ret) {
2338                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2339                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2340         }
2341
2342         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2343         smc_voltage_table->StdVoltageHiSidd =
2344                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2345         smc_voltage_table->StdVoltageLoSidd =
2346                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2347 }
2348
2349 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2350                                       SMU7_Discrete_DpmTable *table)
2351 {
2352         struct ci_power_info *pi = ci_get_pi(adev);
2353         unsigned int count;
2354
2355         table->VddcLevelCount = pi->vddc_voltage_table.count;
2356         for (count = 0; count < table->VddcLevelCount; count++) {
2357                 ci_populate_smc_voltage_table(adev,
2358                                               &pi->vddc_voltage_table.entries[count],
2359                                               &table->VddcLevel[count]);
2360
2361                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2362                         table->VddcLevel[count].Smio |=
2363                                 pi->vddc_voltage_table.entries[count].smio_low;
2364                 else
2365                         table->VddcLevel[count].Smio = 0;
2366         }
2367         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2368
2369         return 0;
2370 }
2371
2372 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2373                                        SMU7_Discrete_DpmTable *table)
2374 {
2375         unsigned int count;
2376         struct ci_power_info *pi = ci_get_pi(adev);
2377
2378         table->VddciLevelCount = pi->vddci_voltage_table.count;
2379         for (count = 0; count < table->VddciLevelCount; count++) {
2380                 ci_populate_smc_voltage_table(adev,
2381                                               &pi->vddci_voltage_table.entries[count],
2382                                               &table->VddciLevel[count]);
2383
2384                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2385                         table->VddciLevel[count].Smio |=
2386                                 pi->vddci_voltage_table.entries[count].smio_low;
2387                 else
2388                         table->VddciLevel[count].Smio = 0;
2389         }
2390         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2391
2392         return 0;
2393 }
2394
2395 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2396                                       SMU7_Discrete_DpmTable *table)
2397 {
2398         struct ci_power_info *pi = ci_get_pi(adev);
2399         unsigned int count;
2400
2401         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2402         for (count = 0; count < table->MvddLevelCount; count++) {
2403                 ci_populate_smc_voltage_table(adev,
2404                                               &pi->mvdd_voltage_table.entries[count],
2405                                               &table->MvddLevel[count]);
2406
2407                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2408                         table->MvddLevel[count].Smio |=
2409                                 pi->mvdd_voltage_table.entries[count].smio_low;
2410                 else
2411                         table->MvddLevel[count].Smio = 0;
2412         }
2413         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2414
2415         return 0;
2416 }
2417
2418 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2419                                           SMU7_Discrete_DpmTable *table)
2420 {
2421         int ret;
2422
2423         ret = ci_populate_smc_vddc_table(adev, table);
2424         if (ret)
2425                 return ret;
2426
2427         ret = ci_populate_smc_vddci_table(adev, table);
2428         if (ret)
2429                 return ret;
2430
2431         ret = ci_populate_smc_mvdd_table(adev, table);
2432         if (ret)
2433                 return ret;
2434
2435         return 0;
2436 }
2437
2438 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2439                                   SMU7_Discrete_VoltageLevel *voltage)
2440 {
2441         struct ci_power_info *pi = ci_get_pi(adev);
2442         u32 i = 0;
2443
2444         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2445                 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2446                         if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2447                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2448                                 break;
2449                         }
2450                 }
2451
2452                 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2453                         return -EINVAL;
2454         }
2455
2456         return -EINVAL;
2457 }
2458
2459 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2460                                          struct atom_voltage_table_entry *voltage_table,
2461                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2462 {
2463         u16 v_index, idx;
2464         bool voltage_found = false;
2465         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2466         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2467
2468         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2469                 return -EINVAL;
2470
2471         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2472                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2473                         if (voltage_table->value ==
2474                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2475                                 voltage_found = true;
2476                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2477                                         idx = v_index;
2478                                 else
2479                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2480                                 *std_voltage_lo_sidd =
2481                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2482                                 *std_voltage_hi_sidd =
2483                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2484                                 break;
2485                         }
2486                 }
2487
2488                 if (!voltage_found) {
2489                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2490                                 if (voltage_table->value <=
2491                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2492                                         voltage_found = true;
2493                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2494                                                 idx = v_index;
2495                                         else
2496                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2497                                         *std_voltage_lo_sidd =
2498                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2499                                         *std_voltage_hi_sidd =
2500                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2501                                         break;
2502                                 }
2503                         }
2504                 }
2505         }
2506
2507         return 0;
2508 }
2509
2510 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2511                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2512                                                   u32 sclk,
2513                                                   u32 *phase_shedding)
2514 {
2515         unsigned int i;
2516
2517         *phase_shedding = 1;
2518
2519         for (i = 0; i < limits->count; i++) {
2520                 if (sclk < limits->entries[i].sclk) {
2521                         *phase_shedding = i;
2522                         break;
2523                 }
2524         }
2525 }
2526
2527 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2528                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2529                                                   u32 mclk,
2530                                                   u32 *phase_shedding)
2531 {
2532         unsigned int i;
2533
2534         *phase_shedding = 1;
2535
2536         for (i = 0; i < limits->count; i++) {
2537                 if (mclk < limits->entries[i].mclk) {
2538                         *phase_shedding = i;
2539                         break;
2540                 }
2541         }
2542 }
2543
2544 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2545 {
2546         struct ci_power_info *pi = ci_get_pi(adev);
2547         u32 tmp;
2548         int ret;
2549
2550         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2551                                      &tmp, pi->sram_end);
2552         if (ret)
2553                 return ret;
2554
2555         tmp &= 0x00FFFFFF;
2556         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2557
2558         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2559                                        tmp, pi->sram_end);
2560 }
2561
2562 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2563                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2564                                          u32 clock, u32 *voltage)
2565 {
2566         u32 i = 0;
2567
2568         if (allowed_clock_voltage_table->count == 0)
2569                 return -EINVAL;
2570
2571         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2572                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2573                         *voltage = allowed_clock_voltage_table->entries[i].v;
2574                         return 0;
2575                 }
2576         }
2577
2578         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2579
2580         return 0;
2581 }
2582
2583 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2584 {
2585         u32 i;
2586         u32 tmp;
2587         u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2588
2589         if (sclk < min)
2590                 return 0;
2591
2592         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2593                 tmp = sclk >> i;
2594                 if (tmp >= min || i == 0)
2595                         break;
2596         }
2597
2598         return (u8)i;
2599 }
2600
/*
 * One-shot switch of the MC arbiter from register set F0 to F1.
 * Presumably copies F0 into F1 before activating it — see
 * ci_copy_and_switch_arb_sets() (defined elsewhere in this file).
 */
static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
	return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
2605
2606 static int ci_reset_to_default(struct amdgpu_device *adev)
2607 {
2608         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2609                 0 : -EINVAL;
2610 }
2611
2612 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2613 {
2614         u32 tmp;
2615
2616         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2617
2618         if (tmp == MC_CG_ARB_FREQ_F0)
2619                 return 0;
2620
2621         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2622 }
2623
2624 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2625                                         const u32 engine_clock,
2626                                         const u32 memory_clock,
2627                                         u32 *dram_timimg2)
2628 {
2629         bool patch;
2630         u32 tmp, tmp2;
2631
2632         tmp = RREG32(mmMC_SEQ_MISC0);
2633         patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2634
2635         if (patch &&
2636             ((adev->pdev->device == 0x67B0) ||
2637              (adev->pdev->device == 0x67B1))) {
2638                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2639                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2640                         *dram_timimg2 &= ~0x00ff0000;
2641                         *dram_timimg2 |= tmp2 << 16;
2642                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2643                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2644                         *dram_timimg2 &= ~0x00ff0000;
2645                         *dram_timimg2 |= tmp2 << 16;
2646                 }
2647         }
2648 }
2649
2650 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2651                                                 u32 sclk,
2652                                                 u32 mclk,
2653                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2654 {
2655         u32 dram_timing;
2656         u32 dram_timing2;
2657         u32 burst_time;
2658
2659         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2660
2661         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2662         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2663         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2664
2665         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2666
2667         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2668         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2669         arb_regs->McArbBurstTime = (u8)burst_time;
2670
2671         return 0;
2672 }
2673
2674 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2675 {
2676         struct ci_power_info *pi = ci_get_pi(adev);
2677         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2678         u32 i, j;
2679         int ret =  0;
2680
2681         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2682
2683         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2684                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2685                         ret = ci_populate_memory_timing_parameters(adev,
2686                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2687                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2688                                                                    &arb_regs.entries[i][j]);
2689                         if (ret)
2690                                 break;
2691                 }
2692         }
2693
2694         if (ret == 0)
2695                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2696                                            pi->arb_table_start,
2697                                            (u8 *)&arb_regs,
2698                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2699                                            pi->sram_end);
2700
2701         return ret;
2702 }
2703
2704 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2705 {
2706         struct ci_power_info *pi = ci_get_pi(adev);
2707
2708         if (pi->need_update_smu7_dpm_table == 0)
2709                 return 0;
2710
2711         return ci_do_program_memory_timing_parameters(adev);
2712 }
2713
2714 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2715                                           struct amdgpu_ps *amdgpu_boot_state)
2716 {
2717         struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2718         struct ci_power_info *pi = ci_get_pi(adev);
2719         u32 level = 0;
2720
2721         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2722                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2723                     boot_state->performance_levels[0].sclk) {
2724                         pi->smc_state_table.GraphicsBootLevel = level;
2725                         break;
2726                 }
2727         }
2728
2729         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2730                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2731                     boot_state->performance_levels[0].mclk) {
2732                         pi->smc_state_table.MemoryBootLevel = level;
2733                         break;
2734                 }
2735         }
2736 }
2737
2738 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2739 {
2740         u32 i;
2741         u32 mask_value = 0;
2742
2743         for (i = dpm_table->count; i > 0; i--) {
2744                 mask_value = mask_value << 1;
2745                 if (dpm_table->dpm_levels[i-1].enabled)
2746                         mask_value |= 0x1;
2747                 else
2748                         mask_value &= 0xFFFFFFFE;
2749         }
2750
2751         return mask_value;
2752 }
2753
2754 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2755                                        SMU7_Discrete_DpmTable *table)
2756 {
2757         struct ci_power_info *pi = ci_get_pi(adev);
2758         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2759         u32 i;
2760
2761         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2762                 table->LinkLevel[i].PcieGenSpeed =
2763                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2764                 table->LinkLevel[i].PcieLaneCount =
2765                         amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2766                 table->LinkLevel[i].EnabledForActivity = 1;
2767                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2768                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2769         }
2770
2771         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2772         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2773                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2774 }
2775
2776 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2777                                      SMU7_Discrete_DpmTable *table)
2778 {
2779         u32 count;
2780         struct atom_clock_dividers dividers;
2781         int ret = -EINVAL;
2782
2783         table->UvdLevelCount =
2784                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2785
2786         for (count = 0; count < table->UvdLevelCount; count++) {
2787                 table->UvdLevel[count].VclkFrequency =
2788                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2789                 table->UvdLevel[count].DclkFrequency =
2790                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2791                 table->UvdLevel[count].MinVddc =
2792                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2793                 table->UvdLevel[count].MinVddcPhases = 1;
2794
2795                 ret = amdgpu_atombios_get_clock_dividers(adev,
2796                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2797                                                          table->UvdLevel[count].VclkFrequency, false, &dividers);
2798                 if (ret)
2799                         return ret;
2800
2801                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2802
2803                 ret = amdgpu_atombios_get_clock_dividers(adev,
2804                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2805                                                          table->UvdLevel[count].DclkFrequency, false, &dividers);
2806                 if (ret)
2807                         return ret;
2808
2809                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2810
2811                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2812                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2813                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2814         }
2815
2816         return ret;
2817 }
2818
2819 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2820                                      SMU7_Discrete_DpmTable *table)
2821 {
2822         u32 count;
2823         struct atom_clock_dividers dividers;
2824         int ret = -EINVAL;
2825
2826         table->VceLevelCount =
2827                 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2828
2829         for (count = 0; count < table->VceLevelCount; count++) {
2830                 table->VceLevel[count].Frequency =
2831                         adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2832                 table->VceLevel[count].MinVoltage =
2833                         (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2834                 table->VceLevel[count].MinPhases = 1;
2835
2836                 ret = amdgpu_atombios_get_clock_dividers(adev,
2837                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2838                                                          table->VceLevel[count].Frequency, false, &dividers);
2839                 if (ret)
2840                         return ret;
2841
2842                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2843
2844                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2845                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2846         }
2847
2848         return ret;
2849
2850 }
2851
2852 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2853                                      SMU7_Discrete_DpmTable *table)
2854 {
2855         u32 count;
2856         struct atom_clock_dividers dividers;
2857         int ret = -EINVAL;
2858
2859         table->AcpLevelCount = (u8)
2860                 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2861
2862         for (count = 0; count < table->AcpLevelCount; count++) {
2863                 table->AcpLevel[count].Frequency =
2864                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2865                 table->AcpLevel[count].MinVoltage =
2866                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2867                 table->AcpLevel[count].MinPhases = 1;
2868
2869                 ret = amdgpu_atombios_get_clock_dividers(adev,
2870                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2871                                                          table->AcpLevel[count].Frequency, false, &dividers);
2872                 if (ret)
2873                         return ret;
2874
2875                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2876
2877                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2878                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2879         }
2880
2881         return ret;
2882 }
2883
2884 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2885                                       SMU7_Discrete_DpmTable *table)
2886 {
2887         u32 count;
2888         struct atom_clock_dividers dividers;
2889         int ret = -EINVAL;
2890
2891         table->SamuLevelCount =
2892                 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2893
2894         for (count = 0; count < table->SamuLevelCount; count++) {
2895                 table->SamuLevel[count].Frequency =
2896                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2897                 table->SamuLevel[count].MinVoltage =
2898                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2899                 table->SamuLevel[count].MinPhases = 1;
2900
2901                 ret = amdgpu_atombios_get_clock_dividers(adev,
2902                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2903                                                          table->SamuLevel[count].Frequency, false, &dividers);
2904                 if (ret)
2905                         return ret;
2906
2907                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2908
2909                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2910                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2911         }
2912
2913         return ret;
2914 }
2915
/*
 * Compute the MPLL/DLL register values for one memory clock level and
 * fill them into @mclk for later upload to the SMC.
 *
 * Starts from the snapshot of the clock registers kept in
 * pi->clock_registers and only rewrites the bitfields that depend on
 * @memory_clock, @strobe_mode and @dll_state_on; everything else is
 * passed through unchanged.
 *
 * Returns 0 on success, or the error from the atombios MPLL divider
 * lookup.
 */
static int ci_calculate_mclk_params(struct amdgpu_device *adev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	/* start from the saved power-on register values */
	u32  dll_cntl = pi->clock_registers.dll_cntl;
	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	/* have the VBIOS compute the MPLL dividers for this clock */
	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
	mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);

	/* feedback divider (integer + fractional) and VCO mode */
	mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
			MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
	mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
		(mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
		(mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);

	mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);

	/* the DQ branch is only programmed on GDDR5 */
	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
				MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
				(mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
	}

	/* optional spread spectrum: derive CLKS/CLKV from the SS info */
	if (pi->caps_mclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		/* nominal VCO frequency: x4 in QDR mode, x2 otherwise */
		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		/* tmp = (freq_nom / ref)^2, used in the CLKV formula below */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
			mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);

			mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
			mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
		}
	}

	mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);

	/* keep the read DLLs powered (PDNB) only when requested */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
	else
		mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	/* hand the computed register image back in CPU byte order;
	 * the caller byte-swaps where needed before the SMC upload */
	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
3007
/*
 * ci_populate_single_memory_level - fill one SMC memory (MCLK) DPM level.
 * @adev: amdgpu device
 * @memory_clock: target memory clock for this level (same units as the
 *                clock/voltage dependency tables — presumably 10 kHz;
 *                TODO confirm against callers)
 * @memory_level: SMC-side level structure to fill
 *
 * Looks up the minimum VDDC/VDDCI/MVDD voltages for the clock, decides
 * stutter/strobe/EDC modes and the memory DLL state from driver thresholds
 * and MC registers, computes the MPLL register values via
 * ci_calculate_mclk_params(), then converts the multi-byte fields to the
 * SMC's big-endian layout in place.
 *
 * Returns 0 on success or a negative error code.
 */
static int ci_populate_single_memory_level(struct amdgpu_device *adev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;
	bool dll_state_on;

	/* Minimum voltages for this clock, from the dependency tables
	 * (each lookup is skipped when the corresponding table is absent,
	 * leaving the caller-provided field untouched). */
	if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	/* Default to a single VDDC phase; phase shedding may lower the
	 * requirement based on the phase shedding limits table. */
	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	/* Fixed per-level policy values (UpH/DownH are hysteresis
	 * percentages for SMC level up/down decisions). */
	memory_level->EnabledForActivity = 1;
	memory_level->EnabledForThrottle = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	/* Feature flags default off; enabled below when thresholds allow. */
	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* Memory self-refresh stutter: only at low clocks, with UVD idle,
	 * display stutter enabled in DPG, and at most two active CRTCs. */
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (!pi->uvd_enabled) &&
	    (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
	    (adev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		memory_level->StrobeRatio =
			ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		/* EDC (error detection and correction) read/write above the
		 * respective clock thresholds. */
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		/* DLL on/off comes from the MC_SEQ straps: MISC7[19:16] is
		 * the strobe-ratio cutover, MISC5/MISC6 bit 1 the DLL state
		 * for the two ranges — NOTE(review): bit meanings inferred
		 * from usage; confirm against MC register docs. */
		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		/* DDR3 path: fixed ratio helper and DLL strap from MISC5. */
		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	/* Compute the MPLL/MCLK register images for this level. */
	ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	/* Convert to the SMC's big-endian layout. Beyond this point the
	 * multi-byte fields must not be read back as CPU-endian values. */
	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
3122
/*
 * ci_populate_smc_acpi_level - fill the ACPI (lowest-power) graphics and
 * memory levels of the SMC discrete DPM table.
 * @adev: amdgpu device
 * @table: SMC DPM table whose ACPILevel/MemoryACPILevel are written
 *
 * The ACPI level runs the SCLK from the SPLL reference clock with the
 * SPLL held in reset, and the memory side with the MPLL DLLs powered
 * down and held in reset. Voltages fall back to the pp-table minimums
 * when no explicit ACPI voltages were parsed.
 *
 * Fields are converted to the SMC's big-endian layout as they are
 * finalized; note that several (e.g. MinVddc) are stored pre-swapped.
 *
 * Returns 0 on success or a negative error code from the divider lookup.
 */
static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	/* Start from the register values cached at init time. */
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Use the VBIOS-provided ACPI VDDC if available, else the lowest
	 * VDDC seen in the power-play table (stored big-endian here). */
	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	/* ACPI SCLK is the bare SPLL reference clock. */
	table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset for the ACPI state. */
	spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
	spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;

	/* SCLK mux select 4 — bypass source for the ACPI state.
	 * NOTE(review): mux encoding taken from usage; confirm against the
	 * CG_SPLL_FUNC_CNTL_2 register description. */
	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* Byte-swap the graphics ACPI level for the SMC. */
	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	/* Memory ACPI level shares the graphics VDDC (already big-endian). */
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	/* MinVddci is only written when VDDCI is controllable; otherwise the
	 * field is left as the caller provided it. */
	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	/* MVDD for a 0 MHz memory clock; 0 if the lookup fails. */
	if (ci_populate_mvdd_value(adev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	/* Hold the memory DLLs in reset and powered down, and take both
	 * channels out of bypass. */
	mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
		MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
	mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	/* ACPI level never participates in activity/throttle decisions. */
	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}
3239
3240
3241 static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3242 {
3243         struct ci_power_info *pi = ci_get_pi(adev);
3244         struct ci_ulv_parm *ulv = &pi->ulv;
3245
3246         if (ulv->supported) {
3247                 if (enable)
3248                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3249                                 0 : -EINVAL;
3250                 else
3251                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3252                                 0 : -EINVAL;
3253         }
3254
3255         return 0;
3256 }
3257
3258 static int ci_populate_ulv_level(struct amdgpu_device *adev,
3259                                  SMU7_Discrete_Ulv *state)
3260 {
3261         struct ci_power_info *pi = ci_get_pi(adev);
3262         u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3263
3264         state->CcPwrDynRm = 0;
3265         state->CcPwrDynRm1 = 0;
3266
3267         if (ulv_voltage == 0) {
3268                 pi->ulv.supported = false;
3269                 return 0;
3270         }
3271
3272         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3273                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3274                         state->VddcOffset = 0;
3275                 else
3276                         state->VddcOffset =
3277                                 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3278         } else {
3279                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3280                         state->VddcOffsetVid = 0;
3281                 else
3282                         state->VddcOffsetVid = (u8)
3283                                 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3284                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3285         }
3286         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3287
3288         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3289         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3290         state->VddcOffset = cpu_to_be16(state->VddcOffset);
3291
3292         return 0;
3293 }
3294
/*
 * ci_calculate_sclk_params - compute the SPLL register values for one
 * engine clock and store them in an SMC graphics level.
 * @adev: amdgpu device
 * @engine_clock: target engine clock (same units as the atombios divider
 *                interface — presumably 10 kHz; TODO confirm)
 * @sclk: SMC graphics level receiving the register images (CPU byte
 *        order; the caller performs the big-endian conversion)
 *
 * Uses the atombios divider service for the feedback/post dividers and,
 * when spread spectrum is supported and described by the VBIOS, programs
 * the SPLL spread-spectrum registers as well.
 *
 * Returns 0 on success or a negative error code from the divider lookup.
 */
static int ci_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	/* Start from the SPLL register values cached at init time. */
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	/* Feedback divider field is 26 bits wide. */
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
	spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;

	if (pi->caps_sclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		/* Spread spectrum only when the VBIOS describes SS for this
		 * VCO frequency; clk_s/clk_v follow the atom SS formulas. */
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
			cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
			cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);

			cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
			cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}
3350
/*
 * ci_populate_single_graphic_level - fill one SMC graphics (SCLK) DPM level.
 * @adev: amdgpu device
 * @engine_clock: target engine clock for this level
 * @sclk_activity_level_t: activity threshold for level transitions
 * @graphic_level: SMC-side level structure to fill
 *
 * Computes the SPLL register images, looks up the minimum VDDC for the
 * clock, fills the fixed policy fields, then converts the multi-byte
 * fields to the SMC's big-endian layout in place.
 *
 * Note: EnabledForActivity is intentionally not set here; the caller
 * enables it for level 0 only.
 *
 * Returns 0 on success or a negative error code.
 */
static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
	if (ret)
		return ret;

	/* Minimum VDDC required to run this engine clock. */
	ret = ci_get_dependency_volt_by_clk(adev,
					    &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags =  0;
	/* Single VDDC phase by default; phase shedding may adjust it. */
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	/* Fixed per-level policy values. */
	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	/* Sclk deep sleep divider, bounded by the minimum engine clock. */
	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* Convert to the SMC's big-endian layout; do not read the fields
	 * back as CPU-endian values after this point. */
	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}
3410
3411 static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3412 {
3413         struct ci_power_info *pi = ci_get_pi(adev);
3414         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3415         u32 level_array_address = pi->dpm_table_start +
3416                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3417         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3418                 SMU7_MAX_LEVELS_GRAPHICS;
3419         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3420         u32 i, ret;
3421
3422         memset(levels, 0, level_array_size);
3423
3424         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3425                 ret = ci_populate_single_graphic_level(adev,
3426                                                        dpm_table->sclk_table.dpm_levels[i].value,
3427                                                        (u16)pi->activity_target[i],
3428                                                        &pi->smc_state_table.GraphicsLevel[i]);
3429                 if (ret)
3430                         return ret;
3431                 if (i > 1)
3432                         pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3433                 if (i == (dpm_table->sclk_table.count - 1))
3434                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3435                                 PPSMC_DISPLAY_WATERMARK_HIGH;
3436         }
3437         pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3438
3439         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3440         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3441                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3442
3443         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3444                                    (u8 *)levels, level_array_size,
3445                                    pi->sram_end);
3446         if (ret)
3447                 return ret;
3448
3449         return 0;
3450 }
3451
/*
 * ci_populate_ulv_state - populate the SMC ULV state structure.
 *
 * Thin wrapper around ci_populate_ulv_level(), kept so the name lines up
 * with the other ci_populate_*_state() helpers.
 */
static int ci_populate_ulv_state(struct amdgpu_device *adev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(adev, ulv_level);
}
3457
3458 static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3459 {
3460         struct ci_power_info *pi = ci_get_pi(adev);
3461         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3462         u32 level_array_address = pi->dpm_table_start +
3463                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3464         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3465                 SMU7_MAX_LEVELS_MEMORY;
3466         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3467         u32 i, ret;
3468
3469         memset(levels, 0, level_array_size);
3470
3471         for (i = 0; i < dpm_table->mclk_table.count; i++) {
3472                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3473                         return -EINVAL;
3474                 ret = ci_populate_single_memory_level(adev,
3475                                                       dpm_table->mclk_table.dpm_levels[i].value,
3476                                                       &pi->smc_state_table.MemoryLevel[i]);
3477                 if (ret)
3478                         return ret;
3479         }
3480
3481         if ((dpm_table->mclk_table.count >= 2) &&
3482             ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3483                 pi->smc_state_table.MemoryLevel[1].MinVddc =
3484                         pi->smc_state_table.MemoryLevel[0].MinVddc;
3485                 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3486                         pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3487         }
3488
3489         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3490
3491         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3492         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3493                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3494
3495         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3496                 PPSMC_DISPLAY_WATERMARK_HIGH;
3497
3498         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3499                                    (u8 *)levels, level_array_size,
3500                                    pi->sram_end);
3501         if (ret)
3502                 return ret;
3503
3504         return 0;
3505 }
3506
3507 static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3508                                       struct ci_single_dpm_table* dpm_table,
3509                                       u32 count)
3510 {
3511         u32 i;
3512
3513         dpm_table->count = count;
3514         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3515                 dpm_table->dpm_levels[i].enabled = false;
3516 }
3517
3518 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3519                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3520 {
3521         dpm_table->dpm_levels[index].value = pcie_gen;
3522         dpm_table->dpm_levels[index].param1 = pcie_lanes;
3523         dpm_table->dpm_levels[index].enabled = true;
3524 }
3525
3526 static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3527 {
3528         struct ci_power_info *pi = ci_get_pi(adev);
3529
3530         if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3531                 return -EINVAL;
3532
3533         if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3534                 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3535                 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3536         } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3537                 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3538                 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3539         }
3540
3541         ci_reset_single_dpm_table(adev,
3542                                   &pi->dpm_table.pcie_speed_table,
3543                                   SMU7_MAX_LEVELS_LINK);
3544
3545         if (adev->asic_type == CHIP_BONAIRE)
3546                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3547                                           pi->pcie_gen_powersaving.min,
3548                                           pi->pcie_lane_powersaving.max);
3549         else
3550                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3551                                           pi->pcie_gen_powersaving.min,
3552                                           pi->pcie_lane_powersaving.min);
3553         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3554                                   pi->pcie_gen_performance.min,
3555                                   pi->pcie_lane_performance.min);
3556         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3557                                   pi->pcie_gen_powersaving.min,
3558                                   pi->pcie_lane_powersaving.max);
3559         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3560                                   pi->pcie_gen_performance.min,
3561                                   pi->pcie_lane_performance.max);
3562         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3563                                   pi->pcie_gen_powersaving.max,
3564                                   pi->pcie_lane_powersaving.max);
3565         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3566                                   pi->pcie_gen_performance.max,
3567                                   pi->pcie_lane_performance.max);
3568
3569         pi->dpm_table.pcie_speed_table.count = 6;
3570
3571         return 0;
3572 }
3573
3574 static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3575 {
3576         struct ci_power_info *pi = ci_get_pi(adev);
3577         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3578                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3579         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3580                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3581         struct amdgpu_cac_leakage_table *std_voltage_table =
3582                 &adev->pm.dpm.dyn_state.cac_leakage_table;
3583         u32 i;
3584
3585         if (allowed_sclk_vddc_table == NULL)
3586                 return -EINVAL;
3587         if (allowed_sclk_vddc_table->count < 1)
3588                 return -EINVAL;
3589         if (allowed_mclk_table == NULL)
3590                 return -EINVAL;
3591         if (allowed_mclk_table->count < 1)
3592                 return -EINVAL;
3593
3594         memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3595
3596         ci_reset_single_dpm_table(adev,
3597                                   &pi->dpm_table.sclk_table,
3598                                   SMU7_MAX_LEVELS_GRAPHICS);
3599         ci_reset_single_dpm_table(adev,
3600                                   &pi->dpm_table.mclk_table,
3601                                   SMU7_MAX_LEVELS_MEMORY);
3602         ci_reset_single_dpm_table(adev,
3603                                   &pi->dpm_table.vddc_table,
3604                                   SMU7_MAX_LEVELS_VDDC);
3605         ci_reset_single_dpm_table(adev,
3606                                   &pi->dpm_table.vddci_table,
3607                                   SMU7_MAX_LEVELS_VDDCI);
3608         ci_reset_single_dpm_table(adev,
3609                                   &pi->dpm_table.mvdd_table,
3610                                   SMU7_MAX_LEVELS_MVDD);
3611
3612         pi->dpm_table.sclk_table.count = 0;
3613         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3614                 if ((i == 0) ||
3615                     (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3616                      allowed_sclk_vddc_table->entries[i].clk)) {
3617                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3618                                 allowed_sclk_vddc_table->entries[i].clk;
3619                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3620                                 (i == 0) ? true : false;
3621                         pi->dpm_table.sclk_table.count++;
3622                 }
3623         }
3624
3625         pi->dpm_table.mclk_table.count = 0;
3626         for (i = 0; i < allowed_mclk_table->count; i++) {
3627                 if ((i == 0) ||
3628                     (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3629                      allowed_mclk_table->entries[i].clk)) {
3630                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3631                                 allowed_mclk_table->entries[i].clk;
3632                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3633                                 (i == 0) ? true : false;
3634                         pi->dpm_table.mclk_table.count++;
3635                 }
3636         }
3637
3638         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3639                 pi->dpm_table.vddc_table.dpm_levels[i].value =
3640                         allowed_sclk_vddc_table->entries[i].v;
3641                 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3642                         std_voltage_table->entries[i].leakage;
3643                 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3644         }
3645         pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3646
3647         allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3648         if (allowed_mclk_table) {
3649                 for (i = 0; i < allowed_mclk_table->count; i++) {
3650                         pi->dpm_table.vddci_table.dpm_levels[i].value =
3651                                 allowed_mclk_table->entries[i].v;
3652                         pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3653                 }
3654                 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3655         }
3656
3657         allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3658         if (allowed_mclk_table) {
3659                 for (i = 0; i < allowed_mclk_table->count; i++) {
3660                         pi->dpm_table.mvdd_table.dpm_levels[i].value =
3661                                 allowed_mclk_table->entries[i].v;
3662                         pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3663                 }
3664                 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3665         }
3666
3667         ci_setup_default_pcie_tables(adev);
3668
3669         /* save a copy of the default DPM table */
3670         memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3671                         sizeof(struct ci_dpm_table));
3672
3673         return 0;
3674 }
3675
3676 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3677                               u32 value, u32 *boot_level)
3678 {
3679         u32 i;
3680         int ret = -EINVAL;
3681
3682         for(i = 0; i < table->count; i++) {
3683                 if (value == table->dpm_levels[i].value) {
3684                         *boot_level = i;
3685                         ret = 0;
3686                 }
3687         }
3688
3689         return ret;
3690 }
3691
3692 static void ci_save_default_power_profile(struct amdgpu_device *adev)
3693 {
3694         struct ci_power_info *pi = ci_get_pi(adev);
3695         struct SMU7_Discrete_GraphicsLevel *levels =
3696                                 pi->smc_state_table.GraphicsLevel;
3697         uint32_t min_level = 0;
3698
3699         pi->default_gfx_power_profile.activity_threshold =
3700                         be16_to_cpu(levels[0].ActivityLevel);
3701         pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
3702         pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
3703         pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
3704
3705         pi->default_compute_power_profile = pi->default_gfx_power_profile;
3706         pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
3707
3708         /* Optimize compute power profile: Use only highest
3709          * 2 power levels (if more than 2 are available), Hysteresis:
3710          * 0ms up, 5ms down
3711          */
3712         if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
3713                 min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
3714         else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
3715                 min_level = 1;
3716         pi->default_compute_power_profile.min_sclk =
3717                         be32_to_cpu(levels[min_level].SclkFrequency);
3718
3719         pi->default_compute_power_profile.up_hyst = 0;
3720         pi->default_compute_power_profile.down_hyst = 5;
3721
3722         pi->gfx_power_profile = pi->default_gfx_power_profile;
3723         pi->compute_power_profile = pi->default_compute_power_profile;
3724 }
3725
/*
 * ci_init_smc_table - build the main SMU7 discrete DPM table and upload
 * it to SMC SRAM.
 *
 * Fills pi->smc_state_table step by step (default dpm tables, voltage
 * tables, per-block levels, boot values, timing/interval parameters),
 * byte-swaps the multi-byte fields to the SMC's big-endian layout, then
 * copies the table (minus the trailing PID controller sub-tables) into
 * SMC address space starting at pi->dpm_table_start.
 *
 * Returns 0 on success or the first negative error from a populate/
 * upload step.
 */
static int ci_init_smc_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(adev);
	if (ret)
		return ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(adev, table);

	ci_init_fps_limits(adev);

	/* Platform capability flags for the SMC. */
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	/* Ultra-low-voltage state, if supported. */
	if (ulv->supported) {
		ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	/* Per-block level tables: gfx, memory, pcie link, acpi, vce, acp,
	 * samu, memory timings, uvd. */
	ret = ci_populate_all_graphic_levels(adev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(adev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(adev, table);

	ret = ci_populate_smc_acpi_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(adev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(adev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(adev, table);
	if (ret)
		return ret;

	table->UvdBootLevel  = 0;
	table->VceBootLevel  = 0;
	table->AcpBootLevel  = 0;
	table->SamuBootLevel  = 0;
	table->GraphicsBootLevel  = 0;
	table->MemoryBootLevel  = 0;

	/* NOTE(review): the return values of these two lookups are assigned
	 * to ret but never checked; if the vbios boot clock is absent from
	 * the table the boot level simply stays 0 — confirm intentional. */
	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(adev, amdgpu_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(adev);
	if (ret)
		return ret;

	/* Interval/threshold parameters; temperature limits are converted
	 * from millidegrees to the SMC's Q8.8 format. */
	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable  = 1;
	else
		table->SVI2Enable  = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* The SMC expects big-endian; swap every multi-byte field before
	 * the upload.  Boot voltages are also scaled here. */
	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	/* Upload everything from SystemFlags onward, excluding the three
	 * trailing SMU7_PIDController sub-tables. */
	ret = amdgpu_ci_copy_bytes_to_smc(adev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
				   (u8 *)&table->SystemFlags,
				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
				   pi->sram_end);
	if (ret)
		return ret;

	ci_save_default_power_profile(adev);

	return 0;
}
3875
3876 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3877                                       struct ci_single_dpm_table *dpm_table,
3878                                       u32 low_limit, u32 high_limit)
3879 {
3880         u32 i;
3881
3882         for (i = 0; i < dpm_table->count; i++) {
3883                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3884                     (dpm_table->dpm_levels[i].value > high_limit))
3885                         dpm_table->dpm_levels[i].enabled = false;
3886                 else
3887                         dpm_table->dpm_levels[i].enabled = true;
3888         }
3889 }
3890
3891 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3892                                     u32 speed_low, u32 lanes_low,
3893                                     u32 speed_high, u32 lanes_high)
3894 {
3895         struct ci_power_info *pi = ci_get_pi(adev);
3896         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3897         u32 i, j;
3898
3899         for (i = 0; i < pcie_table->count; i++) {
3900                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3901                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3902                     (pcie_table->dpm_levels[i].value > speed_high) ||
3903                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3904                         pcie_table->dpm_levels[i].enabled = false;
3905                 else
3906                         pcie_table->dpm_levels[i].enabled = true;
3907         }
3908
3909         for (i = 0; i < pcie_table->count; i++) {
3910                 if (pcie_table->dpm_levels[i].enabled) {
3911                         for (j = i + 1; j < pcie_table->count; j++) {
3912                                 if (pcie_table->dpm_levels[j].enabled) {
3913                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3914                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3915                                                 pcie_table->dpm_levels[j].enabled = false;
3916                                 }
3917                         }
3918                 }
3919         }
3920 }
3921
3922 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3923                               struct amdgpu_ps *amdgpu_state)
3924 {
3925         struct ci_ps *state = ci_get_ps(amdgpu_state);
3926         struct ci_power_info *pi = ci_get_pi(adev);
3927         u32 high_limit_count;
3928
3929         if (state->performance_level_count < 1)
3930                 return -EINVAL;
3931
3932         if (state->performance_level_count == 1)
3933                 high_limit_count = 0;
3934         else
3935                 high_limit_count = 1;
3936
3937         ci_trim_single_dpm_states(adev,
3938                                   &pi->dpm_table.sclk_table,
3939                                   state->performance_levels[0].sclk,
3940                                   state->performance_levels[high_limit_count].sclk);
3941
3942         ci_trim_single_dpm_states(adev,
3943                                   &pi->dpm_table.mclk_table,
3944                                   state->performance_levels[0].mclk,
3945                                   state->performance_levels[high_limit_count].mclk);
3946
3947         ci_trim_pcie_dpm_states(adev,
3948                                 state->performance_levels[0].pcie_gen,
3949                                 state->performance_levels[0].pcie_lane,
3950                                 state->performance_levels[high_limit_count].pcie_gen,
3951                                 state->performance_levels[high_limit_count].pcie_lane);
3952
3953         return 0;
3954 }
3955
3956 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3957 {
3958         struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3959                 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3960         struct amdgpu_clock_voltage_dependency_table *vddc_table =
3961                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3962         u32 requested_voltage = 0;
3963         u32 i;
3964
3965         if (disp_voltage_table == NULL)
3966                 return -EINVAL;
3967         if (!disp_voltage_table->count)
3968                 return -EINVAL;
3969
3970         for (i = 0; i < disp_voltage_table->count; i++) {
3971                 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3972                         requested_voltage = disp_voltage_table->entries[i].v;
3973         }
3974
3975         for (i = 0; i < vddc_table->count; i++) {
3976                 if (requested_voltage <= vddc_table->entries[i].v) {
3977                         requested_voltage = vddc_table->entries[i].v;
3978                         return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3979                                                                   PPSMC_MSG_VddC_Request,
3980                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3981                                 0 : -EINVAL;
3982                 }
3983         }
3984
3985         return -EINVAL;
3986 }
3987
/*
 * ci_upload_dpm_level_enable_mask - push the current sclk/mclk DPM
 * level enable masks to the SMC.
 *
 * The display minimum-voltage request is sent first; its return value
 * is ignored here — presumably best effort, so a failed voltage
 * request does not block the mask update (NOTE(review): confirm).
 * Empty masks are skipped, as is either clock domain whose DPM key is
 * disabled.  The PCIe mask upload is compiled out below.
 *
 * Returns 0 on success, -EINVAL if the SMC rejects a mask message.
 */
static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(adev);

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

/* PCIe DPM mask upload intentionally disabled. */
#if 0
	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#endif

	return 0;
}
4029
/*
 * ci_find_dpm_states_clocks_in_dpm_table - decide which SMU DPM tables
 * need re-uploading for the requested power state.
 *
 * Compares the new state's highest-level sclk/mclk against the current
 * dpm tables and records the result as DPMTABLE_* flags in
 * pi->need_update_smu7_dpm_table: DPMTABLE_OD_UPDATE_* when the clock
 * is not present in the table (overdrive), DPMTABLE_UPDATE_MCLK when
 * the number of active crtcs changed.
 */
static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
						   struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX check display min clock requirements */
		/* NOTE(review): the condition below compares a constant with
		 * itself and is therefore always false — dead placeholder
		 * for the unimplemented check mentioned above. */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (adev->pm.dpm.current_active_crtc_count !=
	    adev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
4068
4069 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4070                                                        struct amdgpu_ps *amdgpu_state)
4071 {
4072         struct ci_power_info *pi = ci_get_pi(adev);
4073         struct ci_ps *state = ci_get_ps(amdgpu_state);
4074         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4075         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4076         struct ci_dpm_table *dpm_table = &pi->dpm_table;
4077         int ret;
4078
4079         if (!pi->need_update_smu7_dpm_table)
4080                 return 0;
4081
4082         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4083                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4084
4085         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4086                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4087
4088         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4089                 ret = ci_populate_all_graphic_levels(adev);
4090                 if (ret)
4091                         return ret;
4092         }
4093
4094         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4095                 ret = ci_populate_all_memory_levels(adev);
4096                 if (ret)
4097                         return ret;
4098         }
4099
4100         return 0;
4101 }
4102
/*
 * ci_enable_uvd_dpm - turn UVD DPM on or off in the SMC.
 *
 * On enable, builds the UVD level mask from the highest level down,
 * setting every level whose voltage fits under the current (AC or DC)
 * vddc limit; without per-level UVD DPM support (caps_uvd_dpm) only
 * the single highest usable level is set.  While UVD is active, MCLK
 * DPM level 0 is masked off if it was previously enabled, and restored
 * again on disable — presumably to keep memory off its lowest level
 * during decode (NOTE(review): confirm).
 *
 * Returns 0 if the final UVDDPM_(En|Dis)able message is acked,
 * -EINVAL otherwise; the SetEnabledMask messages are fire-and-forget.
 */
static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		/* Top-down mask build within the vddc limit. */
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		/* Mask MCLK level 0 off while UVD runs. */
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		/* Restore MCLK level 0 once UVD stops. */
		if (pi->uvd_enabled) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
4151
4152 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4153 {
4154         struct ci_power_info *pi = ci_get_pi(adev);
4155         const struct amdgpu_clock_and_voltage_limits *max_limits;
4156         int i;
4157
4158         if (adev->pm.dpm.ac_power)
4159                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4160         else
4161                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4162
4163         if (enable) {
4164                 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4165                 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4166                         if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4167                                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4168
4169                                 if (!pi->caps_vce_dpm)
4170                                         break;
4171                         }
4172                 }
4173
4174                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4175                                                   PPSMC_MSG_VCEDPM_SetEnabledMask,
4176                                                   pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4177         }
4178
4179         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4180                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4181                 0 : -EINVAL;
4182 }
4183
4184 #if 0
/*
 * ci_enable_samu_dpm - turn SAMU DPM on or off in the SMC.
 *
 * Mirrors ci_enable_uvd_dpm() for the SAMU block: build the level mask
 * top-down within the current (AC or DC) vddc limit, push it, then send
 * the enable/disable message.  Currently compiled out by the
 * surrounding #if 0.
 *
 * Returns 0 if the SAMUDPM_(En|Dis)able message is acked, else -EINVAL.
 */
static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		/* Top-down mask build within the vddc limit. */
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}
	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
4215
/*
 * ci_enable_acp_dpm - turn ACP DPM on or off in the SMC.
 *
 * Mirrors ci_enable_uvd_dpm() for the ACP block: build the level mask
 * top-down within the current (AC or DC) vddc limit, push it, then send
 * the enable/disable message.  Currently compiled out by the
 * surrounding #if 0.
 *
 * Returns 0 if the ACPDPM_(En|Dis)able message is acked, else -EINVAL.
 */
static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		/* Top-down mask build within the vddc limit. */
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
						  PPSMC_MSG_ACPDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
4247 #endif
4248
4249 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4250 {
4251         struct ci_power_info *pi = ci_get_pi(adev);
4252         u32 tmp;
4253         int ret = 0;
4254
4255         if (!gate) {
4256                 /* turn the clocks on when decoding */
4257                 if (pi->caps_uvd_dpm ||
4258                     (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4259                         pi->smc_state_table.UvdBootLevel = 0;
4260                 else
4261                         pi->smc_state_table.UvdBootLevel =
4262                                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4263
4264                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4265                 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4266                 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4267                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4268                 ret = ci_enable_uvd_dpm(adev, true);
4269         } else {
4270                 ret = ci_enable_uvd_dpm(adev, false);
4271                 if (ret)
4272                         return ret;
4273         }
4274
4275         return ret;
4276 }
4277
4278 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4279 {
4280         u8 i;
4281         u32 min_evclk = 30000; /* ??? */
4282         struct amdgpu_vce_clock_voltage_dependency_table *table =
4283                 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4284
4285         for (i = 0; i < table->count; i++) {
4286                 if (table->entries[i].evclk >= min_evclk)
4287                         return i;
4288         }
4289
4290         return table->count - 1;
4291 }
4292
4293 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4294                              struct amdgpu_ps *amdgpu_new_state,
4295                              struct amdgpu_ps *amdgpu_current_state)
4296 {
4297         struct ci_power_info *pi = ci_get_pi(adev);
4298         int ret = 0;
4299         u32 tmp;
4300
4301         if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4302                 if (amdgpu_new_state->evclk) {
4303                         pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4304                         tmp = RREG32_SMC(ixDPM_TABLE_475);
4305                         tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4306                         tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4307                         WREG32_SMC(ixDPM_TABLE_475, tmp);
4308
4309                         ret = ci_enable_vce_dpm(adev, true);
4310                 } else {
4311                         ret = ci_enable_vce_dpm(adev, false);
4312                         if (ret)
4313                                 return ret;
4314                 }
4315         }
4316         return ret;
4317 }
4318
4319 #if 0
/*
 * Pass the gate request straight through to SAMU DPM enable.
 * NOTE(review): the flag is passed unchanged here while
 * ci_update_acp_dpm() below passes !gate — confirm the intended polarity
 * if this (currently #if 0'd out) code is ever re-enabled.
 */
static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	return ci_enable_samu_dpm(adev, gate);
}
4324
/*
 * Gate or ungate ACP DPM.  When ungating, program boot level 0 into
 * DPM_TABLE_475 before enabling ACP DPM; when gating, just disable it.
 * NOTE(review): AcpBootLevel_MASK / AcpBootLevel() do not follow the
 * DPM_TABLE_475__*_MASK / __SHIFT naming used elsewhere in this file and
 * may not exist — presumably why this block is compiled out (#if 0).
 */
static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(adev, !gate);
}
4341 #endif
4342
4343 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4344                                              struct amdgpu_ps *amdgpu_state)
4345 {
4346         struct ci_power_info *pi = ci_get_pi(adev);
4347         int ret;
4348
4349         ret = ci_trim_dpm_states(adev, amdgpu_state);
4350         if (ret)
4351                 return ret;
4352
4353         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4354                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4355         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4356                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4357         pi->last_mclk_dpm_enable_mask =
4358                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4359         if (pi->uvd_enabled) {
4360                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4361                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4362         }
4363         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4364                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4365
4366         return 0;
4367 }
4368
4369 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4370                                        u32 level_mask)
4371 {
4372         u32 level = 0;
4373
4374         while ((level_mask & (1 << level)) == 0)
4375                 level++;
4376
4377         return level;
4378 }
4379
4380
/*
 * Force the DPM hardware to a fixed performance level, or return it to
 * automatic operation.
 *
 * @adev: amdgpu_device pointer
 * @level: AMD_DPM_FORCED_LEVEL_HIGH / _LOW / _AUTO
 *
 * HIGH forces sclk/mclk/pcie to their highest enabled level, LOW to
 * their lowest; AUTO unforces PCIe and re-uploads the full enable masks.
 * After each force request the SMC's current-profile index register is
 * polled (bounded by adev->usec_timeout) until it reflects the request.
 * Returns 0 on success, negative error code on failure.
 */
static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
					  enum amd_dpm_forced_level level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp, levels, i;
	int ret;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			/* index of the highest set bit = top enabled level */
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				/* NOTE(review): passes 'level' (the enum) where the
				 * sclk/mclk paths below pass 'levels' — looks
				 * suspicious, confirm intent before changing */
				ret = ci_dpm_force_state_pcie(adev, level);
				if (ret)
					return ret;
				/* wait until the SMC reports the forced PCIe index */
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			/* index of the highest set bit = top enabled level */
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(adev, levels);
				if (ret)
					return ret;
				/* wait until the SMC reports the forced SCLK index */
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			/* index of the highest set bit = top enabled level */
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(adev, levels);
				if (ret)
					return ret;
				/* wait until the SMC reports the forced MCLK index */
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(adev, levels);
			if (ret)
				return ret;
			/* wait until the SMC reports the forced SCLK index */
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(adev, levels);
			if (ret)
				return ret;
			/* wait until the SMC reports the forced MCLK index */
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(adev, levels);
			if (ret)
				return ret;
			/* wait until the SMC reports the forced PCIe index */
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
				TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
				TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		/* release PCIe forcing and restore the computed enable masks */
		if (!pi->pcie_dpm_key_disabled) {
			PPSMC_Result smc_result;

			smc_result = amdgpu_ci_send_msg_to_smc(adev,
							       PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		ret = ci_upload_dpm_level_enable_mask(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}
4516
4517 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4518                                        struct ci_mc_reg_table *table)
4519 {
4520         u8 i, j, k;
4521         u32 temp_reg;
4522
4523         for (i = 0, j = table->last; i < table->last; i++) {
4524                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4525                         return -EINVAL;
4526                 switch(table->mc_reg_address[i].s1) {
4527                 case mmMC_SEQ_MISC1:
4528                         temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4529                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4530                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4531                         for (k = 0; k < table->num_entries; k++) {
4532                                 table->mc_reg_table_entry[k].mc_data[j] =
4533                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4534                         }
4535                         j++;
4536                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4537                                 return -EINVAL;
4538
4539                         temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4540                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4541                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4542                         for (k = 0; k < table->num_entries; k++) {
4543                                 table->mc_reg_table_entry[k].mc_data[j] =
4544                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4545                                 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4546                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4547                         }
4548                         j++;
4549                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4550                                 return -EINVAL;
4551
4552                         if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4553                                 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4554                                 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4555                                 for (k = 0; k < table->num_entries; k++) {
4556                                         table->mc_reg_table_entry[k].mc_data[j] =
4557                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4558                                 }
4559                                 j++;
4560                                 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4561                                         return -EINVAL;
4562                         }
4563                         break;
4564                 case mmMC_SEQ_RESERVE_M:
4565                         temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4566                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4567                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4568                         for (k = 0; k < table->num_entries; k++) {
4569                                 table->mc_reg_table_entry[k].mc_data[j] =
4570                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4571                         }
4572                         j++;
4573                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4574                                 return -EINVAL;
4575                         break;
4576                 default:
4577                         break;
4578                 }
4579
4580         }
4581
4582         table->last = j;
4583
4584         return 0;
4585 }
4586
4587 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4588 {
4589         bool result = true;
4590
4591         switch(in_reg) {
4592         case mmMC_SEQ_RAS_TIMING:
4593                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4594                 break;
4595         case mmMC_SEQ_DLL_STBY:
4596                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4597                 break;
4598         case mmMC_SEQ_G5PDX_CMD0:
4599                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4600                 break;
4601         case mmMC_SEQ_G5PDX_CMD1:
4602                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4603                 break;
4604         case mmMC_SEQ_G5PDX_CTRL:
4605                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4606                 break;
4607         case mmMC_SEQ_CAS_TIMING:
4608                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4609             break;
4610         case mmMC_SEQ_MISC_TIMING:
4611                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4612                 break;
4613         case mmMC_SEQ_MISC_TIMING2:
4614                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4615                 break;
4616         case mmMC_SEQ_PMG_DVS_CMD:
4617                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4618                 break;
4619         case mmMC_SEQ_PMG_DVS_CTL:
4620                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4621                 break;
4622         case mmMC_SEQ_RD_CTL_D0:
4623                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4624                 break;
4625         case mmMC_SEQ_RD_CTL_D1:
4626                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4627                 break;
4628         case mmMC_SEQ_WR_CTL_D0:
4629                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4630                 break;
4631         case mmMC_SEQ_WR_CTL_D1:
4632                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4633                 break;
4634         case mmMC_PMG_CMD_EMRS:
4635                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4636                 break;
4637         case mmMC_PMG_CMD_MRS:
4638                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4639                 break;
4640         case mmMC_PMG_CMD_MRS1:
4641                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4642                 break;
4643         case mmMC_SEQ_PMG_TIMING:
4644                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4645                 break;
4646         case mmMC_PMG_CMD_MRS2:
4647                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4648                 break;
4649         case mmMC_SEQ_WR_CTL_2:
4650                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4651                 break;
4652         default:
4653                 result = false;
4654                 break;
4655         }
4656
4657         return result;
4658 }
4659
4660 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4661 {
4662         u8 i, j;
4663
4664         for (i = 0; i < table->last; i++) {
4665                 for (j = 1; j < table->num_entries; j++) {
4666                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4667                             table->mc_reg_table_entry[j].mc_data[i]) {
4668                                 table->valid_flag |= 1 << i;
4669                                 break;
4670                         }
4671                 }
4672         }
4673 }
4674
4675 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4676 {
4677         u32 i;
4678         u16 address;
4679
4680         for (i = 0; i < table->last; i++) {
4681                 table->mc_reg_address[i].s0 =
4682                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4683                         address : table->mc_reg_address[i].s1;
4684         }
4685 }
4686
4687 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4688                                       struct ci_mc_reg_table *ci_table)
4689 {
4690         u8 i, j;
4691
4692         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4693                 return -EINVAL;
4694         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4695                 return -EINVAL;
4696
4697         for (i = 0; i < table->last; i++)
4698                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4699
4700         ci_table->last = table->last;
4701
4702         for (i = 0; i < table->num_entries; i++) {
4703                 ci_table->mc_reg_table_entry[i].mclk_max =
4704                         table->mc_reg_table_entry[i].mclk_max;
4705                 for (j = 0; j < table->last; j++)
4706                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4707                                 table->mc_reg_table_entry[i].mc_data[j];
4708         }
4709         ci_table->num_entries = table->num_entries;
4710
4711         return 0;
4712 }
4713
/*
 * Apply board-specific MC sequence fixups for Hawaii parts 0x67B0 /
 * 0x67B1 whose MC_SEQ_MISC0 strap reads 0x3xx: patch the register values
 * of the 125 MHz and 137.5 MHz memory-strap entries, then tweak an MC
 * sequencer debug register.  No-op for other boards/straps.
 * Returns 0 on success, -EINVAL on a bounds violation.
 */
static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	/* only parts whose MISC0 strap nibble reads 0x3 need the patch */
	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			/* NOTE(review): this checks the loop-invariant
			 * table->last rather than i — the same condition every
			 * iteration; confirm whether a per-index check was
			 * intended */
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch (table->mc_reg_address[i].s1) {
			case mmMC_SEQ_MISC1:
				/* force the low 3 bits for the 125/137.5 MHz straps */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case mmMC_SEQ_WR_CTL_D0:
				/* override write-control fields for those straps */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_D1:
				/* same override as channel D0 */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_2:
				/* clear WR_CTL_2 entirely for those straps */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case mmMC_SEQ_CAS_TIMING:
				/* strap-specific CAS timing values */
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case mmMC_SEQ_MISC_TIMING:
				/* strap-specific misc timing values */
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		/* read-modify-write MC sequencer debug reg 3: set bit 16,
		 * clear bits 17-18 */
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}
4803
4804 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4805 {
4806         struct ci_power_info *pi = ci_get_pi(adev);
4807         struct atom_mc_reg_table *table;
4808         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4809         u8 module_index = ci_get_memory_module_index(adev);
4810         int ret;
4811
4812         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4813         if (!table)
4814                 return -ENOMEM;
4815
4816         WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4817         WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4818         WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4819         WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4820         WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4821         WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4822         WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4823         WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4824         WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4825         WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4826         WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4827         WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4828         WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4829         WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4830         WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4831         WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4832         WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4833         WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4834         WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4835         WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4836
4837         ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4838         if (ret)
4839                 goto init_mc_done;
4840
4841         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4842         if (ret)
4843                 goto init_mc_done;
4844
4845         ci_set_s0_mc_reg_index(ci_table);
4846
4847         ret = ci_register_patching_mc_seq(adev, ci_table);
4848         if (ret)
4849                 goto init_mc_done;
4850
4851         ret = ci_set_mc_special_registers(adev, ci_table);
4852         if (ret)
4853                 goto init_mc_done;
4854
4855         ci_set_valid_flag(ci_table);
4856
4857 init_mc_done:
4858         kfree(table);
4859
4860         return ret;
4861 }
4862
4863 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4864                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4865 {
4866         struct ci_power_info *pi = ci_get_pi(adev);
4867         u32 i, j;
4868
4869         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4870                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4871                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4872                                 return -EINVAL;
4873                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4874                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4875                         i++;
4876                 }
4877         }
4878
4879         mc_reg_table->last = (u8)i;
4880
4881         return 0;
4882 }
4883
4884 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4885                                     SMU7_Discrete_MCRegisterSet *data,
4886                                     u32 num_entries, u32 valid_flag)
4887 {
4888         u32 i, j;
4889
4890         for (i = 0, j = 0; j < num_entries; j++) {
4891                 if (valid_flag & (1 << j)) {
4892                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4893                         i++;
4894                 }
4895         }
4896 }
4897
4898 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4899                                                  const u32 memory_clock,
4900                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4901 {
4902         struct ci_power_info *pi = ci_get_pi(adev);
4903         u32 i = 0;
4904
4905         for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4906                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4907                         break;
4908         }
4909
4910         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4911                 --i;
4912
4913         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4914                                 mc_reg_table_data, pi->mc_reg_table.last,
4915                                 pi->mc_reg_table.valid_flag);
4916 }
4917
4918 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4919                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4920 {
4921         struct ci_power_info *pi = ci_get_pi(adev);
4922         u32 i;
4923
4924         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4925                 ci_convert_mc_reg_table_entry_to_smc(adev,
4926                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4927                                                      &mc_reg_table->data[i]);
4928 }
4929
4930 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4931 {
4932         struct ci_power_info *pi = ci_get_pi(adev);
4933         int ret;
4934
4935         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4936
4937         ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4938         if (ret)
4939                 return ret;
4940         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4941
4942         return amdgpu_ci_copy_bytes_to_smc(adev,
4943                                     pi->mc_reg_table_start,
4944                                     (u8 *)&pi->smc_mc_reg_table,
4945                                     sizeof(SMU7_Discrete_MCRegisters),
4946                                     pi->sram_end);
4947 }
4948
4949 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4950 {
4951         struct ci_power_info *pi = ci_get_pi(adev);
4952
4953         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4954                 return 0;
4955
4956         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4957
4958         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4959
4960         return amdgpu_ci_copy_bytes_to_smc(adev,
4961                                     pi->mc_reg_table_start +
4962                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4963                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4964                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4965                                     pi->dpm_table.mclk_table.count,
4966                                     pi->sram_end);
4967 }
4968
4969 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4970 {
4971         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4972
4973         tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4974         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4975 }
4976
4977 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4978                                                       struct amdgpu_ps *amdgpu_state)
4979 {
4980         struct ci_ps *state = ci_get_ps(amdgpu_state);
4981         int i;
4982         u16 pcie_speed, max_speed = 0;
4983
4984         for (i = 0; i < state->performance_level_count; i++) {
4985                 pcie_speed = state->performance_levels[i].pcie_gen;
4986                 if (max_speed < pcie_speed)
4987                         max_speed = pcie_speed;
4988         }
4989
4990         return max_speed;
4991 }
4992
4993 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4994 {
4995         u32 speed_cntl = 0;
4996
4997         speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4998                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4999         speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
5000
5001         return (u16)speed_cntl;
5002 }
5003
5004 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
5005 {
5006         u32 link_width = 0;
5007
5008         link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
5009                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
5010         link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
5011
5012         switch (link_width) {
5013         case 1:
5014                 return 1;
5015         case 2:
5016                 return 2;
5017         case 3:
5018                 return 4;
5019         case 4:
5020                 return 8;
5021         case 0:
5022         case 6:
5023         default:
5024                 return 16;
5025         }
5026 }
5027
/* Before switching power states, ask the platform (via ACPI _PSPP) to raise
 * the PCIe link speed if the new state wants a faster link than the current
 * one.  On failure, remember a forced lower gen in pi->force_pcie_gen so the
 * state tables are clamped accordingly.  Downshifts are deferred: they only
 * set pi->pspp_notify_required and are requested after the state change.
 */
static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							     struct amdgpu_ps *amdgpu_new_state,
							     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	enum amdgpu_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case AMDGPU_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			/* GEN3 request failed: force GEN2 and, unless the
			 * link is already there, fall through to request it.
			 */
			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
			if (current_link_speed == AMDGPU_PCIE_GEN2)
				break;
			/* fall through */
		case AMDGPU_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			/* fall through */
#endif
		default:
			/* No ACPI help available: pin to the current speed. */
			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}
5066
5067 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5068                                                            struct amdgpu_ps *amdgpu_new_state,
5069                                                            struct amdgpu_ps *amdgpu_current_state)
5070 {
5071         struct ci_power_info *pi = ci_get_pi(adev);
5072         enum amdgpu_pcie_gen target_link_speed =
5073                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5074         u8 request;
5075
5076         if (pi->pspp_notify_required) {
5077                 if (target_link_speed == AMDGPU_PCIE_GEN3)
5078                         request = PCIE_PERF_REQ_PECI_GEN3;
5079                 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5080                         request = PCIE_PERF_REQ_PECI_GEN2;
5081                 else
5082                         request = PCIE_PERF_REQ_PECI_GEN1;
5083
5084                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5085                     (ci_get_current_pcie_speed(adev) > 0))
5086                         return;
5087
5088 #ifdef CONFIG_ACPI
5089                 amdgpu_acpi_pcie_performance_request(adev, request, false);
5090 #endif
5091         }
5092 }
5093
5094 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5095 {
5096         struct ci_power_info *pi = ci_get_pi(adev);
5097         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5098                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5099         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5100                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5101         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5102                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5103
5104         if (allowed_sclk_vddc_table == NULL)
5105                 return -EINVAL;
5106         if (allowed_sclk_vddc_table->count < 1)
5107                 return -EINVAL;
5108         if (allowed_mclk_vddc_table == NULL)
5109                 return -EINVAL;
5110         if (allowed_mclk_vddc_table->count < 1)
5111                 return -EINVAL;
5112         if (allowed_mclk_vddci_table == NULL)
5113                 return -EINVAL;
5114         if (allowed_mclk_vddci_table->count < 1)
5115                 return -EINVAL;
5116
5117         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5118         pi->max_vddc_in_pp_table =
5119                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5120
5121         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5122         pi->max_vddci_in_pp_table =
5123                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5124
5125         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5126                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5127         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5128                 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5129         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5130                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5131         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5132                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5133
5134         return 0;
5135 }
5136
5137 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5138 {
5139         struct ci_power_info *pi = ci_get_pi(adev);
5140         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5141         u32 leakage_index;
5142
5143         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5144                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5145                         *vddc = leakage_table->actual_voltage[leakage_index];
5146                         break;
5147                 }
5148         }
5149 }
5150
5151 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5152 {
5153         struct ci_power_info *pi = ci_get_pi(adev);
5154         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5155         u32 leakage_index;
5156
5157         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5158                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5159                         *vddci = leakage_table->actual_voltage[leakage_index];
5160                         break;
5161                 }
5162         }
5163 }
5164
5165 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5166                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5167 {
5168         u32 i;
5169
5170         if (table) {
5171                 for (i = 0; i < table->count; i++)
5172                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5173         }
5174 }
5175
5176 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5177                                                                        struct amdgpu_clock_voltage_dependency_table *table)
5178 {
5179         u32 i;
5180
5181         if (table) {
5182                 for (i = 0; i < table->count; i++)
5183                         ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5184         }
5185 }
5186
5187 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5188                                                                           struct amdgpu_vce_clock_voltage_dependency_table *table)
5189 {
5190         u32 i;
5191
5192         if (table) {
5193                 for (i = 0; i < table->count; i++)
5194                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5195         }
5196 }
5197
5198 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5199                                                                           struct amdgpu_uvd_clock_voltage_dependency_table *table)
5200 {
5201         u32 i;
5202
5203         if (table) {
5204                 for (i = 0; i < table->count; i++)
5205                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5206         }
5207 }
5208
5209 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5210                                                                    struct amdgpu_phase_shedding_limits_table *table)
5211 {
5212         u32 i;
5213
5214         if (table) {
5215                 for (i = 0; i < table->count; i++)
5216                         ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5217         }
5218 }
5219
5220 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5221                                                             struct amdgpu_clock_and_voltage_limits *table)
5222 {
5223         if (table) {
5224                 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5225                 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5226         }
5227 }
5228
5229 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5230                                                          struct amdgpu_cac_leakage_table *table)
5231 {
5232         u32 i;
5233
5234         if (table) {
5235                 for (i = 0; i < table->count; i++)
5236                         ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5237         }
5238 }
5239
/* Walk every powerplay dependency/limits table and translate VBIOS
 * leakage voltage IDs into real measured voltages, so all later table
 * processing works with actual voltage values.
 */
static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
{

	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
								   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
							       &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(adev,
						     &adev->pm.dpm.dyn_state.cac_leakage_table);

}
5269
5270 static void ci_update_current_ps(struct amdgpu_device *adev,
5271                                  struct amdgpu_ps *rps)
5272 {
5273         struct ci_ps *new_ps = ci_get_ps(rps);
5274         struct ci_power_info *pi = ci_get_pi(adev);
5275
5276         pi->current_rps = *rps;
5277         pi->current_ps = *new_ps;
5278         pi->current_rps.ps_priv = &pi->current_ps;
5279         adev->pm.dpm.current_ps = &pi->current_rps;
5280 }
5281
5282 static void ci_update_requested_ps(struct amdgpu_device *adev,
5283                                    struct amdgpu_ps *rps)
5284 {
5285         struct ci_ps *new_ps = ci_get_ps(rps);
5286         struct ci_power_info *pi = ci_get_pi(adev);
5287
5288         pi->requested_rps = *rps;
5289         pi->requested_ps = *new_ps;
5290         pi->requested_rps.ps_priv = &pi->requested_ps;
5291         adev->pm.dpm.requested_ps = &pi->requested_rps;
5292 }
5293
5294 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5295 {
5296         struct ci_power_info *pi = ci_get_pi(adev);
5297         struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5298         struct amdgpu_ps *new_ps = &requested_ps;
5299
5300         ci_update_requested_ps(adev, new_ps);
5301
5302         ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5303
5304         return 0;
5305 }
5306
5307 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5308 {
5309         struct ci_power_info *pi = ci_get_pi(adev);
5310         struct amdgpu_ps *new_ps = &pi->requested_rps;
5311
5312         ci_update_current_ps(adev, new_ps);
5313 }
5314

/* One-time ASIC-level DPM setup: latch the boot clock registers, enable
 * power management for the ACPI state, and initialize the sclk t value.
 */
static void ci_dpm_setup_asic(struct amdgpu_device *adev)
{
	ci_read_clock_registers(adev);
	ci_enable_acpi_power_management(adev);
	ci_init_sclk_t(adev);
}
5322
/* Bring up the full CI DPM stack: voltage tables, spread spectrum and
 * thermal protection, SMC firmware upload and tables, then the DPM state
 * machine, DIDT/CAC/power containment and the thermal controller.  The
 * sequence is order-critical (firmware must be up before the tables, the
 * tables before the SMC is started, the SMC before DPM features).
 *
 * Returns 0 on success or the first failing step's error code; a partial
 * bring-up is not rolled back here.
 */
static int ci_dpm_enable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(adev);
		ret = ci_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		/* Dynamic AC timing is optional: fall back silently if the
		 * MC register table cannot be initialized.
		 */
		ret = ci_initialize_mc_reg_table(adev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, true);
	ci_program_sstp(adev);
	ci_enable_display_gap(adev);
	ci_program_vc(adev);
	ret = ci_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(adev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	/* All tables are in place: start the SMC and enable DPM features. */
	ci_dpm_start_smc(adev);
	ci_enable_vr_hot_gpio_interrupt(adev);
	ret = ci_notify_smc_display_change(adev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(adev, true);
	ret = ci_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(adev);

	/* Start from the boot state until the first state transition. */
	ci_update_current_ps(adev, boot_ps);

	return 0;
}
5445
/* Tear down the CI DPM stack in roughly the reverse order of
 * ci_dpm_enable(): disable thermal interrupts, power-gate UVD, then (only
 * if the SMC is still running) stop all DPM features, reset the SMC, and
 * fall back to the boot power state.
 */
static void ci_dpm_disable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	ci_dpm_powergate_uvd(adev, true);

	/* If the SMC was never started there is nothing more to undo. */
	if (!amdgpu_ci_is_smc_running(adev))
		return;

	ci_thermal_stop_thermal_controller(adev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, false);
	ci_enable_power_containment(adev, false);
	ci_enable_smc_cac(adev, false);
	ci_enable_didt(adev, false);
	ci_enable_spread_spectrum(adev, false);
	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(adev);
	ci_enable_ds_master_switch(adev, false);
	ci_enable_ulv(adev, false);
	ci_clear_vc(adev);
	ci_reset_to_default(adev);
	ci_dpm_stop_smc(adev);
	ci_force_switch_to_arb_f0(adev);
	ci_enable_thermal_based_sclk_dpm(adev, false);

	ci_update_current_ps(adev, boot_ps);
}
5481
/* Transition from the current to the requested power state.  Follows the
 * SMC protocol: freeze sclk/mclk DPM, upload the new level tables and
 * enable masks, update VCE/sclk-t/MC-timing parameters, then unfreeze and
 * apply the level mask.  PCIe link-speed changes are requested before the
 * transition and (for downshifts) notified after it.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int ci_dpm_set_power_state(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(adev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(adev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(adev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(adev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);

	return 0;
}
5546
#if 0
/* Currently unused: would force the boot power state on an ASIC reset.
 * Kept for reference; re-enable if a dpm reset hook is wired up again.
 */
static void ci_dpm_reset_asic(struct amdgpu_device *adev)
{
	ci_set_boot_state(adev);
}
#endif
5553
/* dpm hook: reprogram the display gap when the display config changes. */
static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
	ci_program_display_gap(adev);
}
5558
/* Overlay of every ATOM powerplay table revision; the parser selects the
 * member matching the table's frev/crev.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
5567
/* Overlay of the per-ASIC-family clock info records; this driver only
 * reads the .ci member.
 */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};
5576
/* Overlay of the v1/v2 powerplay state record layouts. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
5581
5582 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5583                                           struct amdgpu_ps *rps,
5584                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5585                                           u8 table_rev)
5586 {
5587         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5588         rps->class = le16_to_cpu(non_clock_info->usClassification);
5589         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5590
5591         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5592                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5593                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5594         } else {
5595                 rps->vclk = 0;
5596                 rps->dclk = 0;
5597         }
5598
5599         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5600                 adev->pm.dpm.boot_ps = rps;
5601         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5602                 adev->pm.dpm.uvd_ps = rps;
5603 }
5604
/* Decode one CI clock-info record into performance level @index of @rps,
 * and update the driver-wide PCIe gen/lane envelopes and ACPI/ULV state
 * derived from the state's classification bits.
 */
static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	/* Clocks are split across a 16-bit low and an 8-bit high part. */
	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	/* Clamp the table's PCIe requests to what system + VBIOS support. */
	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
						   pi->sys_pcie_mask,
						   pi->vbios_boot_state.pcie_gen_bootup_value,
						   clock_info->ci.ucPCIEGen);
	pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
						     pi->vbios_boot_state.pcie_lane_bootup_value,
						     le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	/* Track min/max PCIe gen and lanes separately for battery vs.
	 * performance UI classes.
	 */
	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
5673
5674 static int ci_parse_power_table(struct amdgpu_device *adev)
5675 {
5676         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5677         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5678         union pplib_power_state *power_state;
5679         int i, j, k, non_clock_array_index, clock_array_index;
5680         union pplib_clock_info *clock_info;
5681         struct _StateArray *state_array;
5682         struct _ClockInfoArray *clock_info_array;
5683         struct _NonClockInfoArray *non_clock_info_array;
5684         union power_info *power_info;
5685         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5686         u16 data_offset;
5687         u8 frev, crev;
5688         u8 *power_state_offset;
5689         struct ci_ps *ps;
5690
5691         if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5692                                    &frev, &crev, &data_offset))
5693                 return -EINVAL;
5694         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5695
5696         amdgpu_add_thermal_controller(adev);
5697
5698         state_array = (struct _StateArray *)
5699                 (mode_info->atom_context->bios + data_offset +
5700                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5701         clock_info_array = (struct _ClockInfoArray *)
5702                 (mode_info->atom_context->bios + data_offset +
5703                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5704         non_clock_info_array = (struct _NonClockInfoArray *)
5705                 (mode_info->atom_context->bios + data_offset +
5706                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5707
5708         adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
5709                                   state_array->ucNumEntries, GFP_KERNEL);
5710         if (!adev->pm.dpm.ps)
5711                 return -ENOMEM;
5712         power_state_offset = (u8 *)state_array->states;
5713         for (i = 0; i < state_array->ucNumEntries; i++) {
5714                 u8 *idx;
5715                 power_state = (union pplib_power_state *)power_state_offset;
5716                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5717                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5718                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5719                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5720                 if (ps == NULL) {
5721                         kfree(adev->pm.dpm.ps);
5722                         return -ENOMEM;
5723                 }
5724                 adev->pm.dpm.ps[i].ps_priv = ps;
5725                 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5726                                               non_clock_info,
5727                                               non_clock_info_array->ucEntrySize);
5728                 k = 0;
5729                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5730                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5731                         clock_array_index = idx[j];
5732                         if (clock_array_index >= clock_info_array->ucNumEntries)
5733                                 continue;
5734                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5735                                 break;
5736                         clock_info = (union pplib_clock_info *)
5737                                 ((u8 *)&clock_info_array->clockInfo[0] +
5738                                  (clock_array_index * clock_info_array->ucEntrySize));
5739                         ci_parse_pplib_clock_info(adev,
5740                                                   &adev->pm.dpm.ps[i], k,
5741                                                   clock_info);
5742                         k++;
5743                 }
5744                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5745         }
5746         adev->pm.dpm.num_ps = state_array->ucNumEntries;
5747
5748         /* fill in the vce power states */
5749         for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
5750                 u32 sclk, mclk;
5751                 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5752                 clock_info = (union pplib_clock_info *)
5753                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5754                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5755                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5756                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5757                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5758                 adev->pm.dpm.vce_states[i].sclk = sclk;
5759                 adev->pm.dpm.vce_states[i].mclk = mclk;
5760         }
5761
5762         return 0;
5763 }
5764
5765 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5766                                     struct ci_vbios_boot_state *boot_state)
5767 {
5768         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5769         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5770         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5771         u8 frev, crev;
5772         u16 data_offset;
5773
5774         if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5775                                    &frev, &crev, &data_offset)) {
5776                 firmware_info =
5777                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5778                                                     data_offset);
5779                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5780                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5781                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5782                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5783                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5784                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5785                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5786
5787                 return 0;
5788         }
5789         return -EINVAL;
5790 }
5791
/*
 * ci_dpm_fini - free all software-side DPM state
 *
 * Releases the per-state ci_ps private data, the power-state array, the
 * ci_power_info private data, the dispclk/vddc dependency table, and the
 * extended power table.  Safe against partially-initialized state because
 * kfree(NULL) is a no-op and num_ps is 0 before the table is parsed.
 */
static void ci_dpm_fini(struct amdgpu_device *adev)
{
        int i;

        /* free the ps_priv of every parsed state before the array itself */
        for (i = 0; i < adev->pm.dpm.num_ps; i++) {
                kfree(adev->pm.dpm.ps[i].ps_priv);
        }
        kfree(adev->pm.dpm.ps);
        kfree(adev->pm.dpm.priv);
        kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
        amdgpu_free_extended_power_table(adev);
}
5804
5805 /**
5806  * ci_dpm_init_microcode - load ucode images from disk
5807  *
5808  * @adev: amdgpu_device pointer
5809  *
5810  * Use the firmware interface to load the ucode images into
5811  * the driver (not loaded into hw).
5812  * Returns 0 on success, error on failure.
5813  */
5814 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5815 {
5816         const char *chip_name;
5817         char fw_name[30];
5818         int err;
5819
5820         DRM_DEBUG("\n");
5821
5822         switch (adev->asic_type) {
5823         case CHIP_BONAIRE:
5824                 if ((adev->pdev->revision == 0x80) ||
5825                     (adev->pdev->revision == 0x81) ||
5826                     (adev->pdev->device == 0x665f))
5827                         chip_name = "bonaire_k";
5828                 else
5829                         chip_name = "bonaire";
5830                 break;
5831         case CHIP_HAWAII:
5832                 if (adev->pdev->revision == 0x80)
5833                         chip_name = "hawaii_k";
5834                 else
5835                         chip_name = "hawaii";
5836                 break;
5837         case CHIP_KAVERI:
5838         case CHIP_KABINI:
5839         case CHIP_MULLINS:
5840         default: BUG();
5841         }
5842
5843         snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5844         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5845         if (err)
5846                 goto out;
5847         err = amdgpu_ucode_validate(adev->pm.fw);
5848
5849 out:
5850         if (err) {
5851                 pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
5852                 release_firmware(adev->pm.fw);
5853                 adev->pm.fw = NULL;
5854         }
5855         return err;
5856 }
5857
/*
 * ci_dpm_init - one-time software-side DPM initialization
 *
 * Allocates the ci_power_info private data, reads the vbios boot values
 * and power tables, seeds the default DPM activity targets, thresholds
 * and capability flags, probes the board GPIOs and voltage controllers,
 * and builds the fixed dispclk->vddc dependency table.
 * Returns 0 on success or a negative error code; on failure everything
 * allocated so far is torn down via ci_dpm_fini().
 */
static int ci_dpm_init(struct amdgpu_device *adev)
{
        int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
        SMU7_Discrete_DpmTable *dpm_table;
        struct amdgpu_gpio_rec gpio;
        u16 data_offset, size;
        u8 frev, crev;
        struct ci_power_info *pi;
        int ret;

        pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
        if (pi == NULL)
                return -ENOMEM;
        adev->pm.dpm.priv = pi;

        /* PCIe gen speeds supported by the platform, from the CAIL mask */
        pi->sys_pcie_mask =
                (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
                CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;

        pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

        /* min/max start inverted on purpose; they are narrowed toward the
         * real range as each power state is parsed (see the per-state
         * pcie gen/lane min/max updates in ci_parse_pplib_clock_info) */
        pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
        pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
        pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
        pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;

        pi->pcie_lane_performance.max = 0;
        pi->pcie_lane_performance.min = 16;
        pi->pcie_lane_powersaving.max = 0;
        pi->pcie_lane_powersaving.min = 16;

        /* boot-time clocks/voltages from the vbios firmware info table */
        ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
        if (ret) {
                ci_dpm_fini(adev);
                return ret;
        }

        ret = amdgpu_get_platform_caps(adev);
        if (ret) {
                ci_dpm_fini(adev);
                return ret;
        }

        ret = amdgpu_parse_extended_power_table(adev);
        if (ret) {
                ci_dpm_fini(adev);
                return ret;
        }

        ret = ci_parse_power_table(adev);
        if (ret) {
                ci_dpm_fini(adev);
                return ret;
        }

        pi->dll_default_on = false;
        pi->sram_end = SMC_RAM_END;

        /* default activity targets for all 8 sclk DPM levels */
        pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
        pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
        pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
        pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
        pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
        pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
        pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
        pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

        pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

        /* all DPM domains enabled by default */
        pi->sclk_dpm_key_disabled = 0;
        pi->mclk_dpm_key_disabled = 0;
        pi->pcie_dpm_key_disabled = 0;
        pi->thermal_sclk_dpm_enabled = 0;

        /* sclk deep sleep is opt-in via the pp_feature_mask module parameter */
        if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
                pi->caps_sclk_ds = true;
        else
                pi->caps_sclk_ds = false;

        /* mclk thresholds in 10 kHz units */
        pi->mclk_strobe_mode_threshold = 40000;
        pi->mclk_stutter_mode_threshold = 40000;
        pi->mclk_edc_enable_threshold = 40000;
        pi->mclk_edc_wr_enable_threshold = 40000;

        ci_initialize_powertune_defaults(adev);

        pi->caps_fps = false;

        pi->caps_sclk_throttle_low_notification = false;

        pi->caps_uvd_dpm = true;
        pi->caps_vce_dpm = true;

        ci_get_leakage_voltages(adev);
        ci_patch_dependency_tables_with_leakage(adev);
        ci_set_private_data_variables_based_on_pptable(adev);

        /* fixed 4-entry dispclk -> vddc lookup table */
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
                kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
        if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                ci_dpm_fini(adev);
                return -ENOMEM;
        }
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

        adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
        adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
        adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

        adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
        adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
        adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
        adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

        /* thermal trip points, in millidegrees Celsius */
        if (adev->asic_type == CHIP_HAWAII) {
                pi->thermal_temp_setting.temperature_low = 94500;
                pi->thermal_temp_setting.temperature_high = 95000;
                pi->thermal_temp_setting.temperature_shutdown = 104000;
        } else {
                pi->thermal_temp_setting.temperature_low = 99500;
                pi->thermal_temp_setting.temperature_high = 100000;
                pi->thermal_temp_setting.temperature_shutdown = 104000;
        }

        pi->uvd_enabled = false;

        dpm_table = &pi->smc_state_table;

        /* probe the optional board GPIOs: VR hot, AC/DC switch, PCC */
        gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
        if (gpio.valid) {
                dpm_table->VRHotGpio = gpio.shift;
                adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
        } else {
                dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
                adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
        }

        gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
        if (gpio.valid) {
                dpm_table->AcDcGpio = gpio.shift;
                adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
        } else {
                dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
                adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
        }

        gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
        if (gpio.valid) {
                u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);

                /* map the PCC pin number to the matching CNB_PWRMGT_CNTL
                 * control field; only pins 0-4 are wired up */
                switch (gpio.shift) {
                case 0:
                        tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
                        tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
                        break;
                case 1:
                        tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
                        tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
                        break;
                case 2:
                        tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
                        break;
                case 3:
                        tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
                        break;
                case 4:
                        tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
                        break;
                default:
                        DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
                        break;
                }
                WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
        }

        /* discover how VDDC/VDDCI/MVDD are regulated (GPIO LUT vs SVID2) */
        pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
        pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
        pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
        if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
                pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
        else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
                pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
                if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
                        pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
                else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
                        pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
                else
                        adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
        }

        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
                if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
                        pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
                else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
                        pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
                else
                        adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
        }

        pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
        pi->pcie_performance_request =
                amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
        pi->pcie_performance_request = false;
#endif

        /* if an ASIC_InternalSS_Info table exists, assume spread spectrum
         * support; dynamic_ss is enabled either way */
        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
                                   &frev, &crev, &data_offset)) {
                pi->caps_sclk_ss_support = true;
                pi->caps_mclk_ss_support = true;
                pi->dynamic_ss = true;
        } else {
                pi->caps_sclk_ss_support = false;
                pi->caps_mclk_ss_support = false;
                pi->dynamic_ss = true;
        }

        if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
                pi->thermal_protection = true;
        else
                pi->thermal_protection = false;

        pi->caps_dynamic_ac_timing = true;

        pi->uvd_power_gated = true;

        /* make sure dc limits are valid */
        if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
            (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
                        adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

        pi->fan_ctrl_is_in_default_mode = true;

        return 0;
}
6106
6107 static void
6108 ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6109                                                struct seq_file *m)
6110 {
6111         struct ci_power_info *pi = ci_get_pi(adev);
6112         struct amdgpu_ps *rps = &pi->current_rps;
6113         u32 sclk = ci_get_average_sclk_freq(adev);
6114         u32 mclk = ci_get_average_mclk_freq(adev);
6115         u32 activity_percent = 50;
6116         int ret;
6117
6118         ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6119                                         &activity_percent);
6120
6121         if (ret == 0) {
6122                 activity_percent += 0x80;
6123                 activity_percent >>= 8;
6124                 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6125         }
6126
6127         seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
6128         seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6129         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6130                    sclk, mclk);
6131         seq_printf(m, "GPU load: %u %%\n", activity_percent);
6132 }
6133
6134 static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6135                                      struct amdgpu_ps *rps)
6136 {
6137         struct ci_ps *ps = ci_get_ps(rps);
6138         struct ci_pl *pl;
6139         int i;
6140
6141         amdgpu_dpm_print_class_info(rps->class, rps->class2);
6142         amdgpu_dpm_print_cap_info(rps->caps);
6143         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6144         for (i = 0; i < ps->performance_level_count; i++) {
6145                 pl = &ps->performance_levels[i];
6146                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6147                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6148         }
6149         amdgpu_dpm_print_ps_status(adev, rps);
6150 }
6151
6152 static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6153                                                 const struct ci_pl *ci_cpl2)
6154 {
6155         return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6156                   (ci_cpl1->sclk == ci_cpl2->sclk) &&
6157                   (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6158                   (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6159 }
6160
6161 static int ci_check_state_equal(struct amdgpu_device *adev,
6162                                 struct amdgpu_ps *cps,
6163                                 struct amdgpu_ps *rps,
6164                                 bool *equal)
6165 {
6166         struct ci_ps *ci_cps;
6167         struct ci_ps *ci_rps;
6168         int i;
6169
6170         if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6171                 return -EINVAL;
6172
6173         ci_cps = ci_get_ps(cps);
6174         ci_rps = ci_get_ps(rps);
6175
6176         if (ci_cps == NULL) {
6177                 *equal = false;
6178                 return 0;
6179         }
6180
6181         if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6182
6183                 *equal = false;
6184                 return 0;
6185         }
6186
6187         for (i = 0; i < ci_cps->performance_level_count; i++) {
6188                 if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6189                                         &(ci_rps->performance_levels[i]))) {
6190                         *equal = false;
6191                         return 0;
6192                 }
6193         }
6194
6195         /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
6196         *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6197         *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6198
6199         return 0;
6200 }
6201
6202 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6203 {
6204         struct ci_power_info *pi = ci_get_pi(adev);
6205         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6206
6207         if (low)
6208                 return requested_state->performance_levels[0].sclk;
6209         else
6210                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6211 }
6212
6213 static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6214 {
6215         struct ci_power_info *pi = ci_get_pi(adev);
6216         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6217
6218         if (low)
6219                 return requested_state->performance_levels[0].mclk;
6220         else
6221                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6222 }
6223
6224 /* get temperature in millidegrees */
6225 static int ci_dpm_get_temp(struct amdgpu_device *adev)
6226 {
6227         u32 temp;
6228         int actual_temp = 0;
6229
6230         temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6231                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6232
6233         if (temp & 0x200)
6234                 actual_temp = 255;
6235         else
6236                 actual_temp = temp & 0x1ff;
6237
6238         actual_temp = actual_temp * 1000;
6239
6240         return actual_temp;
6241 }
6242
6243 static int ci_set_temperature_range(struct amdgpu_device *adev)
6244 {
6245         int ret;
6246
6247         ret = ci_thermal_enable_alert(adev, false);
6248         if (ret)
6249                 return ret;
6250         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6251                                                CISLANDS_TEMP_RANGE_MAX);
6252         if (ret)
6253                 return ret;
6254         ret = ci_thermal_enable_alert(adev, true);
6255         if (ret)
6256                 return ret;
6257         return ret;
6258 }
6259
/* Install the dpm and irq function pointers; always succeeds. */
static int ci_dpm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        ci_dpm_set_dpm_funcs(adev);
        ci_dpm_set_irq_funcs(adev);

        return 0;
}
6269
6270 static int ci_dpm_late_init(void *handle)
6271 {
6272         int ret;
6273         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6274
6275         if (!amdgpu_dpm)
6276                 return 0;
6277
6278         /* init the sysfs and debugfs files late */
6279         ret = amdgpu_pm_sysfs_init(adev);
6280         if (ret)
6281                 return ret;
6282
6283         ret = ci_set_temperature_range(adev);
6284         if (ret)
6285                 return ret;
6286
6287         return 0;
6288 }
6289
/*
 * ci_dpm_sw_init - software init: thermal interrupts, defaults, ucode, dpm
 *
 * Registers the two thermal interrupt sources (src ids 230 and 231),
 * seeds the default power state and clocks, loads the SMC microcode,
 * and (unless dpm is disabled) runs ci_dpm_init() under the pm mutex.
 * Returns 0 on success or a negative error code.
 */
static int ci_dpm_sw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* both interrupt source ids share the same thermal irq handler */
        ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;

        ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
                                &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;

        /* default to balanced state */
        adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
        adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
        adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
        adev->pm.default_sclk = adev->clock.default_sclk;
        adev->pm.default_mclk = adev->clock.default_mclk;
        adev->pm.current_sclk = adev->clock.default_sclk;
        adev->pm.current_mclk = adev->clock.default_mclk;
        adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

        ret = ci_dpm_init_microcode(adev);
        if (ret)
                return ret;

        /* dpm disabled by module parameter: firmware is loaded, stop here */
        if (amdgpu_dpm == 0)
                return 0;

        INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
        mutex_lock(&adev->pm.mutex);
        ret = ci_dpm_init(adev);
        if (ret)
                goto dpm_failed;
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        /* amdgpu_dpm == 1 additionally dumps the parsed power states */
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");

        return 0;

dpm_failed:
        ci_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);
        DRM_ERROR("amdgpu: dpm initialization failed\n");
        return ret;
}
6341
/*
 * ci_dpm_sw_fini - undo ci_dpm_sw_init
 *
 * Ordering matters: flush any pending thermal work before taking the pm
 * mutex, tear down sysfs and the dpm state under it, then release the
 * SMC firmware last.
 */
static int ci_dpm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        flush_work(&adev->pm.dpm.thermal.work);

        mutex_lock(&adev->pm.mutex);
        amdgpu_pm_sysfs_fini(adev);
        ci_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);

        release_firmware(adev->pm.fw);
        adev->pm.fw = NULL;

        return 0;
}
6358
6359 static int ci_dpm_hw_init(void *handle)
6360 {
6361         int ret;
6362
6363         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6364
6365         if (!amdgpu_dpm) {
6366                 ret = ci_upload_firmware(adev);
6367                 if (ret) {
6368                         DRM_ERROR("ci_upload_firmware failed\n");
6369                         return ret;
6370                 }
6371                 ci_dpm_start_smc(adev);
6372                 return 0;
6373         }
6374
6375         mutex_lock(&adev->pm.mutex);
6376         ci_dpm_setup_asic(adev);
6377         ret = ci_dpm_enable(adev);
6378         if (ret)
6379                 adev->pm.dpm_enabled = false;
6380         else
6381                 adev->pm.dpm_enabled = true;
6382         mutex_unlock(&adev->pm.mutex);
6383
6384         return ret;
6385 }
6386
6387 static int ci_dpm_hw_fini(void *handle)
6388 {
6389         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6390
6391         if (adev->pm.dpm_enabled) {
6392                 mutex_lock(&adev->pm.mutex);
6393                 ci_dpm_disable(adev);
6394                 mutex_unlock(&adev->pm.mutex);
6395         } else {
6396                 ci_dpm_stop_smc(adev);
6397         }
6398
6399         return 0;
6400 }
6401
6402 static int ci_dpm_suspend(void *handle)
6403 {
6404         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6405
6406         if (adev->pm.dpm_enabled) {
6407                 mutex_lock(&adev->pm.mutex);
6408                 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6409                                AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
6410                 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6411                                AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
6412                 adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
6413                 adev->pm.dpm.last_state = adev->pm.dpm.state;
6414                 adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
6415                 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
6416                 mutex_unlock(&adev->pm.mutex);
6417                 amdgpu_pm_compute_clocks(adev);
6418
6419         }
6420
6421         return 0;
6422 }
6423
6424 static int ci_dpm_resume(void *handle)
6425 {
6426         int ret;
6427         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6428
6429         if (adev->pm.dpm_enabled) {
6430                 /* asic init will reset to the boot state */
6431                 mutex_lock(&adev->pm.mutex);
6432                 ci_dpm_setup_asic(adev);
6433                 ret = ci_dpm_enable(adev);
6434                 if (ret)
6435                         adev->pm.dpm_enabled = false;
6436                 else
6437                         adev->pm.dpm_enabled = true;
6438                 adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
6439                 adev->pm.dpm.state = adev->pm.dpm.last_state;
6440                 mutex_unlock(&adev->pm.mutex);
6441                 if (adev->pm.dpm_enabled)
6442                         amdgpu_pm_compute_clocks(adev);
6443         }
6444         return 0;
6445 }
6446
/* Idle query for the IP framework; not implemented for CI, so the
 * block always reports idle. */
static bool ci_dpm_is_idle(void *handle)
{
	/* XXX */
	return true;
}
6452
/* Wait-for-idle hook; nothing to wait on for CI dpm, returns 0. */
static int ci_dpm_wait_for_idle(void *handle)
{
	/* XXX */
	return 0;
}
6458
/* Soft-reset hook; the dpm block has no soft reset, returns 0. */
static int ci_dpm_soft_reset(void *handle)
{
	return 0;
}
6463
6464 static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6465                                       struct amdgpu_irq_src *source,
6466                                       unsigned type,
6467                                       enum amdgpu_interrupt_state state)
6468 {
6469         u32 cg_thermal_int;
6470
6471         switch (type) {
6472         case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6473                 switch (state) {
6474                 case AMDGPU_IRQ_STATE_DISABLE:
6475                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6476                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6477                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6478                         break;
6479                 case AMDGPU_IRQ_STATE_ENABLE:
6480                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6481                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6482                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6483                         break;
6484                 default:
6485                         break;
6486                 }
6487                 break;
6488
6489         case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6490                 switch (state) {
6491                 case AMDGPU_IRQ_STATE_DISABLE:
6492                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6493                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6494                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6495                         break;
6496                 case AMDGPU_IRQ_STATE_ENABLE:
6497                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6498                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6499                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6500                         break;
6501                 default:
6502                         break;
6503                 }
6504                 break;
6505
6506         default:
6507                 break;
6508         }
6509         return 0;
6510 }
6511
6512 static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6513                                     struct amdgpu_irq_src *source,
6514                                     struct amdgpu_iv_entry *entry)
6515 {
6516         bool queue_thermal = false;
6517
6518         if (entry == NULL)
6519                 return -EINVAL;
6520
6521         switch (entry->src_id) {
6522         case 230: /* thermal low to high */
6523                 DRM_DEBUG("IH: thermal low to high\n");
6524                 adev->pm.dpm.thermal.high_to_low = false;
6525                 queue_thermal = true;
6526                 break;
6527         case 231: /* thermal high to low */
6528                 DRM_DEBUG("IH: thermal high to low\n");
6529                 adev->pm.dpm.thermal.high_to_low = true;
6530                 queue_thermal = true;
6531                 break;
6532         default:
6533                 break;
6534         }
6535
6536         if (queue_thermal)
6537                 schedule_work(&adev->pm.dpm.thermal.work);
6538
6539         return 0;
6540 }
6541
/* Clockgating hook for the IP framework; the dpm block itself has no
 * clockgating to toggle, so this is a no-op. */
static int ci_dpm_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
6547
/* Powergating hook for the IP framework; no-op for the dpm block. */
static int ci_dpm_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
6553
/* ci_dpm_print_clock_levels - format the dpm level table into @buf.
 * @adev: amdgpu device pointer
 * @type: PP_SCLK, PP_MCLK or PP_PCIE
 * @buf: caller-provided output buffer
 *
 * Queries the SMC (or the PCIe link state) for the current level,
 * then prints one line per dpm level with a '*' marking the active
 * one.  Unknown @type values print nothing.
 *
 * NOTE(review): buf is filled with unbounded sprintf(); presumably a
 * PAGE_SIZE sysfs buffer that is always large enough - confirm against
 * the caller.
 *
 * Returns the number of characters written.
 */
static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
		enum pp_clock_type type, char *buf)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;

	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* ask the SMC for the current sclk; reply lands in ARG_0 */
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		/* find the first table level at or above the current clock */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		/* same scheme as PP_SCLK, but for the memory clock */
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* PCIe levels match exactly rather than by threshold */
		pcie_speed = ci_get_current_pcie_speed(adev);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
6620
6621 static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
6622                 enum pp_clock_type type, uint32_t mask)
6623 {
6624         struct ci_power_info *pi = ci_get_pi(adev);
6625
6626         if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
6627                                 AMD_DPM_FORCED_LEVEL_LOW |
6628                                 AMD_DPM_FORCED_LEVEL_HIGH))
6629                 return -EINVAL;
6630
6631         switch (type) {
6632         case PP_SCLK:
6633                 if (!pi->sclk_dpm_key_disabled)
6634                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6635                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
6636                                         pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6637                 break;
6638
6639         case PP_MCLK:
6640                 if (!pi->mclk_dpm_key_disabled)
6641                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6642                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
6643                                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6644                 break;
6645
6646         case PP_PCIE:
6647         {
6648                 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6649                 uint32_t level = 0;
6650
6651                 while (tmp >>= 1)
6652                         level++;
6653
6654                 if (!pi->pcie_dpm_key_disabled)
6655                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6656                                         PPSMC_MSG_PCIeDPM_ForceLevel,
6657                                         level);
6658                 break;
6659         }
6660         default:
6661                 break;
6662         }
6663
6664         return 0;
6665 }
6666
6667 static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
6668 {
6669         struct ci_power_info *pi = ci_get_pi(adev);
6670         struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
6671         struct ci_single_dpm_table *golden_sclk_table =
6672                         &(pi->golden_dpm_table.sclk_table);
6673         int value;
6674
6675         value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6676                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6677                         100 /
6678                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6679
6680         return value;
6681 }
6682
6683 static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
6684 {
6685         struct ci_power_info *pi = ci_get_pi(adev);
6686         struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6687         struct ci_single_dpm_table *golden_sclk_table =
6688                         &(pi->golden_dpm_table.sclk_table);
6689
6690         if (value > 20)
6691                 value = 20;
6692
6693         ps->performance_levels[ps->performance_level_count - 1].sclk =
6694                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6695                         value / 100 +
6696                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6697
6698         return 0;
6699 }
6700
6701 static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
6702 {
6703         struct ci_power_info *pi = ci_get_pi(adev);
6704         struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
6705         struct ci_single_dpm_table *golden_mclk_table =
6706                         &(pi->golden_dpm_table.mclk_table);
6707         int value;
6708
6709         value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6710                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6711                         100 /
6712                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6713
6714         return value;
6715 }
6716
6717 static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
6718 {
6719         struct ci_power_info *pi = ci_get_pi(adev);
6720         struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6721         struct ci_single_dpm_table *golden_mclk_table =
6722                         &(pi->golden_dpm_table.mclk_table);
6723
6724         if (value > 20)
6725                 value = 20;
6726
6727         ps->performance_levels[ps->performance_level_count - 1].mclk =
6728                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6729                         value / 100 +
6730                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6731
6732         return 0;
6733 }
6734
6735 static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
6736                 struct amd_pp_profile *query)
6737 {
6738         struct ci_power_info *pi = ci_get_pi(adev);
6739
6740         if (!pi || !query)
6741                 return -EINVAL;
6742
6743         if (query->type == AMD_PP_GFX_PROFILE)
6744                 memcpy(query, &pi->gfx_power_profile,
6745                                 sizeof(struct amd_pp_profile));
6746         else if (query->type == AMD_PP_COMPUTE_PROFILE)
6747                 memcpy(query, &pi->compute_power_profile,
6748                                 sizeof(struct amd_pp_profile));
6749         else
6750                 return -EINVAL;
6751
6752         return 0;
6753 }
6754
6755 static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
6756                 struct amd_pp_profile *request)
6757 {
6758         struct ci_power_info *pi = ci_get_pi(adev);
6759         struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6760         struct SMU7_Discrete_GraphicsLevel *levels =
6761                         pi->smc_state_table.GraphicsLevel;
6762         uint32_t array = pi->dpm_table_start +
6763                         offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
6764         uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
6765                         SMU7_MAX_LEVELS_GRAPHICS;
6766         uint32_t i;
6767
6768         for (i = 0; i < dpm_table->sclk_table.count; i++) {
6769                 levels[i].ActivityLevel =
6770                                 cpu_to_be16(request->activity_threshold);
6771                 levels[i].EnabledForActivity = 1;
6772                 levels[i].UpH = request->up_hyst;
6773                 levels[i].DownH = request->down_hyst;
6774         }
6775
6776         return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
6777                                 array_size, pi->sram_end);
6778 }
6779
6780 static void ci_find_min_clock_masks(struct amdgpu_device *adev,
6781                 uint32_t *sclk_mask, uint32_t *mclk_mask,
6782                 uint32_t min_sclk, uint32_t min_mclk)
6783 {
6784         struct ci_power_info *pi = ci_get_pi(adev);
6785         struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6786         uint32_t i;
6787
6788         for (i = 0; i < dpm_table->sclk_table.count; i++) {
6789                 if (dpm_table->sclk_table.dpm_levels[i].enabled &&
6790                         dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
6791                         *sclk_mask |= 1 << i;
6792         }
6793
6794         for (i = 0; i < dpm_table->mclk_table.count; i++) {
6795                 if (dpm_table->mclk_table.dpm_levels[i].enabled &&
6796                         dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
6797                         *mclk_mask |= 1 << i;
6798         }
6799 }
6800
6801 static int ci_set_power_profile_state(struct amdgpu_device *adev,
6802                 struct amd_pp_profile *request)
6803 {
6804         struct ci_power_info *pi = ci_get_pi(adev);
6805         int tmp_result, result = 0;
6806         uint32_t sclk_mask = 0, mclk_mask = 0;
6807
6808         tmp_result = ci_freeze_sclk_mclk_dpm(adev);
6809         if (tmp_result) {
6810                 DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
6811                 result = tmp_result;
6812         }
6813
6814         tmp_result = ci_populate_requested_graphic_levels(adev,
6815                         request);
6816         if (tmp_result) {
6817                 DRM_ERROR("Failed to populate requested graphic levels!");
6818                 result = tmp_result;
6819         }
6820
6821         tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
6822         if (tmp_result) {
6823                 DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
6824                 result = tmp_result;
6825         }
6826
6827         ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
6828                         request->min_sclk, request->min_mclk);
6829
6830         if (sclk_mask) {
6831                 if (!pi->sclk_dpm_key_disabled)
6832                         amdgpu_ci_send_msg_to_smc_with_parameter(
6833                                 adev,
6834                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6835                                 pi->dpm_level_enable_mask.
6836                                 sclk_dpm_enable_mask &
6837                                 sclk_mask);
6838         }
6839
6840         if (mclk_mask) {
6841                 if (!pi->mclk_dpm_key_disabled)
6842                         amdgpu_ci_send_msg_to_smc_with_parameter(
6843                                 adev,
6844                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6845                                 pi->dpm_level_enable_mask.
6846                                 mclk_dpm_enable_mask &
6847                                 mclk_mask);
6848         }
6849
6850
6851         return result;
6852 }
6853
/* ci_dpm_set_power_profile_state - store and/or apply a power profile.
 *
 * A request with any tuning field set is stored as the new gfx or
 * compute profile and applied immediately only if that profile type is
 * currently active.  An empty request (all tuning fields zero)
 * re-applies the stored profile of the requested type.  Only allowed
 * when the forced performance level is AUTO.
 *
 * NOTE(review): failures from ci_set_power_profile_state() leave ret
 * non-zero, which only skips the current_power_profile update - the
 * function still returns 0.  Looks like deliberate best-effort
 * behavior, but confirm against the powerplay callers.
 */
static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
		struct amd_pp_profile *request)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = -1;

	if (!pi || !request)
		return -EINVAL;

	/* profiles are only honoured in auto performance mode */
	if (adev->pm.dpm.forced_level !=
			AMD_DPM_FORCED_LEVEL_AUTO)
		return -EINVAL;

	if (request->min_sclk ||
		request->min_mclk ||
		request->activity_threshold ||
		request->up_hyst ||
		request->down_hyst) {
		/* tuned request: remember it per type ... */
		if (request->type == AMD_PP_GFX_PROFILE)
			memcpy(&pi->gfx_power_profile, request,
					sizeof(struct amd_pp_profile));
		else if (request->type == AMD_PP_COMPUTE_PROFILE)
			memcpy(&pi->compute_power_profile, request,
					sizeof(struct amd_pp_profile));
		else
			return -EINVAL;

		/* ... and apply it now only if that type is active */
		if (request->type == pi->current_power_profile)
			ret = ci_set_power_profile_state(
					adev,
					request);
	} else {
		/* set power profile if it exists */
		switch (request->type) {
		case AMD_PP_GFX_PROFILE:
			ret = ci_set_power_profile_state(
				adev,
				&pi->gfx_power_profile);
			break;
		case AMD_PP_COMPUTE_PROFILE:
			ret = ci_set_power_profile_state(
				adev,
				&pi->compute_power_profile);
			break;
		default:
			return -EINVAL;
		}
	}

	if (!ret)
		pi->current_power_profile = request->type;

	return 0;
}
6908
6909 static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
6910                 struct amd_pp_profile *request)
6911 {
6912         struct ci_power_info *pi = ci_get_pi(adev);
6913
6914         if (!pi || !request)
6915                 return -EINVAL;
6916
6917         if (request->type == AMD_PP_GFX_PROFILE) {
6918                 pi->gfx_power_profile = pi->default_gfx_power_profile;
6919                 return ci_dpm_set_power_profile_state(adev,
6920                                           &pi->gfx_power_profile);
6921         } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
6922                 pi->compute_power_profile =
6923                         pi->default_compute_power_profile;
6924                 return ci_dpm_set_power_profile_state(adev,
6925                                           &pi->compute_power_profile);
6926         } else
6927                 return -EINVAL;
6928 }
6929
6930 static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
6931                 enum amd_pp_profile_type type)
6932 {
6933         struct ci_power_info *pi = ci_get_pi(adev);
6934         struct amd_pp_profile request = {0};
6935
6936         if (!pi)
6937                 return -EINVAL;
6938
6939         if (pi->current_power_profile != type) {
6940                 request.type = type;
6941                 return ci_dpm_set_power_profile_state(adev, &request);
6942         }
6943
6944         return 0;
6945 }
6946
6947 static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
6948                               void *value, int *size)
6949 {
6950         u32 activity_percent = 50;
6951         int ret;
6952
6953         /* size must be at least 4 bytes for all sensors */
6954         if (*size < 4)
6955                 return -EINVAL;
6956
6957         switch (idx) {
6958         case AMDGPU_PP_SENSOR_GFX_SCLK:
6959                 *((uint32_t *)value) = ci_get_average_sclk_freq(adev);
6960                 *size = 4;
6961                 return 0;
6962         case AMDGPU_PP_SENSOR_GFX_MCLK:
6963                 *((uint32_t *)value) = ci_get_average_mclk_freq(adev);
6964                 *size = 4;
6965                 return 0;
6966         case AMDGPU_PP_SENSOR_GPU_TEMP:
6967                 *((uint32_t *)value) = ci_dpm_get_temp(adev);
6968                 *size = 4;
6969                 return 0;
6970         case AMDGPU_PP_SENSOR_GPU_LOAD:
6971                 ret = ci_read_smc_soft_register(adev,
6972                                                 offsetof(SMU7_SoftRegisters,
6973                                                          AverageGraphicsA),
6974                                                 &activity_percent);
6975                 if (ret == 0) {
6976                         activity_percent += 0x80;
6977                         activity_percent >>= 8;
6978                         activity_percent =
6979                                 activity_percent > 100 ? 100 : activity_percent;
6980                 }
6981                 *((uint32_t *)value) = activity_percent;
6982                 *size = 4;
6983                 return 0;
6984         default:
6985                 return -EINVAL;
6986         }
6987 }
6988
/* IP-block callback table registering the CI dpm block with the
 * amdgpu IP framework (init/fini, suspend/resume, idle and gating). */
const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};
7005
/* dpm callback table: clock/state control, fan control, overdrive,
 * power profiles and sensor reads for the CI dpm implementation.
 * Installed into adev->pm.funcs by ci_dpm_set_dpm_funcs(). */
static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
	.get_temperature = &ci_dpm_get_temp,
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.powergate_uvd = &ci_dpm_powergate_uvd,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
	.print_clock_levels = ci_dpm_print_clock_levels,
	.force_clock_level = ci_dpm_force_clock_level,
	.get_sclk_od = ci_dpm_get_sclk_od,
	.set_sclk_od = ci_dpm_set_sclk_od,
	.get_mclk_od = ci_dpm_get_mclk_od,
	.set_mclk_od = ci_dpm_set_mclk_od,
	.check_state_equal = ci_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.get_power_profile_state = ci_dpm_get_power_profile_state,
	.set_power_profile_state = ci_dpm_set_power_profile_state,
	.reset_power_profile_state = ci_dpm_reset_power_profile_state,
	.switch_power_profile = ci_dpm_switch_power_profile,
	.read_sensor = ci_dpm_read_sensor,
};
7037
7038 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
7039 {
7040         if (adev->pm.funcs == NULL)
7041                 adev->pm.funcs = &ci_dpm_funcs;
7042 }
7043
/* Thermal interrupt source callbacks: mask control and vector handler. */
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};
7048
/* Hook up the thermal interrupt source to the CI dpm handlers. */
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}